diff --git a/.gitignore b/.gitignore index 960ca0c8d5..8f815279e3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,10 @@ .idea/* .vscode/* +.serena *.out *.test .DS_Store pkg/parser/testdata/lotto.graphql *node_modules* -*vendor* \ No newline at end of file +*vendor* +docs/superpowers/ \ No newline at end of file diff --git a/AGENTS.md b/AGENTS.md new file mode 120000 index 0000000000..681311eb9c --- /dev/null +++ b/AGENTS.md @@ -0,0 +1 @@ +CLAUDE.md \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000000..d33357513a --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,283 @@ +# graphql-go-tools + +GraphQL Router / API Gateway framework for Go. Federation-first, with query planning, parallel resolution, and entity caching. + +Module: `github.com/wundergraph/graphql-go-tools` (Go 1.25, go.work workspace) + +## Data Flow + +```text +parse → normalize → validate → plan → resolve → response +``` + +## Package Map + +### Core (v2/pkg/) + +| Package | Purpose | +|---------|---------| +| `ast` | GraphQL AST representation | +| `astparser` | GraphQL parser (schema + operations) | +| `astnormalization` | AST normalization passes | +| `astvalidation` | Schema and query validation | +| `astvisitor` | AST visitor pattern for tree walking | +| `astprinter` | AST to string serialization | +| `asttransform` | AST transformations | +| `astimport` | AST import/merge utilities | +| `fastjsonext` | JSON manipulation extensions (astjson API) | +| `federation` | Federation composition utilities | +| `errorcodes` | Error code definitions | + +### Engine (v2/pkg/engine/) + +| Package | Purpose | +|---------|---------| +| `plan` | Query planning, federation metadata, cache configuration types | +| **`resolve`** | **Resolution engine: fetching, caching, rendering** → see [resolve/CLAUDE.md](v2/pkg/engine/resolve/CLAUDE.md) | +| `datasource/graphql_datasource` | GraphQL subgraph datasource adapter | +| `postprocess` | Response post-processing passes (L1 cache optimization, fetch tree building) | + +### Execution (execution/) + +| Package | Purpose | +|---------|---------| +| `engine` | Federation engine config factory (`SubgraphCachingConfig`, `WithSubgraphEntityCachingConfigs`), E2E tests | +| `federationtesting` | Test federation services: accounts, products, reviews | +| `graphql` | GraphQL execution utilities | + +## Key Architectural Decisions + +- **Federation-first**: designed for federated GraphQL with entity resolution and `@key`/`@provides`/`@requires` +- **Arena-based allocation**: JSON values live on arena memory (no GC pressure), released per-request +- **Parallel resolution**: fetch tree with Sequence/Parallel nodes, 4-phase parallel execution with L1/L2 caching +- **Two-pass rendering**: pre-walk (validate, collect errors) + print-walk (render JSON) + +## Entity Caching + +Two-level entity caching system (L1 per-request + L2 external). +See: +- [v2/pkg/engine/resolve/CLAUDE.md](v2/pkg/engine/resolve/CLAUDE.md) — full resolve package reference (resolution pipeline + caching internals) +- [ENTITY_CACHING_INTEGRATION.md](docs/entity-caching/ENTITY_CACHING_INTEGRATION.md) — router integration guide (public APIs, configuration, examples) +- [ENTITY_CACHING_ACCEPTANCE_CRITERIA.md](docs/entity-caching/ENTITY_CACHING_ACCEPTANCE_CRITERIA.md) — acceptance criteria with test references (includes AC-RS-01..07 for @requestScoped) + +Critical L1 invariant: +- **Always-StructuralCopy L1 writes and reads**: L1 writes (`l1Cache` and + `requestScopedL1`) always StructuralCopy onto `l.jsonArena`. + Entity L1 uses `structuralCopyNormalizedPassthrough` — renames aliases + to schema names via `astjson.Transform` but keeps ALL source fields + (including @key fields not in ProvidesData) via `Transform.Passthrough`. + L1 reads use `structuralCopyDenormalizedPassthrough` — restores aliases + while preserving all accumulated fields. + StructuralCopy clones container nodes (objects, arrays) on the arena + while aliasing leaf nodes from the source — safe because all values + share the same arena lifetime within a request. + Transforms are ephemeral: built inline via reusable `l.transformEntries` + slab, consumed by `l.parser.StructuralCopyWithTransform`, then discarded. + Merges into an existing L1 entry use the working-copy-and-swap pattern: + StructuralCopy the existing entry into a working copy, + run `astjson.MergeValues` against the working copy, + and store either the working copy (on success) or the fresh incoming value (on merge failure). + Never mutate the live cache entry in place — `MergeValues` is non-atomic on failure + and a partial mutation would corrupt every sibling L1 key pointing at the same entry. + L2 writes use non-passthrough `structuralCopyNormalized` which projects + to ProvidesData fields only (rename + drop unlisted fields). + +### @requestScoped Coordinate L1 (symmetric model) + +Separate per-request `map[string]*astjson.Value` (`requestScopedL1`) on the Loader. +Main-thread only — read and written from `tryRequestScopedInjection` and `exportRequestScopedFields`, +which run on the resolver's main thread in parallel Phase 1.5, parallel Phase 3.5, +and `resolveSingle`. + +**Directive (composition-side)**: +```graphql +directive @requestScoped(key: String!) on FIELD_DEFINITION +``` + +**Semantics**: purely symmetric — every field annotated with `@requestScoped(key: "X")` +in the same subgraph shares the same L1 entry `{subgraphName}.X`. There is no +receiver/provider distinction. Each participating field is BOTH a reader (hint) AND +a writer (export). Whichever field is resolved first populates L1; subsequent fields +with the same key inject from L1 and may skip their fetch. + +Composition validates `key` is mandatory and warns when a key is declared on only +one field in the subgraph (the directive is meaningless without a second reader). + +Key files: +- `v2/pkg/engine/resolve/fetch.go` — `RequestScopedField` carries `ProvidesData *Object` for alias-aware normalization +- `v2/pkg/engine/resolve/loader.go` — `requestScopedL1 map[string]*astjson.Value`, injection in `resolveParallel` Phase 1.5 + 3.5 and `resolveSingle` +- `v2/pkg/engine/resolve/loader_cache.go` — `tryRequestScopedInjection` and `exportRequestScopedFields` use `validateItemHasRequiredData` and ephemeral normalize / denormalize transforms via `structuralCopyNormalized` / `structuralCopyDenormalized` (the same StructuralCopy-driven pipeline as entity L1/L2) +- `v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go` — `ConfigureFetch` emits a `RequestScopedField` for every @requestScoped field (symmetric) +- `v2/pkg/engine/plan/federation_metadata.go` — `RequestScopedField` (no more `ResolveFrom`), `RequestScopedExportsForField` returns the field's own L1 key +- `v2/pkg/engine/plan/visitor.go` — `configureFetchCaching` populates `ProvidesData` and rewrites `FieldName`/`FieldPath` to the outer query's alias via `populateRequestScopedFieldsProvidesData` + +Critical invariants: +- **Field widening check**: `tryRequestScopedInjection` must verify the cached value has ALL fields + listed in `hint.ProvidesData` (alias-aware `*Object`) before injecting, via `validateItemHasRequiredData`. + Otherwise a narrow root query (`{id, name}`) poisons the L1 for a wider entity fetch (`{id, name, email}`). + Use collect-then-inject: verify all hints first, only mutate items if ALL succeed. + Never partial-inject — a later hint failure must leave items untouched. +- **Copy-on-inject**: cached values must be StructuralCopy'd via `structuralCopyDenormalized` + before injection to prevent pointer aliasing with the response data tree. +- **Copy-on-export**: `exportRequestScopedFields` must ALSO copy values via + `structuralCopyNormalized` before storing in `requestScopedL1`. + StructuralCopy creates independent container nodes while aliasing leaf values + on the same arena — safe for same-arena, same-request lifetime. +- **L1 gating**: `tryRequestScopedInjection` and `exportRequestScopedFields` must check + `l.ctx.ExecutionOptions.Caching.EnableL1Cache`. The coordinate L1 is part of the L1 cache layer + and must be disabled when L1 is disabled per-request. +- **Trace reporting (LoadSkipped)**: when injection succeeds and fetch is skipped, + set `ensureFetchTrace(f).LoadSkipped = true` at ALL call sites (parallel Phase 1.5 + 3.5 and 3 single fetch variants). +- **Trace reporting (L1 hit counters)**: when injection succeeds, set + `res.cacheTraceRequestScopedHits = res.cacheTraceEntityCount`. The `buildCacheTrace` function + folds these into `L1Hit` / `L1Miss` so the trace UI correctly shows a red L1 hit instead of + stale L1 misses recorded during Phase 1. Never mutate `cacheTraceL1Hits`/`cacheTraceL1Misses` + directly at the injection site — use the dedicated counter and fold at trace-build time. +- **InterfaceObject mapping**: the planner resolves concrete entity types (Article) to interface types + (Personalized) via `InterfaceObjects` config to find @requestScoped fields on the interface. + +### Subscription Entity Caching + +`SubscriptionEntityPopulationConfiguration` requires BOTH `TypeName` AND `FieldName` to be set. +The lookup method `FindByTypeAndFieldName` matches on both fields. +If `FieldName` is empty, the lookup always fails and subscription cache populate/invalidate silently does nothing. + +The router's `factoryresolver.go` must set `FieldName: cp.FieldName` (populate) and `FieldName: ci.FieldName` (invalidate) +when creating these configs. + +### @requestScoped Alias Handling + +The coordinate L1 cache is fully alias-aware via the unified `*Object`/ProvidesData +pipeline shared with entity L1 and L2: +- **L1 key** is `{subgraphName}.{key}` — alias-independent by construction +- **L1 stored value** uses schema field names (aliases normalized away via `structuralCopyNormalized` with ephemeral Transform) +- **Widening check** uses `validateItemHasRequiredData` against the query's `ProvidesData` +- **Denormalized read** via `structuralCopyDenormalized` re-applies aliases for the current query + +Planner populates `ProvidesData` on `RequestScopedFields` in `configureFetchCaching` by +locating the matching sub-Object in `plannerObjects[fetchID]` and rewriting +`FieldName`/`FieldPath` to the outer query's alias when needed. + +### Per-Request Cache Control Headers + +The router supports per-request cache control via headers (for debugging / playground): +- `X-WG-Disable-Entity-Cache: true` — disable both L1 and L2 +- `X-WG-Disable-Entity-Cache-L1: true` — disable L1 only +- `X-WG-Disable-Entity-Cache-L2: true` — disable L2 only + +These headers are gated on `reqCtx.operation.traceOptions.Enable` (i.e., dev mode or a valid studio +request token) to prevent production abuse. The gate is in `GraphQLHandler.cachingOptions` in +`router/core/graphql_handler.go`. Disabling L1 via these headers also disables @requestScoped +coordinate L1 (since it shares the `EnableL1Cache` flag). + +## Code Comment Conventions + +**Never reference pull requests, issue numbers, review threads, or reviewer names in code comments.** + +Comments live in the codebase forever and outlive the workflow context they were written in. +A `PR #1259` reference is meaningful for two weeks and noise for the next ten years. +Reviewer attribution (`as requested in ysmolski's review`, `addresses SkArchon's comment`) belongs in commit messages and PR descriptions, never in source files. + +If a comment exists to explain a non-obvious behavior, explain the **behavior**, not the historical reason it was added. + +```go +// CORRECT — explains the invariant +// isEntityRootField previously compared a non-normalized current path against a +// normalized boundary path. Without normalizing here first, queries that wrap the +// boundary in `... on User { ... }` cause the prefix check to silently fail. + +// WRONG — references the PR / review / ticket where the fix was discussed +// Regression guard for the A42 bug in PR #1259 raised by ysmolski: +// isEntityRootField previously compared a non-normalized current path... +``` + +This applies to all code comments — production, tests, doc comments, file headers. +Commit messages may reference PRs and reviewers; code may not. + +## Testing Conventions + +**Before writing or modifying any test, read the package's `CLAUDE.md` if one exists.** +Package-level conventions are mandatory and stricter than the universal rules below. +Known package conventions: +- [v2/pkg/engine/resolve/CLAUDE.md](v2/pkg/engine/resolve/CLAUDE.md) — unit and integration tests for the resolve engine. +- [execution/engine/CLAUDE.md](execution/engine/CLAUDE.md) — E2E tests against the federation gateway. **Stricter rules apply — see "E2E rules" below.** + +### Universal rules (every package) + +- **Exact assertions only**: use `assert.Equal` with exact expected values. + Never use `GreaterOrEqual`, `Contains`, `Greater`, or any vague comparison. + If you do not know the expected value, investigate until you do. +- **Assert entire structs**: always `assert.Equal` on the complete struct. + Never iterate over fields with individual assertions. + For large structs, construct the full expected value inline anyway. +- **Inline literal data**: GraphQL queries, cache keys, byte sizes, expected JSON responses must appear inline at the assertion or setup site that uses them. + Never hidden in file-level `const` blocks or shared vars that force reviewers to jump around. +- **Snapshot comments**: every event line in a `CacheAnalyticsSnapshot` (or any other event-stream assertion) must have a brief trailing comment explaining **why** that event occurred. +- **Cache log rule**: every `defaultCache.ClearLog()` must be followed by `GetLog()` + full assertions before the next `ClearLog()` or end of test. + Never clear a log without verifying its contents. +- **Multi-key / multi-event struct literals must wrap one item per line**: + cache log entries, snapshot events, and any struct literal with two or more nested slices, maps, or long string fields are unreadable on a single line. + Format vertically. + + ```go + // CORRECT — vertical, scannable + wantLog := []CacheLogEntry{ + { + Operation: "get", + Keys: []string{ + `{"__typename":"Query","field":"cat"}`, + `{"__typename":"Query","field":"me"}`, + }, + Hits: []bool{false, false}, + }, + { + Operation: "set", + Keys: []string{`{"__typename":"Query","field":"me"}`}, + }, + } + + // WRONG — single 200-character line, eye has to parse comma-by-comma + wantLog := []CacheLogEntry{ + {Operation: "get", Keys: []string{`{"__typename":"Query","field":"cat"}`, `{"__typename":"Query","field":"me"}`}, Hits: []bool{false, false}}, + } + ``` + +### E2E rules (under `execution/engine/`) + +In addition to the universal rules above, [execution/engine/CLAUDE.md](execution/engine/CLAUDE.md) requires: + +- **Self-contained subtests**: each `t.Run` must be independently readable top to bottom. + **Duplication across subtests is preferred over sharing.** + Do NOT extract setup into shared helpers like `newXxxFederationTestEnv(...)`. + Do NOT define config structs as named vars when they are used in only one subtest. +- **Inline setup**: cache instances, tracker setup, gateway options, context, and URL parsing belong inside each subtest body. +- **Inline GraphQL queries**: use `QueryStringWithHeaders` with the query string inline. + Do not load queries from external files. +- **No new shared test helpers** in `execution/engine/` without explicit approval — they violate the self-contained-subtest rule. + +### LLM agent self-check (mandatory) + +Before writing or editing any test, ask yourself: + +| If you are about to... | STOP and instead... | +|---|---| +| Create a `newXxxEnv(...)` style helper used by multiple subtests in `execution/engine/` | Inline the setup into each subtest. | +| Pull a config struct out of a `t.Run` body into a top-level var or helper used once | Inline it back into the subtest. | +| Put two or more `Keys`/`Hits`/event-list entries on one line of a struct literal | Wrap to one item per line. | +| Add a test under `execution/engine/` | Re-read [execution/engine/CLAUDE.md](execution/engine/CLAUDE.md) first. | +| Add a test under `v2/pkg/engine/resolve/` | Re-read [v2/pkg/engine/resolve/CLAUDE.md](v2/pkg/engine/resolve/CLAUDE.md) first. | +| Use `assert.Contains`, `assert.GreaterOrEqual`, or any partial assertion | Investigate the actual expected value and use `assert.Equal`. | + +If you find yourself extracting shared test scaffolding "to reduce duplication" in `execution/engine/`, that is the smell. +Duplication is the convention. + +### Federation test services + +`accounts`, `products`, `reviews` live in `execution/federationtesting/`. + +### Run tests + +```sh +go test ./v2/pkg/engine/resolve/... -v +go test ./execution/engine/... -v +``` diff --git a/README.md b/README.md index fb7245a804..40ac53f42a 100644 --- a/README.md +++ b/README.md @@ -647,7 +647,7 @@ func ExampleExecuteOperation() { switch p := preparedPlan.(type) { case *plan.SynchronousResponsePlan: out := &bytes.Buffer{} - err, _ := resolver.ResolveGraphQLResponse(ctx, p.Response, nil, out) + _, err := resolver.ResolveGraphQLResponse(ctx, p.Response, out) if err != nil { panic(err) } diff --git a/docs/entity-caching/ENTITY_CACHING_ACCEPTANCE_CRITERIA.md b/docs/entity-caching/ENTITY_CACHING_ACCEPTANCE_CRITERIA.md new file mode 100644 index 0000000000..47607fc655 --- /dev/null +++ b/docs/entity-caching/ENTITY_CACHING_ACCEPTANCE_CRITERIA.md @@ -0,0 +1,1240 @@ +# Entity Caching Acceptance Criteria + +Two-level entity caching system for GraphQL federation: L1 (per-request, in-memory) eliminates +redundant entity fetches within a single request; L2 (cross-request, external) shares cached +entities across requests via external stores like Redis. + +## L1 Cache (Per-Request, In-Memory) + +### AC-L1-01: Request-scoped isolation +Each GraphQL request gets its own L1 cache instances on the Loader, discarded when the +request completes. Two plain maps live at the L1 layer, both freshly allocated per Loader +and both accessed main-thread-only: + +- **Entity L1 cache** (`Loader.l1Cache`, `map[string]*astjson.Value`): per-request entity + dedup; read via `tryL1CacheLoad`, written via `populateL1Cache` (entity fetches) and + `populateL1CacheForRootFieldEntities` (root-field entity promotion). No locking — + goroutines never touch it. +- **`@requestScoped` coordinate L1** (`Loader.requestScopedL1`, `map[string]*astjson.Value`): + per-subgraph export values keyed by `{subgraphName}.{key}`, populated/read in Phase 1.5, + Phase 3.5 and `resolveSingle`. + +Neither is a `sync.Map`. No data leaks between requests. + +Tests: +- `v2/pkg/engine/resolve/l1_cache_test.go:24` — `TestL1Cache / "L1 hit - same entity fetched twice in same request"` + +### AC-L1-02: Entity fetches only +L1 caches entity fetch results (fetches with `@key`-based representations), not root field +query results. Root fields never _read_ from L1 — they use L2 for cross-request caching. +However, root fields that return entities can _populate_ L1 (see AC-L1-08), so that a +subsequent entity fetch within the same request can hit L1. + +Tests: +- `execution/engine/federation_caching_l1_test.go:56` — `TestL1CacheReducesHTTPCalls / "L1 enabled - entity fetches use L1 cache"` + +### AC-L1-03: Cache keys use only @key fields +L1 cache keys are derived exclusively from the entity's `@key` directive fields +(see AC-KEY-01 for canonical format). `@requires` fields are never included because +they vary per consuming subgraph and would fragment the cache. + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:632` — `TestCachingRenderEntityQueryCacheKeyTemplate / "single entity with typename and id"` + +### AC-L1-04: Main-thread L1 check; full hit skips goroutine +L1 lookup happens in Phase 1 (`prepareCacheKeys` + `tryL1CacheLoad`) on the main thread, +before any goroutine is spawned. When every entity in a fetch batch is found in L1, the +fetch sets `cacheSkipFetch=true` and no goroutine is spawned for that fetch. The cached +values are used directly, saving both the goroutine allocation and the network call. + +Tests: +- `v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go:899` — `TestL1CacheSkipsParallelFetch` +- `execution/engine/federation_caching_l1_test.go:449` — `TestL1CacheSelfReferentialEntity / "L1 enabled - sameUserReviewers fetch entirely skipped via L1 cache"` + +### AC-L1-05: Disabled by default +L1 caching must be explicitly enabled per-request via +`ctx.ExecutionOptions.Caching.EnableL1Cache = true`. When disabled, every entity fetch +goes through the normal L2/subgraph path. + +Tests: +- `execution/engine/federation_caching_l1_test.go:93` — `TestL1CacheReducesHTTPCalls / "L1 disabled - more accounts calls without cache"` + +### AC-L1-06: StructuralCopy on L1 read and write +Every L1 cache write StructuralCopies the value onto `l.jsonArena`. +Entity L1 uses `structuralCopyNormalizedPassthrough` — renames aliases +to schema names via an ephemeral `astjson.Transform` while keeping ALL +source fields (including @key fields not in ProvidesData) via +`Transform.Passthrough`. +This preserves field accumulation across fetches: fetch 1 stores `{name}`, +fetch 2 merges `{email}`, L1 has `{name, email}` for fetch 3. + +Every L1 cache read uses `structuralCopyDenormalizedPassthrough` — +restores aliases while preserving all accumulated fields. +StructuralCopy clones container nodes on the arena while aliasing leaf +nodes from the source. +This gives the consumer a structurally independent value and prevents +pointer aliasing during JSON merge for self-referential entities. +Strings are always eagerly decoded (no lazy mutation), making aliased +leaf values safe for concurrent reads. + +L2 writes use non-passthrough `structuralCopyNormalized` which projects +to ProvidesData fields only (rename + drop unlisted fields). + +Merges into an existing L1 entry use the working-copy-and-swap pattern: +StructuralCopy the existing entry into a working copy, +run `astjson.MergeValues` against the working copy, +and store either the working copy (on success) or the fresh incoming +value (on merge failure). +The live cache entry pointer is never mutated in place, +so a partial `MergeValues` failure cannot corrupt sibling L1 keys +pointing at the same entry. + +Tests: +- `execution/engine/federation_caching_l1_test.go:344` — `TestL1CacheSelfReferentialEntity` +- `v2/pkg/engine/resolve/loader_cache_phase2_test.go:21` — `TestL1Cache_RootFieldPromotionWithAliases` (alias-aware StructuralCopy on root-field promotion) +- `v2/pkg/engine/resolve/loader_cache_phase2_test.go:147` — `TestExportRequestScopedFields_MergeWorkingCopyOnFailure` (working-copy-and-swap on merge failure) +- `v2/pkg/engine/resolve/loader_cache_transform_test.go` — `TestStructuralCopyNormalized_*` (alias/arg-suffix normalize + denormalize) +- `v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go` — `TestL1CacheFieldAccumulation` (3-fetch field accumulation with passthrough) + +### AC-L1-07: Union-based L1 optimization +The postprocessor (`optimize_l1_cache.go`) computes the **union** of all +ancestor providers' ProvidesData fields when deciding whether to enable +L1 for a fetch. +If no single provider covers the consumer's field needs, +the union of all prior providers (same entity type, in dependency chain) +is checked. +This enables L1 for fetches whose required fields are spread across +multiple prior fetches. +A fetch is enabled as a writer if it contributes to a union that covers +any descendant consumer. + +Tests: +- `v2/pkg/engine/postprocess/optimize_l1_cache_test.go` — `TestOptimizeL1Cache_Union_*` (9 tests: basic, insufficient, overlapping, 4-fetch chain, etc.) +- `execution/engine/federation_caching_l1_test.go` — `TestL1CacheEntityUnionOptimization` (6 E2E subtests using CacheEntity type) + +### AC-L1-08: Root field entity population +When a root field query (e.g., `topProducts`) returns entities, those entities are +extracted and stored in L1 using their `@key`-based cache keys. This means a subsequent +entity fetch for the same entity within the same request can hit L1 instead of making +another subgraph call. Requires `RootFieldL1EntityCacheKeyTemplates` to be configured. + +If the client's query doesn't select the `@key` fields (e.g., omits `id`), the cache key +is produced with an empty key object (`{"__typename":"Product","key":{}}`) and the entity +is silently stored under this degraded key. It will never match a real entity fetch, so the +behavior is benign but wasteful. + +When the root field is aliased (e.g., `myUser: user(id: $id)`), the entity cache key +template path uses the alias (`myUser`), not the schema field name (`user`), because +the response JSON is keyed by the alias. + +Tests: +- `execution/engine/federation_caching_l1_test.go:667` — `TestL1CacheRootFieldEntityListPopulation` +- `v2/pkg/engine/resolve/l1_cache_test.go:1813` — `TestPopulateL1CacheForRootFieldEntities_MissingKeyFields` +- `v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_entity_key_mapping_test.go:871` — `aliased root fields use alias in entity cache key path` (verifies alias-based path in `RootFieldL1EntityCacheKeyTemplates`) + +### AC-L1-09: Argument-variant coexistence via field merging +When the same entity is fetched with different field arguments (e.g., `friends(first:5)` +and `friends(first:20)`), each variant gets a unique suffixed field name +(e.g., `friends_`, `friends_`). When a second fetch for the same entity +arrives, L1 merges the new fields into the existing cached entity using first-writer-wins +semantics, so all arg variants coexist in a single cached entity. + +L2 also performs arg-variant merging during `updateL2Cache`: before writing a new entity, +existing cached fields from other arg variants are merged in via `MergeValues` so they +are not lost (see AC-L2-08). + +Tests: +- `execution/engine/federation_caching_entity_field_args_test.go:129` — `TestEntityFieldArgsCaching` +- `v2/pkg/engine/resolve/l1_cache_test.go:2609` — `TestMergeEntityFields` (6 subtests: new field added, existing preserved, nil dst/src, non-object, multiple fields coexist) + +## L1/L2 Interaction Ordering + +### AC-L1L2-01: L1 checked before L2; L1 hit skips L2 entirely +Within a single request, L1 is always checked first (Phase 1, main thread). When L1 has +a hit, L2 is never consulted and no subgraph call is made. This holds regardless of L2 +TTL state — even if the L2 entry is expired, stale, or missing, an L1 hit is authoritative. + +L1 is always fresh within a request because it is populated from the current request's own +subgraph fetches (or root field entity extraction), not from L2. L1 and L2 are independent +caches with different scopes: +- L1: per-request, in-memory, populated by fetches within the current request +- L2: cross-request, external, populated after successful subgraph calls + +Tests: +- `v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go:496` — `TestL1L2CacheEndToEnd / "L1+L2 - L1 hit prevents L2 lookup"` (two sequential entity fetches: first populates L1+L2, second hits L1 with zero L2 operations) +- `v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go:605` — `TestL1L2CacheEndToEnd / "L1+L2 - L1 miss, L2 hit provides data"` (L1 miss falls through to L2) +- `v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go:698` — `TestL1L2CacheEndToEnd / "L1+L2 - cross-request: L1 isolated, L2 shared"` (new request has empty L1, uses L2) +- `v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go:899` — `TestL1CacheSkipsParallelFetch` (L1 hit prevents goroutine spawn for parallel fetch) + +## L2 Cache (Cross-Request, External) + +### AC-L2-01: External cache via LoaderCache interface +L2 caching delegates to user-provided implementations of the `LoaderCache` interface +(`Get`/`Set`/`Delete`). Typical backends: Redis, Memcached. Multiple named cache instances +are supported (e.g., different Redis clusters for different entity types). + +Tests: +- `execution/engine/federation_caching_l2_test.go:20` — `TestL2CacheOnly / "L2 enabled - miss then hit across requests"` + +### AC-L2-02: L2 reads use main-thread bulk Get; HTTP runs in goroutines +Within `resolveParallel`, L2 cache reads are issued by `bulkL2Lookup` on the main +thread: one bulk `cache.Get` per cache instance, covering every fetch in the batch +that routes to that instance. Parsed values are materialized on `l.parser` / +`l.jsonArena` and distributed back to each fetch's `l2CacheKeys[].FromCache`. +Only the fallback subgraph HTTP calls run in parallel goroutines (Phase 2HTTP); +those goroutines do HTTP only and do not touch the arena or cache. + +Because a single bulk Get now covers the whole batch, **a bulk Get failure causes +every fetch in the batch to fall back to the subgraph** (documented behavior change +from the old per-fetch isolation). Each affected fetch is marked +`cacheMustBeUpdated`, its `cacheTraceL2GetError` is set, and a +`CacheOperationError` is recorded per fetch in `l2CacheOpErrors`. + +`LoaderCache` implementations still must be safe for concurrent access because +`Set` / `Delete` operations (write-side) continue to run from Phase 4 and may +overlap across concurrent router requests. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:828` — `TestCacheLoadSequential / "two sequential calls - miss then hit"` + +### AC-L2-03: Configurable TTL per entity type +Each entity type (or root field) can have its own TTL configured via +`EntityCacheConfiguration.TTL`. The TTL is passed to `LoaderCache.Set()`. If the cache +backend supports TTL introspection, it returns `RemainingTTL` on `Get` for analytics. + +Tests: +- `execution/engine/federation_caching_test.go:1386` — `TestFederationCaching / "TTL expiry"` + +### AC-L2-04: L2 keys follow canonical format with optional prefix +L2 cache keys use the canonical entity key format (see AC-KEY-01) or root field key +format (see AC-KEY-02), with an optional header hash prefix (AC-KEY-03) and optional +global prefix (AC-KEY-07) prepended for cache isolation. + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:632` — `TestCachingRenderEntityQueryCacheKeyTemplate` +- `v2/pkg/engine/resolve/cache_key_test.go:13` — `TestCachingRenderRootQueryCacheKeyTemplate` +- `v2/pkg/engine/resolve/cache_key_parity_test.go:17` — `TestCacheKeyParityRegression_ReadWriteInvalidation` (combined entity-key read/write/delete parity with global + header prefix) + +### AC-L2-05: Disabled by default +L2 caching must be explicitly enabled per-request via +`ctx.ExecutionOptions.Caching.EnableL2Cache = true` AND configured per-subgraph with +entity/root field cache configurations. + +Tests: +- `execution/engine/federation_caching_l2_test.go:191` — `TestL2CacheOnly / "L2 disabled - no external cache operations"` + +### AC-L2-06: Normalization before storage +Before writing to L2, field names are normalized: aliases are replaced with original +schema field names, and fields with arguments get an xxhash suffix appended. +This ensures cached data is query-independent and can be reused across different +GraphQL operations that request the same entity. + +Normalization uses ephemeral `astjson.Transform` descriptors built inline via +`structuralCopyNormalized(value, providesData)`. +The Transform walks `FetchInfo.ProvidesData` and emits one `TransformEntry` per +aliased or arg-suffixed field. +Transforms are built into reusable `l.transformEntries` / `l.transforms` slabs +(resliced to [:0] before each use) and consumed by +`l.parser.StructuralCopyWithTransform` — no stored transforms on `result`. + +L2 writes use non-passthrough normalization (projects to ProvidesData fields only). +L1 writes use passthrough normalization (renames aliases but keeps all fields). +L2 reads stay verbatim at parse time; denormalization is applied at the +materialization site via `structuralCopyDenormalized` so the writeback merge +in `updateL2Cache` can preserve fields outside the current selection (see AC-L2-08). + +Tests: +- `v2/pkg/engine/resolve/loader_cache_transform_test.go` — `TestStructuralCopyNormalized_*` (7 tests: nil, alias, nested, array, arg-suffix, request-scoped invariant, mixed) +- `execution/engine/federation_caching_entity_field_args_test.go` — `TestEntityFieldArgsCaching` (E2E arg-hash normalization) +- `v2/pkg/engine/resolve/loader_cache_transform_test.go:174` — `TestBuildNormalizeTransform_MixedAliases` +- `v2/pkg/engine/resolve/loader_cache_phase2_test.go:125` — `TestL2WritePreservesFieldsOutsideSelection` (verbatim parse preserves fields outside selection for writeback merge) + +### AC-L2-07: Validation before serving cached data +When reading from L2, the cached entity is validated against the `ProvidesData` schema +(the set of fields the current fetch expects). Every required field must be present; if +any are missing, the cached entry is treated as a miss and the entity is refetched from +the subgraph. + +Tests: +- `execution/engine/federation_caching_l2_test.go:504` — `TestPartialEntityCaching / "only configured entities are cached"` +- `v2/pkg/engine/resolve/l1_cache_test.go:2159` — `TestValidateItemHasRequiredData` (22 subtests: nil, scalars, nullable/non-nullable, nested objects, arrays, CacheArgs suffixed lookup, empty arrays) +- `v2/pkg/engine/resolve/l1_cache_test.go:1953` — `TestValidateFieldDataWithAliases` (validates using original name on normalized cache data) + +### AC-L2-08: Failed validation preserves old entity for field merging +When L2 validation fails (cached entity is missing fields the current query needs), the +old cached entity is preserved in `FromCache`. After the subgraph returns fresh data, the +old and new entities are merged so that previously-cached fields from other arg variants +are not lost. The merged result is then written back to L2. + +Enforced by the verbatim-parse rule in `bulkL2Lookup`: cached entries are parsed without +applying the denormalize Transform at parse time, so `l2CacheKeys[i].FromCache` retains +every field that was in the cached value even if the current query selects a narrower +set. The denormalize Transform is applied only at the L2-to-response materialization +site for `l1CacheKeys[i].FromCache`, leaving `l2CacheKeys[i].FromCache` in cache-shape +for the writeback merge in `updateL2Cache`. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:605` — `TestCacheLoadSequential / "single entity fetch with cache miss"` +- `v2/pkg/engine/resolve/loader_cache_phase2_test.go:125` — `TestL2WritePreservesFieldsOutsideSelection` (writeback merge preserves fields outside current selection) + +## Negative Caching + +### AC-NEG-01: Null entity responses cached as negative sentinels +When a subgraph returns `null` for an entity in `_entities` (entity not found, no errors), +and `NegativeCacheTTL > 0` is configured for that entity type, the null result is stored in +L2 as a sentinel value (`"null"` bytes). On subsequent requests, the sentinel is recognized +as a negative cache hit and served without calling the subgraph. + +This prevents repeated subgraph lookups for non-existent entities (e.g., a deleted product +that is still referenced by other entities). + +Tests: +- `v2/pkg/engine/resolve/negative_cache_test.go:60` — `TestNegativeCaching / "null entity stored as negative sentinel and served on second request"` + +### AC-NEG-02: Disabled by default (NegativeCacheTTL = 0) +When `NegativeCacheTTL` is 0 (default), null entity responses are NOT cached. Each request +re-fetches from the subgraph, preserving the pre-negative-caching behavior. + +Tests: +- `v2/pkg/engine/resolve/negative_cache_test.go:229` — `TestNegativeCaching / "negative caching disabled when NegativeCacheTTL is 0"` (subgraph called twice, no sentinel stored) + +### AC-NEG-03: Separate TTL for negative sentinels +Negative cache entries use `NegativeCacheTTL` (not the regular entity `TTL`) when calling +`LoaderCache.Set()`. This allows shorter TTLs for negative entries (e.g., 5s) compared to +regular entity data (e.g., 60s), so deleted entities are re-checked sooner. + +Tests: +- `v2/pkg/engine/resolve/negative_cache_test.go:353` — `TestNegativeCaching / "negative cache sentinel uses NegativeCacheTTL not regular TTL"` + +### AC-NEG-04: Per-entity-type opt-in +Negative caching is configured per entity type via `EntityCacheConfiguration.NegativeCacheTTL`. +Different entity types can have different negative cache TTLs, or have it disabled entirely +(TTL = 0). + +### AC-NEG-05: Negative cache with mutation population +When a mutation with `EnableMutationL2CachePopulation=true` triggers an entity fetch that +returns null and `NegativeCacheTTL > 0`, the negative sentinel is stored with the +`NegativeCacheTTL`, not the entity's regular TTL. + +Tests: +- `v2/pkg/engine/resolve/negative_cache_test.go` — `TestNegativeCaching / "negative cache with mutation population stores sentinel with NegativeCacheTTL"` + +### AC-NEG-06: Negative cache entry replaced after TTL expiry +When a negative cache sentinel expires (TTL elapses) and the entity subsequently becomes +available, the next fetch retrieves real data from the subgraph and stores it with the +entity's regular TTL, replacing the expired negative sentinel. + +Tests: +- `v2/pkg/engine/resolve/negative_cache_test.go` — `TestNegativeCaching / "negative cache entry overwritten by real data on subsequent fetch"` + +## Cache Key Construction + +### AC-KEY-01: Entity key format +Entity cache keys use the canonical format `{"__typename":"T","key":{...}}` where the +key object contains only the fields declared in the entity's `@key` directive. Composite +keys (multiple fields) and nested keys are supported. + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:632` — `TestCachingRenderEntityQueryCacheKeyTemplate` +- `v2/pkg/engine/resolve/cache_key_test.go:1125` — `TestDerivedEntityCacheKey / "dot-notation entity key field"` (single-level nesting) +- `v2/pkg/engine/resolve/cache_key_test.go:1148` — `TestDerivedEntityCacheKey / "deeply nested dot-notation entity key field"` (multi-level nesting) +- `v2/pkg/engine/resolve/cache_key_test.go:1171` — `TestDerivedEntityCacheKey / "dot-notation shared prefix merges into same object"` (shared-prefix merge) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "flat key + composite key - all args present"` (flat + composite multi-key) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "flat key + nested composite key - all args present"` (flat + nested multi-key) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "nested composite key - structured argument input"` (structured input arg) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "two nested composite keys with structured args - both resolve"` (two nested keys) + +### AC-KEY-02: Root field key format +Root field cache keys use `{"__typename":"Query","field":"fieldName","args":{...}}`. +Arguments are included when present. Root field keys can optionally map to entity keys +via `EntityKeyMappings` so that a root field query and an entity query share the same +cache entry. + +When `EntityKeyMappings` is configured with multiple mappings, the system generates one +cache key per mapping whose arguments are all available. Mappings with missing arguments +are skipped — only the mappings where every argument resolves produce a key. This means +a root field with partial argument coverage generates fewer keys than one with full +coverage on the read path. + +On the write path, the system uses smart cache key backfill (see AC-L2-BACKFILL section) +to make precise per-key write decisions based on final entity data. Requested missing keys +are backfilled when the final entity value proves them, and additional derived keys are +written when the entity data contains the mapped key fields. + +Variable remapping (`ctx.RemapVariables`) applies to single-element argument paths only. +Multi-element paths (structured argument inputs like `["store", "id"]`) are not remapped. + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:13` — `TestCachingRenderRootQueryCacheKeyTemplate` +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "flat key + composite key - only composite args present"` (partial arg coverage skips flat key) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "flat key + nested composite key - only nested args present"` (partial with nested keys) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "flat key + nested composite key with structured arg - only nested resolves"` (structured arg partial) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "two nested composite keys with structured args - only first resolves"` (two nested, one skipped) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "remap variables - flat key remapped"` (RemapVariables with entity key mapping) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "remap variables - multiple mappings only flat keys remapped"` (remap with multi-key) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "remap variables - structured arg path not remapped"` (multi-element path not remapped) +- `v2/pkg/engine/resolve/cache_key_test.go` — `TestDerivedEntityCacheKey / "remap variables - partial remap with multi-key"` (partial remap across mappings) +- `execution/engine/federation_caching_test.go` — `TestRootFieldCachingWithArgs / "entity key mapping - two root fields asymmetric key coverage"` (E2E: full-key write, partial-key read cross-lookup) +- `execution/engine/federation_caching_test.go` — `TestRootFieldCachingWithArgs_PartialKeyWrite / "entity key mapping - partial key write does not generate extra keys from response"` (E2E: partial-arg write backfills derived keys from response with Peek verification) +- `execution/engine/federation_caching_test.go` — `TestRootFieldCachingWithArgs_PartialKeyWrite / "entity key mapping - flat key cross-lookup from composite key write"` (E2E: flat key cross-lookup from composite write) + +### AC-KEY-03: Subgraph header hash prefix +When `IncludeSubgraphHeaderPrefix` is enabled, the L2 cache key is prefixed with a hash +of the forwarded subgraph headers (e.g., auth tokens). Format: `{hash}:{json_key}`. This +ensures different auth contexts get separate cache entries, preventing data leakage +between tenants or users. + +Tests: +- `execution/engine/federation_caching_test.go:418` — `TestFederationCaching / "two subgraphs - with subgraph header prefix"` +- `v2/pkg/engine/resolve/cache_key_parity_test.go:17` — `TestCacheKeyParityRegression_ReadWriteInvalidation` (header prefix parity across args-derived read, entity writeback, and extension invalidation) + +### AC-KEY-04: L2CacheKeyInterceptor transform +After the header prefix is applied, the key passes through an optional user-provided +`L2CacheKeyInterceptor` function. This allows custom transformations like adding tenant +prefixes or routing to different cache namespaces. The interceptor receives the subgraph +name and cache name as context. + +Tests: +- `v2/pkg/engine/resolve/l2_cache_key_interceptor_test.go:80` — `TestL2CacheKeyInterceptor` + +### AC-KEY-05: Field argument suffix for entity fields +When an entity field has arguments (e.g., `friends(first:5)`), the _field name in the +cached entity data_ gets an `_<16-hex-digit-xxhash>` suffix computed from the sorted, +canonicalized argument values. This ensures `friends(first:5)` and `friends(first:20)` +produce different field names _within_ the cached entity and don't overwrite each other. + +Note: the suffix applies to field names in the stored JSON, not to the entity's L1 or L2 +cache key. Cache keys are always derived from `@key` fields only (see AC-KEY-01). +Both L1 and L2 use the `cacheFieldName()` function to apply these suffixes during +normalization before storage and during denormalization on read. + +Tests: +- `v2/pkg/engine/resolve/l1_cache_test.go:2502` — `TestComputeArgSuffix` (8 subtests: deterministic suffix, different values, null handling, sorted args, RemapVariables, object arg canonical JSON) + +### AC-KEY-06: Canonical JSON for deterministic hashing +Argument values are serialized as canonical JSON (object keys sorted alphabetically, +arrays in order, scalars as-is) before hashing. This guarantees the same logical arguments +always produce the same hash, regardless of the JSON key order sent by the client. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:1979` — `TestWriteCanonicalJSON` + +### AC-KEY-07: Global cache key prefix for schema versioning +When `CachingOptions.GlobalCacheKeyPrefix` is set, the prefix is prepended to all L2 cache +keys (both entity and root field). Format: `{prefix}:{rest_of_key}`. This allows the +router to separate cache entries by schema version — when the schema changes, a new prefix +automatically invalidates all old cache entries without explicit cache flushing. + +The global prefix is applied as the outermost prefix, before the header hash prefix. When +both are active: `{global}:{header_hash}:{json_key}`. When only global prefix: +`{global}:{json_key}`. + +The global prefix is applied consistently across all cache operations: L2 reads, L2 writes, +extension-based invalidation, mutation invalidation, and subscription populate/invalidate. + +Tests: +- `v2/pkg/engine/resolve/l2_cache_key_interceptor_test.go:504` — `TestL2CacheKeyInterceptor / "global prefix is prepended to L2 keys"` +- `v2/pkg/engine/resolve/l2_cache_key_interceptor_test.go:597` — `TestL2CacheKeyInterceptor / "global prefix combined with interceptor"` +- `v2/pkg/engine/resolve/cache_key_parity_test.go:17` — `TestCacheKeyParityRegression_ReadWriteInvalidation` (partial: query read/write and extension invalidation only; mutation/subscription paths are not exercised) + +## Partial Cache Loading + +### AC-PARTIAL-01: Default behavior (full refetch on any miss) +When `EnablePartialCacheLoad` is false (default), if any entity in a batch has a cache +miss, ALL entities in that batch are refetched from the subgraph. This keeps the cache +maximally fresh because every entity gets a new value on every batch that includes a miss. + +Tests: +- `execution/engine/partial_cache_test.go:233` — `TestPartialCacheLoading / "L2 partial cache loading disabled - all entities fetched even with partial cache hit"` + +### AC-PARTIAL-02: Partial loading fetches only missing entities +When `EnablePartialCacheLoad` is true, only entities with cache misses are included in the +subgraph fetch request. Cached entities are served directly from cache within their TTL. +The subgraph receives a smaller representations list containing only the missed entities. + +Tests: +- `execution/engine/partial_cache_test.go:85` — `TestPartialCacheLoading / "L2 partial cache loading enabled - only missing entities fetched"` + +### AC-PARTIAL-03: Freshness vs load tradeoff +Partial loading reduces subgraph load (fewer entities per request) at the cost of +potentially serving slightly stale data for the cached entities. Full refetch (default) +ensures maximum freshness but increases subgraph load. + +Tests: +- `v2/pkg/engine/resolve/l1_cache_test.go:555` — `TestL1CachePartialLoading / "partial cache loading with L2 - only missing entities fetched"` + +## Mutations and Cache Coherency + +### AC-MUT-01: Mutations never read from L2 +When the operation type is Mutation, the L2 cache is never consulted for reads. Mutations +always go to the subgraph to ensure they execute against live data. This prevents serving +stale cached data during write operations. + +Tests: +- `execution/engine/federation_caching_test.go:2165` — `TestFederationCaching_MutationSkipsL2Read` +- `v2/pkg/engine/resolve/cache_load_test.go:2225` — `TestMutationSkipsL2Read` (unit test: mutation skips L2 read and always fetches from subgraph) + +### AC-MUT-02: Mutations skip L2 writes by default +Mutation responses are not written to L2 cache by default. This is because mutation +responses often contain partial entity data that could overwrite a more complete cached +entity. + +Tests: +- `execution/engine/federation_caching_test.go:2447` — `TestFederationCaching / "mutation skips L2 write by default without EnableEntityL2CachePopulation"` + +### AC-MUT-03: Opt-in mutation L2 population +When `EnableMutationL2CachePopulation` is set to true for a specific mutation field, that +mutation's response IS written to L2. This is useful when a mutation returns a complete, +canonical entity representation that should update the cache. + +Tests: +- `execution/engine/federation_caching_l2_test.go:1115` — `TestMutationCacheInvalidationE2E` + +### AC-MUT-04: Mutation-triggered L2 invalidation +When `MutationCacheInvalidationConfiguration` is configured for a mutation, and the +mutation response contains an entity with `@key` fields, the corresponding L2 cache entry +is deleted. The cache key is constructed using the same pipeline as storage (typename + +key fields + header prefix + interceptor). Supports both single-entity responses (object) +and list responses (array) — each entity in the array is individually invalidated. + +Tests: +- `execution/engine/federation_caching_l2_test.go:1115` — `TestMutationCacheInvalidationE2E` +- `v2/pkg/engine/resolve/mutation_cache_test.go:25` — `TestNavigateProvidesDataToField` (4 subtests: valid field, missing field, nil providesData, non-Object field) +- `v2/pkg/engine/resolve/mutation_cache_test.go:84` — `TestBuildEntityKeyValue` (4 subtests: simple key, composite key, nested key, missing field) +- `v2/pkg/engine/resolve/mutation_cache_test.go:139` — `TestBuildMutationEntityCacheKey` (3 subtests: basic key, with header prefix, with interceptor) +- `v2/pkg/engine/resolve/mutation_cache_test.go:230` — `TestDetectMutationEntityImpact` (includes array response invalidation and non-object item skipping) + +### AC-MUT-05: Pre-delete cache read for analytics +When both cache invalidation and analytics are enabled, the cached value is read BEFORE +the delete operation. This allows the analytics system to compare the stale cached value +against the fresh mutation response to measure staleness. + +_Known limitation_: `LoaderCache.Delete()` returns only an error, not a success/existence +indicator. The analytics system cannot distinguish "key did not exist" from "key was +successfully deleted". This would require extending the `LoaderCache` interface. + +Tests: +- `v2/pkg/engine/resolve/mutation_cache_test.go` — `TestDetectMutationEntityImpact / "analytics enabled, no cached value records MutationEvent with HadCachedValue=false"` + +### AC-MUT-06: Staleness detection via hash comparison +Mutation impact analytics computes xxhash of both the cached entity (pre-delete) and the +fresh mutation response (both filtered to `ProvidesData` fields only). If hashes differ, +the entity is marked as stale. This measures how often mutations actually change cached +data. + +_Note_: This mechanism (xxhash of `ProvidesData`-filtered fields) is shared with +shadow mode staleness detection (AC-SHADOW-03). The trigger differs (mutation response +vs shadow mode) but the comparison logic is identical. + +Tests: +- `v2/pkg/engine/resolve/mutation_cache_test.go` — `TestDetectMutationEntityImpact / "analytics enabled, stale cached value records MutationEvent with IsStale=true"` + +### AC-MUT-07: Mutation TTL override +When `MutationFieldCacheConfiguration.TTL` is non-zero, mutation-triggered L2 cache writes +use that TTL instead of the entity's default TTL (from `EntityCacheConfiguration`). When +zero, the entity's default TTL is used. This allows `@cachePopulate(maxAge: 60)` on mutation +fields to override the entity's default cache duration. + +Tests: +- `v2/pkg/engine/resolve/mutation_cache_test.go:717` — `TestMutationCacheTTLOverride / "mutation with TTL override uses override value"` +- `v2/pkg/engine/resolve/mutation_cache_test.go:717` — `TestMutationCacheTTLOverride / "mutation without TTL override uses entity default"` +- `v2/pkg/engine/resolve/mutation_cache_test.go:717` — `TestMutationCacheTTLOverride / "TTL override not applied when mutation L2 population disabled"` + +## Extension-Based Invalidation + +### AC-EXT-01: Subgraph-driven invalidation signals +Subgraphs can include cache invalidation keys in their response extensions: +`{"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`. +The engine processes these keys and deletes the corresponding L2 cache entries. + +Tests: +- `execution/engine/federation_caching_ext_invalidation_test.go:14` — `TestFederationCaching_ExtensionsInvalidation / "mutation with extensions invalidation clears L2 cache"` + +### AC-EXT-02: Key format matches storage format +Invalidation keys use the same `typename` + `key` structure as stored cache keys, ensuring +the correct entry is targeted for deletion. + +Tests: +- `execution/engine/federation_caching_ext_invalidation_test.go:90` — `TestFederationCaching_ExtensionsInvalidation / "multiple entities invalidated in single response"` +- `v2/pkg/engine/resolve/cache_key_parity_test.go:17` — `TestCacheKeyParityRegression_ReadWriteInvalidation` (extension delete key matches the entity storage key) + +### AC-EXT-03: Full key construction pipeline for deletion +The invalidation key goes through the same transformation pipeline as storage keys: +build JSON → apply header hash prefix → apply `L2CacheKeyInterceptor` → call +`cache.Delete()`. This ensures tenant-isolated keys are correctly invalidated. + +Tests: +- `execution/engine/federation_caching_ext_invalidation_test.go:214` — `TestFederationCaching_ExtensionsInvalidation / "with subgraph header prefix"` +- `v2/pkg/engine/resolve/cache_key_parity_test.go:17` — `TestCacheKeyParityRegression_ReadWriteInvalidation` (partial: covers JSON + global prefix + header prefix + delete; does not exercise `L2CacheKeyInterceptor`) + +### AC-EXT-04: Works for queries and mutations +Extension-based invalidation is not restricted to mutation responses. A query response can +also include invalidation keys (e.g., when a subgraph detects data has changed since the +last cache write). + +Tests: +- `execution/engine/federation_caching_ext_invalidation_test.go:178` — `TestFederationCaching_ExtensionsInvalidation / "query response triggers invalidation"` + +### AC-EXT-05: Skip redundant delete-before-set +If the same entity key appears in both the invalidation keys and the cache write set of +the same fetch, the delete is skipped because the entry will be overwritten with fresh +data anyway. This avoids an unnecessary cache round-trip. + +Tests: +- `v2/pkg/engine/resolve/extensions_cache_invalidation_test.go:11` — `TestExtensionsCacheInvalidation` + +### AC-EXT-06: Prerequisites for extension invalidation +Extension-based invalidation requires: (1) L2 caching enabled, (2) `entityCacheConfigs` +present for the subgraph (to determine which named cache to delete from and whether header +prefix is needed), and (3) the `caches` map populated. + +Tests: +- `execution/engine/federation_caching_ext_invalidation_test.go:121` — `TestFederationCaching_ExtensionsInvalidation / "mutation without extensions does not delete"` + +## Subscription Caching + +### AC-SUB-01: Populate mode writes entities to L2 +In populate mode, each subscription event that returns entity data writes it to the L2 +cache. This keeps the cache warm with real-time data, so subsequent queries can serve +the latest state without hitting the subgraph. + +Tests: +- `execution/engine/federation_subscription_caching_test.go:330` — `TestFederationSubscriptionCaching / "subscription entity populates L2 - verified via cache"` + +### AC-SUB-02: Invalidate mode deletes L2 entries +In invalidate mode, each subscription event triggers L2 cache deletion for the received +entity (identified by `@key` fields). This is used when the subscription delivers only +key fields (not full entity data), signaling that the cached version is stale. + +Tests: +- `execution/engine/federation_subscription_caching_test.go:714` — `TestFederationSubscriptionCaching / "key-only subscription invalidates L2 cache"` + +### AC-SUB-03: Base key pipeline for subscription cache operations +Subscription cache operations (both populate and invalidate) apply the cache key +pipeline: template rendering → global prefix → header hash prefix → `L2CacheKeyInterceptor`. +The base path (template rendering, populate, invalidate) is covered by existing tests. +Global prefix and `L2CacheKeyInterceptor` integration within subscriptions is verified +by the code path (shared with `prepareCacheKeys`) but not yet exercised by dedicated +trigger-level tests. + +Tests: +- `v2/pkg/engine/resolve/trigger_cache_test.go:51` — `TestHandleTriggerEntityCache / "populate single entity"` (verifies base key pipeline for populate) +- `v2/pkg/engine/resolve/trigger_cache_test.go:224` — `TestHandleTriggerEntityCache / "invalidate mode deletes cache entry"` (verifies base key pipeline for invalidate) + +### AC-SUB-04: Field-aware subscription config lookup +When multiple subscription fields return the same entity type, the plan visitor uses +`FindByTypeAndFieldName` to match the correct `SubscriptionEntityPopulationConfiguration`. +This prevents order-dependent config selection when subscriptions like `itemCreated` and +`itemUpdated` both produce configs for the same entity type with different TTLs. + +Tests: +- `v2/pkg/engine/plan/federation_metadata_test.go` — `TestSubscriptionEntityPopulationConfigurations / "FindByTypeAndFieldName returns field-specific config"` +- `v2/pkg/engine/plan/federation_metadata_test.go` — `TestSubscriptionEntityPopulationConfigurations / "FindByTypeAndFieldName returns nil when field not found"` + +## Shadow Mode + +### AC-SHADOW-01: Never serves cached data; always fetches from subgraph +When shadow mode is enabled for an entity type, the subgraph is always called regardless +of cache state. L2 cached data is never used in the actual response — the client always +receives fresh data from the subgraph, even on a cache hit. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:1324` — `TestShadowMode_L2_AlwaysFetches` + +### AC-SHADOW-02: Cache operations proceed normally +Despite not serving cached data, L2 reads and writes happen as usual. The cache stays +warm and populated. This allows measuring cache effectiveness without affecting +production traffic. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:1504` — `TestShadowMode_StalenessDetection` + +### AC-SHADOW-03: Staleness detection via hash comparison +After both cached and fresh values are available, they are compared using xxhash. The +comparison uses only `ProvidesData` fields (the fields the fetch actually needs). Results +are recorded as `ShadowComparisonEvent` with `IsFresh` indicating whether cached data +matched. + +_Note_: This mechanism (xxhash of `ProvidesData`-filtered fields) is shared with +mutation staleness detection (AC-MUT-06). The trigger differs (shadow mode vs mutation +response) but the comparison logic is identical. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:1504` — `TestShadowMode_StalenessDetection` + +### AC-SHADOW-04: Per-field hash comparison +In addition to the whole-entity comparison (AC-SHADOW-03), shadow mode records individual +xxhash values for each non-key field of the cached entity (tagged as `FieldSourceShadowCached`). +During response rendering, the same fields from fresh subgraph data are hashed (tagged as +`FieldSourceSubgraph`). By comparing per-field hashes across these two sources, consumers +can identify exactly which fields went stale, enabling field-level staleness analysis. + +Implementation: `loader_cache.go` iterates `ProvidesData` fields, computing xxhash per +field via `HashFieldValue`. The hashes appear in `CacheAnalyticsSnapshot.FieldHashes`. + +Tests: +- `execution/engine/federation_caching_analytics_test.go:679` — `TestCacheAnalyticsE2E / "shadow all entities - always fetches"` +- `v2/pkg/engine/resolve/l1_cache_test.go:2017` — `TestComputeHasAliases` (4 subtests: no aliases, direct alias, nested alias, alias in array item) + +### AC-SHADOW-05: L1 cache unaffected +Shadow mode only affects L2 behavior. L1 cache operates normally — it still caches and +serves entities within the same request, since L1 is always fresh (populated from the +current request's fetches). + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:1687` — `TestShadowMode_L1_WorksNormally` + +## Thread Safety + +### AC-THREAD-01: Entity L1 and coordinate L1 on main thread +Both L1 structures on the Loader are plain `map[string]*astjson.Value` accessed only on +the resolver's main thread — there is no cross-goroutine sharing and therefore no locking: + +- Entity L1 (`l1Cache`) is read via `tryL1CacheLoad` in Phase 1 and written via + `populateL1Cache` / `populateL1CacheForRootFieldEntities` in Phase 4 of + `resolveParallel`, all on the main thread. +- Coordinate L1 (`requestScopedL1`) is read/written by `tryRequestScopedInjection` and + `exportRequestScopedFields`, called from Phase 1.5, Phase 3.5 and `resolveSingle`, + also main-thread only. + +Neither map is a `sync.Map`; parallel fetches never touch L1 directly — they complete, +merge on the main thread, and L1 updates happen synchronously during that merge. + +Tests: +- `v2/pkg/engine/resolve/l1_cache_test.go:24` — `TestL1Cache / "L1 hit - same entity fetched twice in same request"` + +### AC-THREAD-02: L2 implementations must be goroutine-safe +L2 `LoaderCache.Set()` and `Delete()` (write-side operations) are called from the main +thread during Phase 4 of `resolveParallel` and may overlap across concurrent router +requests. L2 `LoaderCache.Get()` is issued once per cache instance on the main thread +from `bulkL2Lookup` (Phase 2L2), so a single router request never concurrently reads +from the same cache instance — but concurrent router requests can, so `Get` still must +be goroutine-safe. Net requirement: implementers must ensure thread-safe access (e.g., +connection pooling for Redis). + +Tests: +- `execution/engine/federation_caching_test.go:1435` — `TestFederationCaching / "concurrency with different IDs"` + +### AC-THREAD-03: Per-result analytics accumulation for write-side events +L2 read events (L2 key events, `L2 Get` fetch timings, cache Get errors) are accumulated +by `bulkL2Lookup` on the main thread in Phase 2L2 and folded directly into the collector. +Write-side and HTTP events — per-fetch `l2AnalyticsEvents`, `l2FetchTimings` for the HTTP +round trip, `l2ErrorEvents`, `l2CacheOpErrors`, and `l2EntitySources` — are accumulated +on the per-result slice either inside the Phase 2HTTP goroutine or during Phase 4 merge. +After `g.Wait()`, the main thread merges the per-result slices into the single analytics +collector via `MergeL2Events` / `MergeL2FetchTimings` / `MergeL2Errors` / +`MergeL2CacheOpErrors` / `MergeEntitySources`. + +Tests: +- `v2/pkg/engine/resolve/cache_analytics_test.go:65` — `TestCacheAnalyticsCollector_MergeL2Events` + +### AC-THREAD-04: Main-thread parsing on `l.jsonArena` via reusable `l.parser` +The JSON arena (`jsonArena`) uses a `MonotonicArena` which is NOT thread-safe, so all +astjson allocation happens on the main thread. `bulkL2Lookup` parses every L2 cache +entry onto `l.jsonArena` via the Loader-owned `l.parser` (an `astjson.Parser` whose +scratch slabs amortize across requests), and Phase 4 parses every subgraph HTTP response +onto the same arena. Phase 2HTTP goroutines only return a `[]byte` body and never touch +the arena, so there is no goroutine-arena pool, no cross-arena references in the +response tree, and no lifetime coupling between goroutines and response rendering. + +The root-field L1 promotion path and entity L1 writes both run `StructuralCopy` (or +`StructuralCopyWithTransform` when aliases need rewriting) onto `l.jsonArena` before +storing in `l1Cache`. Container nodes are cloned onto the Loader's arena; leaf values +are aliased from the source — safe because all participants share the same arena +lifetime within a request. This closes the previous "cross-arena reference" hazard at +the storage site rather than at the goroutine boundary. + +Tests: +- `v2/pkg/engine/resolve/arena_thread_safety_gc_test.go:21` — `TestCrossArenaMergeValuesCreatesShallowReferences` (documents the shallow merge semantics that motivate the always-StructuralCopy rule) +- `v2/pkg/engine/resolve/arena_thread_safety_gc_test.go:83` — `TestGoroutineArenaLifetimeWithDeferredRelease` +- `v2/pkg/engine/resolve/arena_thread_safety_gc_test.go:137` — `Benchmark_CrossArenaGCSafety` +- `v2/pkg/engine/resolve/arena_thread_safety_bench_test.go:40` — `BenchmarkConcurrentArena` +- `v2/pkg/engine/resolve/arena_thread_safety_bench_test.go:61` — `BenchmarkPerGoroutineArena` +- `v2/pkg/engine/resolve/loader_arena_gc_test.go:102` — `Benchmark_ArenaGCSafety` +- `v2/pkg/engine/resolve/loader_arena_gc_test.go` — `TestLoaderArenaGC` family (verifies main-thread parsing on `l.jsonArena` preserves arena invariants) + +## Error Handling + +### AC-ERR-01: Cache errors are non-fatal +All cache operations (`Get`, `Set`, `Delete`) are non-fatal. A cache failure never causes +the GraphQL request to fail — the engine falls back to fetching from the subgraph. +When analytics is enabled, cache operation errors are recorded as `CacheOperationError` +events (see AC-ANA-06) so that infrastructure issues are visible to operators. + +Tests: +- `execution/engine/federation_caching_l2_test.go:788` — `TestCacheNotPopulatedOnErrors` +- `v2/pkg/engine/resolve/cache_load_test.go:2077` — `TestL2CacheErrorResilience` (Get error falls through to fetch, Set error still returns correct response) + +### AC-ERR-02: Subgraph errors prevent cache population +When a subgraph returns an error response, the result is NOT written to L2 cache. This +prevents caching error responses that would be served to subsequent requests. + +Tests: +- `execution/engine/federation_caching_l2_test.go:788` — `TestCacheNotPopulatedOnErrors` + +### AC-ERR-03: Graceful degradation on validation failure +When L2 returns a cached entity that fails `ProvidesData` validation (missing required +fields), the system gracefully refetches from the subgraph rather than serving incomplete +data. The old cached entity is preserved for field merging (AC-L2-08). + +Tests: +- `execution/engine/federation_caching_l2_test.go:504` — `TestPartialEntityCaching / "only configured entities are cached"` + +## L2 Circuit Breaker + +### AC-CB-01: Configurable per-cache circuit breaker +Each named L2 cache can have a circuit breaker via `ResolverOptions.CacheCircuitBreakers`. +The breaker wraps the `LoaderCache` interface transparently — callers (loader, resolver) +don't need any changes. + +Configuration: +- `FailureThreshold`: consecutive failures to trip open (default: 5) +- `CooldownPeriod`: duration in open state before half-open probe (default: 10s) + +Tests: +- `v2/pkg/engine/resolve/circuit_breaker_test.go:44` — `TestCircuitBreaker` (7 subtests: stays closed below threshold, opens after N failures, open skips cache, half-open probe success/failure, concurrent safety, success resets count) + +### AC-CB-02: Three-state lifecycle +The circuit breaker follows the standard Closed → Open → Half-Open pattern: +- **Closed**: all operations pass through to the underlying cache +- **Open**: `Get` returns `(nil, nil)` (all-miss), `Set`/`Delete` return `nil` (no-op) +- **Half-Open**: after `CooldownPeriod`, the next operation is allowed through as a probe; + success closes the breaker, failure re-opens it + +Tests: +- `v2/pkg/engine/resolve/circuit_breaker_test.go:44` — covers all three states and transitions + +### AC-CB-03: Non-blocking failure isolation +When open, the breaker returns immediately without contacting the cache backend. This +prevents cascading failures when the cache is down (e.g., Redis timeout) from affecting +GraphQL request latency. The engine falls back to subgraph fetches transparently. + +## Analytics + +### AC-ANA-01: Event-level tracking +Every L1 and L2 read/write operation records a structured event containing: cache level +(L1/L2), entity type, cache key, data source name, byte size, and TTL. Events are +collected per-request in the `CacheAnalyticsCollector`. + +Tests: +- `execution/engine/federation_caching_analytics_test.go:106` — `TestCacheAnalyticsE2E / "L2 miss then hit with analytics"` + +### AC-ANA-02: Fetch timing instrumentation +Each subgraph HTTP call records: request duration, HTTP status code, time-to-first-byte, +and response body size. These timings are available in the snapshot for correlating cache +performance with fetch latency. + +Tests: +- `execution/engine/federation_caching_analytics_test.go:505` — `TestCacheAnalyticsE2E / "subgraph fetch records HTTPStatusCode and ResponseBytes"` + +### AC-ANA-03: Aggregate convenience methods +The `CacheAnalyticsSnapshot` provides pre-computed metrics: `L1HitRate()`, `L2HitRate()`, +`CachedBytesServed()`, `L1HitCount()`, `L2HitCount()`, `AvgCacheAgeMs()`, etc. These are +derived from the raw events at snapshot time. + +Tests: +- `v2/pkg/engine/resolve/cache_analytics_test.go:239` — `TestCacheAnalyticsCollector_SnapshotDerivedMetrics` + +### AC-ANA-04: Event deduplication in snapshots +When `Snapshot()` is called, duplicate events (same CacheKey + Kind combination) are +removed to prevent double-counting from retry or re-merge scenarios. + +Tests: +- `v2/pkg/engine/resolve/cache_analytics_test.go:1679` — `TestSnapshotDeduplication` + +### AC-ANA-05: Header impact analytics +When `IncludeSubgraphHeaderPrefix` is active, the system records `HeaderImpactEvent`s +containing the base key (without header hash) and the response hash. By comparing response +hashes across different header hash values, consumers can detect whether the header prefix +is actually necessary — if all responses are identical regardless of headers, the prefix +adds cache fragmentation without benefit. + +Tests: +- `execution/engine/federation_caching_analytics_test.go:1791` — `TestCacheAnalyticsE2E / "shadow mode with header prefix - same response different headers"` +- `v2/pkg/engine/resolve/mutation_cache_test.go` — `TestBuildMutationEntityDisplayKey` (display key always without prefix) + +### AC-ANA-06: Cache operation error tracking +When analytics is enabled, L2 cache operation errors (`Get`, `Set`, `Delete`) are recorded +as `CacheOperationError` events in the analytics snapshot. Each event contains the operation +type, cache name, entity type, data source, error message (truncated to 256 chars), and +the number of keys involved. This allows operators to detect cache infrastructure issues +(e.g., Redis timeouts, connection failures) without requiring a logger on the Loader. + +Tests: +- `v2/pkg/engine/resolve/mutation_cache_test.go` — `TestDetectMutationEntityImpact / "array response invalidates all entities in the list"` + +### AC-ANA-07: Cache write event source tracking +Each `CacheWriteEvent` carries a `Source` field (`CacheOperationSource`) indicating what +triggered the write: `"query"`, `"mutation"`, or `"subscription"`. This enables the metrics +exporter to label cache operations by trigger source for dashboard attribution. Subscription +cache writes are reported via `OnSubscriptionCacheWrite` callback since subscriptions run +outside per-request analytics. + +### AC-ANA-08: Cache write reason tracking +Each `CacheWriteEvent` carries a `WriteReason` field (`CacheWriteReason`) indicating why +the write occurred. For root field `EntityKeyMappings` writes, the reason is one of: +- `"refresh"` — existing cached key rewritten with fresh or merged data +- `"backfill"` — missing requested key proven by final entity data +- `"derived"` — new key derived from entity data that was not in the original request + +For entity fetches and non-EntityKeyMappings root field writes, the reason is empty. +The reason is set on `CacheEntry.WriteReason` during `cacheKeysToExactRootFieldEntityEntries` +and propagated to `CacheWriteEvent.WriteReason` when `RecordWrite` is called with the event. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2397` — `TestCacheBackfill_SkipFetch_HappyPath` (backfill reason on emailKey write) +- `v2/pkg/engine/resolve/cache_load_test.go:2498` — `TestCacheBackfill_FetchPath_HappyPath` (refresh on idKey, backfill on emailKey) +- `v2/pkg/engine/resolve/cache_load_test.go:2608` — `TestCacheBackfill_FetchPath_ValueMismatch` (refresh on idKey, derived on actualEmailKey) +- `v2/pkg/engine/resolve/cache_load_test.go:2663` — `TestCacheBackfill_DerivedKeyExpansion` (refresh + backfill + derived across three keys) + +Tests: +- `v2/pkg/engine/resolve/cache_analytics_test.go` — `TestCacheAnalyticsCollector_WriteEventSource / "write events preserve source field"` +- `v2/pkg/engine/resolve/cache_analytics_test.go` — `TestCacheAnalyticsCollector_WriteEventSource / "mutation event preserves source field"` +- `v2/pkg/engine/resolve/cache_analytics_test.go` — `TestCacheAnalyticsCollector_WriteEventSource / "mixed sources in single snapshot"` + +## Cache Trace in Response Extensions + +### AC-TRACE-01: Per-fetch cache trace in extensions.trace +When tracing is enabled (`TraceOptions.Enable = true`) and `ExcludeCacheStats` is false +(default), each fetch in `extensions.trace.fetches` includes a `cache_trace` object with +L1/L2 hit/miss counts, L2 Get/Set timing, cache name, TTL, and configuration flags. + +Tests: +- `execution/engine/federation_caching_trace_test.go` — `TestFederationCaching_CacheTraceInExtensions / "L2 miss then hit shows cache_trace in extensions.trace"` +- `v2/pkg/engine/resolve/cache_trace_test.go` — `TestCacheTrace_JSON` (3 subtests: full serialization, omitempty, shadow mode) + +### AC-TRACE-02: Zero overhead when disabled +When `TraceOptions.Enable` is false or `ExcludeCacheStats` is true, no cache trace data +is collected: no `time.Now()` calls, no counting, no allocations. The `tracingCache` guard +(`l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats`) short-circuits +all instrumentation. + +Tests: +- `v2/pkg/engine/resolve/cache_trace_test.go` — `TestBuildCacheTrace / "returns nil when tracing disabled"` +- `v2/pkg/engine/resolve/cache_trace_test.go` — `TestBuildCacheTrace / "returns nil when ExcludeCacheStats true"` + +### AC-TRACE-03: Cache-hit fetches still produce trace +When L1 or L2 provides a complete hit, `load*Fetch` is never called (so `fetch.Trace` is +not normally allocated). The `ensureFetchTrace` helper allocates `DataSourceLoadTrace` on +the cache-hit path so that `CacheTrace` can still be attached. + +Tests: +- `v2/pkg/engine/resolve/cache_trace_test.go` — `TestBuildCacheTrace / "full L1 hit"` (verifies CacheTrace built even when cacheSkipFetch=true) + +### AC-TRACE-04: Trace attached after final cache state +`CacheTrace` is built AFTER `mergeResult` + `populateCachesAfterFetch` complete, ensuring +L2 write timing, negative cache hits, and shadow comparison results are all captured. +Attachment happens in `resolveSingle` (after `callOnFinished`) and `resolveParallel` +Phase 4 (after merge loop). + +Tests: +- `execution/engine/federation_caching_trace_test.go` — `TestFederationCaching_CacheTraceInExtensions` (verifies L2 Set timing present on miss, absent on hit) + +### AC-TRACE-05: Predictable debug timings +When `EnablePredictableDebugTimings` is true, all L2 timing values in `CacheTrace` are +normalized to `1ns` for deterministic test assertions. + +Tests: +- `v2/pkg/engine/resolve/cache_trace_test.go` — `TestBuildCacheTrace / "predictable debug timings"` + +## Batch Entity Key Mode (Root Field with List Arguments) + +### AC-BATCH-01: Per-element cache key construction +When `ArgumentIsEntityKey: true` is set on a `FieldMapping` and the root field argument +is a list (e.g., `ids: ["1","2","3"]`), +the engine constructs one cache key per list element using entity key format. +Each key is identical to what an `_entities` fetch would produce for the same entity, +enabling cache sharing between root fields and entity resolution. + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:2175` — `TestRenderCacheKeys_BatchEntityKey` (batch key format, single and multi-element lists) +- `v2/pkg/engine/resolve/cache_key_test.go:2273` — `TestRenderCacheKeys_BatchEntityKey / "batch key format matches scalar key format"` (scalar and batch produce identical keys for the same ID) + +### AC-BATCH-02: Positional correspondence via BatchIndex +Each cache key records its position in the original list argument via `CacheKey.BatchIndex`. +This is used during response reassembly to place cached and fresh entities in the correct +output positions. +For non-batch cache keys, `BatchIndex` is unused (default 0). + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:2175` — `TestRenderCacheKeys_BatchEntityKey` (verifies BatchIndex 0, 1, 2 for three-element list) + +### AC-BATCH-03: Empty list short-circuit +When the list argument is `[]` or `null`, +the engine returns an empty response (`[]`) immediately without calling the resolver +or the cache. +This avoids unnecessary subgraph calls and cache operations for trivially empty queries. + +Tests: +- `v2/pkg/engine/resolve/loader_skip_fetch_test.go:889` — `TestLoader_BatchEntityKeyEmptyListShortCircuit` +- `execution/engine/federation_caching_batch_test.go:330` — `TestBatchEntityCacheLookup_FullFetch_EmptyList` + +### AC-BATCH-04: Full fetch mode (all-or-nothing) +When `PartialBatchLoad` is false (default), +any cache miss in a batch causes the full list argument to be sent to the subgraph. +All returned entities are cached individually with their entity keys. + +Tests: +- `execution/engine/federation_caching_batch_test.go:60` — `TestBatchEntityCacheLookup_FullFetch_AllMiss` (no cache entries, full list fetched) +- `execution/engine/federation_caching_batch_test.go:141` — `TestBatchEntityCacheLookup_FullFetch_AllHit` (all cached, no subgraph call) +- `execution/engine/federation_caching_batch_test.go:237` — `TestBatchEntityCacheLookup_FullFetch_PartialMiss_FetchesAll` (partial hit, full list refetched) +- `execution/engine/federation_caching_batch_test.go:499` — `TestBatchEntityCacheLookup_FullFetch_SingleElement` (single-element list behaves like scalar) + +### AC-BATCH-05: Partial fetch mode (fetch only missing) +When `PartialBatchLoad` is true, +only IDs with cache misses are sent to the subgraph. +The input list variable is filtered to exclude IDs that were cache hits. +Cached entities are merged with fresh results in the correct positional order. + +Tests: +- `execution/engine/federation_caching_batch_test.go:579` — `TestBatchEntityCacheLookup_PartialFetch_SomeCached` (some hit, only missing IDs fetched) +- `execution/engine/federation_caching_batch_test.go:676` — `TestBatchEntityCacheLookup_PartialFetch_AllHit` (all cached, no subgraph call) +- `execution/engine/federation_caching_batch_test.go:769` — `TestBatchEntityCacheLookup_PartialFetch_AllMiss` (none cached, full list fetched) +- `execution/engine/federation_caching_batch_test.go:848` — `TestBatchEntityCacheLookup_PartialFetch_OrderPreservation` (response order matches input list order) + +### AC-BATCH-06: Cache sharing between scalar and batch root fields +Batch entity keys use the same format as scalar `EntityKeyMappings`. +A scalar root field `product(id: "1")` and a batch root field `products(ids: ["1","2"])` +both produce `{"__typename":"Product","key":{"id":"1"}}` for ID `"1"`, +so they share the same L2 cache entry. + +Tests: +- `execution/engine/federation_caching_batch_test.go:390` — `TestBatchEntityCacheLookup_CacheKeySharing_ScalarAndBatch` (scalar write, batch read hits same cache entry) +- `v2/pkg/engine/resolve/cache_key_test.go:2273` — `TestRenderCacheKeys_BatchEntityKey / "batch key format matches scalar key format"` + +### AC-BATCH-07: Constructor precomputes batch metadata +`NewRootQueryCacheKeyTemplate` precomputes batch entity key information +(argument path, entity type, merge path) via `precomputeDerivedFields()`. +The precomputed values are exposed via `BatchEntityKeyArgumentPath()` and +`EntityMergePath()` on the `CacheKeyTemplate` interface. + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:2395` — `TestRenderCacheKeys_BatchEntityKey / "constructor precomputes batch entity key metadata"` + +## TypeName Fallback + +### AC-TYPENAME-01: Plan-time TypeName used when __typename missing +When `__typename` is missing from the response data, +the plan-time `TypeName` field on `EntityQueryCacheKeyTemplate` is used as fallback +for the cache key's `__typename` value. +This ensures cache keys always reflect the correct entity type +rather than falling back to a hardcoded default. + +Tests: +- `v2/pkg/engine/resolve/cache_key_test.go:632` — `TestCachingRenderEntityQueryCacheKeyTemplate` (TypeName field set on template) + +## Smart Cache Key Backfill (L2, Root Field EntityKeyMappings) + +### AC-L2-BACKFILL-01: Requested missing key backfilled from cached sibling +When a root field with `EntityKeyMappings` produces multiple L2 keys on read, +and one key hits while another misses, +the missing key is backfilled during writeback if the final entity value proves +the mapped key field. +The existing key that already had a cache hit is not rewritten unless +`fromCacheNeedsWriteback` is true. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2397` — `TestCacheBackfill_SkipFetch_HappyPath` (idKey hits, emailKey misses, cached value contains email → emailKey backfilled, idKey not rewritten) + +### AC-L2-BACKFILL-02: Backfill requires entity-field proof +A requested missing key is NOT backfilled when the final entity value does not contain +the mapped key field, +even if the original request arguments were sufficient to construct that key on the read path. +This prevents creating unvalidated cache associations from request arguments alone. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2448` — `TestCacheBackfill_SkipFetch_Counterexample_NotDerivable` (cached value lacks email field → zero L2 writes) + +### AC-L2-BACKFILL-03: Value mismatch writes the actual key, not the requested key +When the final entity value contains a mapped key field with a different value than the +requested key (e.g., request asked for `email:"a@example.com"` but subgraph returned +`email:"b@example.com"`), the requested key is NOT written, but the actual key derived +from entity data IS written. +The subgraph returned this value as backend-proven data, so it is valid to cache under +the actual key. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2608` — `TestCacheBackfill_FetchPath_ValueMismatch` (requested `a@` not written, actual `b@` written as derived key) + +### AC-L2-BACKFILL-04: Fetch-path refresh plus backfill +After a partial cache hit forces a subgraph fetch, +the existing key is refreshed with fresh data and the missing requested key is backfilled +when the final entity value proves it. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2498` — `TestCacheBackfill_FetchPath_HappyPath` (idKey refreshed, emailKey backfilled — two writes) +- `v2/pkg/engine/resolve/cache_load_test.go:2553` — `TestCacheBackfill_FetchPath_MissingField` (subgraph returns no email → only idKey refreshed — one write) + +### AC-L2-BACKFILL-05: Derived key expansion from final entity data +Beyond refreshing existing keys and backfilling requested missing keys, +the write path also writes additional keys when final backend-proven entity data makes +those keys derivable via `EntityKeyMappings`, +even if those keys were not part of the original read request. +This is the mechanism that enables cross-lookup: +a query with `id` argument populates the `username` key too, +so a later query with `username` argument can hit L2. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2663` — `TestCacheBackfill_DerivedKeyExpansion` (three mappings: id+email requested, username derived — three writes) +- `execution/engine/federation_caching_test.go:2300` — `TestRootFieldCachingWithArgs_PartialKeyWrite / "entity key mapping - partial key write does not generate extra keys from response"` (E2E: id requested, username derived from response) + +### AC-L2-BACKFILL-06: No double-accounting between regular and derived writes +The regular write path and derived-key expansion use a single `seen` map to prevent +the same key from being written twice. +A key that is already included in the regular write set is not re-added by the +derived-key path. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2498` — `TestCacheBackfill_FetchPath_HappyPath` (idKey appears in both regular and derived paths, written exactly once) + +### AC-L2-BACKFILL-07: Reproducibility checked by rendering, not by guessing +Write eligibility is determined by rendering keys from final entity data using +`renderDerivedEntityKeyFromValue` (the same renderer used by `renderDerivedEntityKey` for +request-arg-based keys). +This uses the same L2 prefix and interceptor logic as normal cache-key generation. +When a rendered key matches a requested missing key, it is a backfill. +When it doesn't match any requested key, it is a derived expansion. +In both cases, the rendered key string is the cache key — never the requested key. + +Tests: +- `v2/pkg/engine/resolve/cache_load_test.go:2608` — `TestCacheBackfill_FetchPath_ValueMismatch` (rendered key `b@` differs from requested `a@` → `b@` written as derived, `a@` not written) + +## @requestScoped Coordinate L1 Cache + +The coordinate L1 cache is a per-request plain `map[string]*astjson.Value` on the Loader +(`requestScopedL1`), main-thread only, separate from the entity L1 cache. +It stores field values keyed by subgraph-qualified strings (e.g., `"viewer.currentViewer"`). + +### Directive + +```graphql +directive @requestScoped(key: String!) on FIELD_DEFINITION +``` + +**Symmetric semantics**: every field annotated with `@requestScoped(key: "X")` in the +same subgraph shares the same L1 entry `{subgraphName}.X`. There is no +receiver/provider distinction. Every participating field is simultaneously: + +- A **reader** — the planner emits a hint so the resolver can inject from L1 and + potentially skip the subgraph fetch +- A **writer** — the planner emits an export so the resolver stores the value in L1 + after the fetch + +The first field to resolve populates L1; subsequent fields with the same key inject +from L1 (subject to widening checks and alias-aware normalization). + +**Composition validation**: +- `key` is mandatory +- When a key is declared on only one field in the subgraph, a warning is emitted — + `@requestScoped` is meaningless unless ≥ 2 fields share the same key + +### AC-RS-01: L1 storage uses schema-normalized values via the `ProvidesData` pipeline + +The coordinate L1 cache uses the same `astjson.Transform` pipeline as entity L1 and L2 +caches. Per-field normalize/denormalize Transforms are built from the +`RequestScopedField.ProvidesData` `*Object` tree. Writes run `structuralCopyNormalized` +(which delegates to `StructuralCopyWithTransform`) onto `l.jsonArena` to strip aliases. +Reads run `structuralCopyDenormalized` back onto `l.jsonArena` to re-apply aliases for +the current query's selection set. The planner populates `ProvidesData` in +`populateRequestScopedFieldsProvidesData` in `visitor.go`. + +Values in L1 are stored under schema field names (aliases normalized away on write), +and re-aliased on read per the current query's selection set. + +Tests: +- `v2/pkg/engine/plan/request_scoped_provides_data_test.go` — `TestPopulateRequestScopedFieldsProvidesData` +- `v2/pkg/engine/resolve/request_scoped_test.go` — `TestRequestScopedProvidesDataShapes` (nested aliases, array of aliased items, arg-variant sub-fields, mixed depths, __typename, nullable) + +### AC-RS-02: Export on fetch completion, inject before fetch + +Every `@requestScoped` field participates in both: +- **Export** (after fetch): the field's value is read from the response, normalized + via `ProvidesData`, and stored in L1 under its `L1Key` +- **Inject** (before fetch): the resolver checks L1 under the `L1Key`; if found and + the cached value satisfies the widening check, the value is denormalized (aliases + re-applied), injected onto items, and the fetch is skipped + +Tests: +- `v2/pkg/engine/resolve/request_scoped_test.go` — `TestExportRequestScopedFields`, `TestTryRequestScopedInjection`, `TestRequestScopedRoundTrip` + +### AC-RS-03: Field widening check prevents partial injection + +When the coordinate L1 has a cached value but it lacks fields required by the current +query's selection set (e.g., L1 has `{id, name}` but the current fetch needs +`{id, name, email}`), injection is blocked and the fetch proceeds normally. + +The check uses `validateItemHasRequiredData` against `hint.ProvidesData` — the same +validator used by entity L1 and L2. + +Tests: +- `v2/pkg/engine/resolve/request_scoped_test.go` — `TestTryRequestScopedInjection / "field widening blocks injection when cached value missing required fields"` + +### AC-RS-04: @interfaceObject type mapping + +When `@requestScoped` is declared on a field of an `@interfaceObject` type (e.g., +`Personalized.currentViewer`), the planner resolves the concrete entity type +(e.g., `Article`) to the interface type via `InterfaceObjects` and finds the +`@requestScoped` fields on the interface. This enables injection on entity batches +for concrete types even when the directive is declared on the interface. + +### AC-RS-05: Collect-then-inject atomicity + +When multiple hints exist on the same fetch, the injection is atomic: either ALL hints +are satisfied (and items are mutated with all injected values) or NONE are (items are +left untouched). The collect-then-inject pattern prevents partial mutations from +corrupting items when a later hint fails. + +Tests: +- `v2/pkg/engine/resolve/request_scoped_test.go` — `TestTryRequestScopedInjection / "partial hints returns false but does not mutate items"`, `TestRequestScopedRoundTrip / "multiple hints one blocked by field widening other cached"` + +### AC-RS-06: Trace reporting — L1 hit counters and LoadSkipped + +When `tryRequestScopedInjection` returns true and the fetch is skipped: +- `ensureFetchTrace(f).LoadSkipped = true` is set so the ART trace reports the fetch as skipped +- `res.cacheTraceRequestScopedHits = res.cacheTraceEntityCount` is set so `buildCacheTrace` + folds these into the `L1Hit` counter (subtracting from `L1Miss`). The playground renders + the red L1 hit badge accordingly. + +### AC-RS-07: Arena detach on export via StructuralCopy onto `l.jsonArena` + +`exportRequestScopedFields` must store a value that is independent of any source +arena. It does this by StructuralCopying onto `l.jsonArena` before storing: +- With `ProvidesData.HasAliases == true`, `StructuralCopyWithTransform` copies + via the per-field normalize Transform, stripping aliases and arg suffixes while + producing a fresh value owned by `l.jsonArena`. +- With `HasAliases == false`, `StructuralCopy` copies verbatim onto `l.jsonArena`. + +Merging an incoming export into an existing `requestScopedL1` entry uses the +working-copy-and-swap pattern: StructuralCopy the existing entry into a working +copy, run `astjson.MergeValues` against the working copy, and store the working +copy only on success. On merge failure the existing live entry is preserved +unchanged, so a partial `MergeValues` failure cannot corrupt sibling L1 keys. + +Without this, if the source value pointed into a goroutine arena or response tree +that gets freed or mutated, subsequent reads would panic or resurrect stale data. + +Tests: +- `v2/pkg/engine/resolve/request_scoped_test.go` — `TestExportedValuesAreIndependentCopies` +- `v2/pkg/engine/resolve/loader_cache_phase2_test.go:147` — `TestExportRequestScopedFields_MergeWorkingCopyOnFailure` (working-copy-and-swap isolates merge failure from live cache entry) + +### AC-RS-08: L1 gating + +`tryRequestScopedInjection` and `exportRequestScopedFields` must check +`l.ctx.ExecutionOptions.Caching.EnableL1Cache`. Per-request headers like +`X-WG-Disable-Entity-Cache-L1` disable L1 for the request and must also disable +the coordinate L1 since it's part of the L1 layer. + +## Future Improvements + +The following features are not yet implemented but are planned or under consideration: + +- **Stale-While-Revalidate (SWR)**: Serve stale cached data immediately while revalidating + asynchronously in the background. Would reduce tail latency for cache-miss scenarios + by serving slightly stale data rather than waiting for the subgraph. + +- **Tag-based invalidation**: Associate cache entries with tags (e.g., `team:123`) and + invalidate all entries with a given tag in a single operation. Would simplify bulk + invalidation for related entities. + +- **Cache entry compression**: Compress cached entity data (e.g., with zstd or gzip) to + reduce memory and network usage for large entities in external cache stores. diff --git a/docs/entity-caching/ENTITY_CACHING_INTEGRATION.md b/docs/entity-caching/ENTITY_CACHING_INTEGRATION.md new file mode 100644 index 0000000000..a765f8a632 --- /dev/null +++ b/docs/entity-caching/ENTITY_CACHING_INTEGRATION.md @@ -0,0 +1,999 @@ +# Entity Caching Integration Guide + +This guide covers everything needed to integrate the entity caching system into a GraphQL Federation router. After reading this, you should be able to fully configure L1/L2 caching, implement a cache backend, set up invalidation, and collect analytics. + +## Overview + +The caching system has two levels: + +| Level | Storage | Scope | Applies To | Default | +|-------|---------|-------|-----------|---------| +| **L1** | In-memory plain `map` per request, main-thread only | Single request | Entity fetches only | Disabled | +| **L2** | External cache (Redis, etc.) | Cross-request with TTL | Entity + root field fetches | Disabled | + +Both levels are opt-in and disabled by default. L1 prevents redundant fetches for the same entity within a single request. L2 shares entity data across requests. + +**Key principle**: Cache keys use only `@key` fields for stable entity identity (never `@requires`). + +## 1. Implement the LoaderCache Interface + +To use L2 caching, implement the `LoaderCache` interface from `v2/pkg/engine/resolve`: + +```go +import "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" + +type LoaderCache interface { + // Get retrieves cache entries by keys. + // Returns a slice of the same length as keys. Use nil for cache misses. + // Called from goroutines during parallel resolution — must be thread-safe. + Get(ctx context.Context, keys []string) ([]*resolve.CacheEntry, error) + + // Set stores cache entries with a TTL. + // Called from goroutines during parallel resolution — must be thread-safe. + Set(ctx context.Context, entries []*resolve.CacheEntry, ttl time.Duration) error + + // Delete removes cache entries by keys. + // Called during cache invalidation (extension-based, mutation-based). + Delete(ctx context.Context, keys []string) error +} + +type CacheEntry struct { + Key string // Cache key string (JSON format) + Value []byte // Opaque cached payload bytes (e.g., entity JSON or root-field response bytes); callers interpret + RemainingTTL time.Duration // Remaining TTL from cache (0 = unknown/not supported) + WriteReason CacheWriteReason // Why this entry was written (set by the engine, not by backends) +} +``` + +**Thread safety requirement**: `Get`, `Set`, and `Delete` may be called from multiple goroutines during parallel fetch execution. Your implementation must be safe for concurrent use. + +**RemainingTTL**: If your cache backend supports it, return the remaining TTL in `CacheEntry.RemainingTTL`. This is used for cache analytics (cache age tracking) and shadow mode staleness detection. Return 0 if not supported. + +## 2. Configure Per-Subgraph Caching + +### SubgraphCachingConfig + +Each subgraph can have independent caching configuration. Pass these via the factory option: + +```go +import ( + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" +) + +subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", // Must match SubgraphConfiguration.Name + EntityCaching: plan.EntityCacheConfigurations{...}, + RootFieldCaching: plan.RootFieldCacheConfigurations{...}, + MutationFieldCaching: plan.MutationFieldCacheConfigurations{...}, + MutationCacheInvalidation: plan.MutationCacheInvalidationConfigurations{...}, + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{...}, + }, +} + +factory := engine.NewFederationEngineConfigFactory( + ctx, + subgraphsConfigs, + engine.WithSubgraphEntityCachingConfigs(subgraphCachingConfigs), +) +config, err := factory.BuildEngineConfiguration() +``` + +### Entity Cache Configuration + +Controls L2 caching for entity types resolved via `_entities` queries: + +```go +plan.EntityCacheConfiguration{ + // TypeName is the entity type to cache (must match __typename from subgraph). + TypeName: "User", + + // CacheName identifies which LoaderCache instance to use. + // Multiple entity types can share a cache by using the same name. + CacheName: "default", + + // TTL specifies how long cached entities remain valid. + // Zero TTL means entries never expire (not recommended for production). + TTL: 60 * time.Second, + + // IncludeSubgraphHeaderPrefix controls whether forwarded headers affect cache keys. + // When true, cache keys include a hash of headers sent to the subgraph, + // ensuring different header configurations (e.g., different auth tokens) + // use separate cache entries. + IncludeSubgraphHeaderPrefix: true, + + // EnablePartialCacheLoad enables fetching only cache-missed entities. + // Default (false): any miss in a batch refetches ALL entities. + // When true: only missing entities are fetched, cached ones served directly. + EnablePartialCacheLoad: false, + + // HashAnalyticsKeys controls whether entity keys are hashed or stored raw + // in cache analytics. When true, KeyHash is populated instead of KeyRaw. + HashAnalyticsKeys: false, + + // ShadowMode enables shadow caching: L2 reads/writes happen but cached data + // is never served. Fresh data is always fetched and compared against cache + // for staleness detection. L1 cache is unaffected. + ShadowMode: false, + + // NegativeCacheTTL is the TTL for caching null entity results (entity not found). + // When > 0, null responses from _entities are cached as sentinels. + // When 0 (default), null entities are not cached. + NegativeCacheTTL: 5 * time.Second, +} +``` + +### Root Field Cache Configuration + +Controls L2 caching for root query fields (e.g., `Query.topProducts`): + +```go +plan.RootFieldCacheConfiguration{ + TypeName: "Query", + FieldName: "topProducts", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + + // EntityKeyMappings enables cache sharing between root fields and entity fetches. + // When set, the L2 cache key uses entity key format instead of root field format. + // Example: Query.user(id: "123") shares cache with User entity key {"id":"123"}. + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + { + EntityKeyField: "id", // @key field on User + ArgumentPath: []string{"id"}, // Root field argument name + + // ArgumentIsEntityKey marks the argument as a direct entity + // key lookup. When true AND the argument is a list type, + // each list element maps 1:1 to an entity in the response + // (positional correspondence). This enables batch cache key + // construction, empty list optimization, and partial fetch mode. + // See "Batch Entity Key Mode" section below. + ArgumentIsEntityKey: false, + }, + }, + }, + }, + + // PartialBatchLoad enables partial fetch mode for batch arguments + // (ArgumentIsEntityKey + list). When false (default), batch cache is + // all-or-nothing: any miss fetches the full list. When true, only + // missing IDs are fetched; cached entities are served directly. + // Only applies when EntityKeyMappings uses ArgumentIsEntityKey. + PartialBatchLoad: false, + + ShadowMode: false, +} +``` + +### Mutation Field Cache Configuration + +Controls whether entity fetches triggered by a mutation populate L2: + +```go +plan.MutationFieldCacheConfiguration{ + // Mutation field name + FieldName: "addReview", + + // By default, mutations skip L2 reads AND L2 writes. + // Set to true to allow entity fetches during this mutation to write to L2. + EnableEntityL2CachePopulation: true, + + // TTL overrides the entity's default cache TTL for L2 writes triggered by this mutation. + // When zero (default), the entity's default TTL (from EntityCacheConfiguration) is used. + // Useful for @cachePopulate(maxAge: 60) on mutation fields. + TTL: 60 * time.Second, +} +``` + +**Mutation caching behavior**: +- Mutations **always skip L2 reads** (always fetch fresh from subgraph) +- Mutations **skip L2 writes by default** +- With `EnableEntityL2CachePopulation: true`, entity fetches triggered by this mutation **will write to L2** +- With `TTL` set, mutation-triggered L2 writes use this TTL instead of the entity's default + +### Mutation Cache Invalidation Configuration + +Configures automatic L2 cache deletion after a mutation completes: + +```go +plan.MutationCacheInvalidationConfiguration{ + FieldName: "updateUser", + // EntityTypeName can be omitted — it's inferred from the mutation return type. + EntityTypeName: "User", +} +``` + +When the mutation returns an entity with `@key` fields, the corresponding L2 cache entry is deleted. + +### Subscription Entity Population Configuration + +Controls how subscription events update the L2 cache: + +```go +plan.SubscriptionEntityPopulationConfiguration{ + TypeName: "Product", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + + // When true and the subscription only provides @key fields (no additional + // entity fields), DELETE the L2 cache entry on each event. + // When false (default), populate L2 with entity data from the event. + EnableInvalidationOnKeyOnly: false, +} +``` + +**Two modes**: +- **Populate** (default): subscription provides entity fields beyond `@key` → write to L2 +- **Invalidate** (`EnableInvalidationOnKeyOnly: true`): subscription provides only `@key` → delete from L2 + +## 3. Wire Caches into the Resolver + +Register your `LoaderCache` implementations in the `ResolverOptions`: + +```go +resolver := resolve.New(ctx, resolve.ResolverOptions{ + MaxConcurrency: 32, + + // Register named cache instances (referenced by CacheName in configs) + Caches: map[string]resolve.LoaderCache{ + "default": myRedisCache, + "fast": myInMemoryCache, + }, + + // Required for extension-based cache invalidation + // Maps subgraphName → entityTypeName → invalidation config + EntityCacheConfigs: map[string]map[string]*resolve.EntityCacheInvalidationConfig{ + "accounts": { + "User": { + CacheName: "default", + IncludeSubgraphHeaderPrefix: true, + }, + }, + }, + + // ... other options +}) +``` + +## 4. Enable Caching at Runtime + +Set caching options per-request on the execution context: + +```go +ctx := resolve.NewContext(context.Background()) +ctx.ExecutionOptions.Caching = resolve.CachingOptions{ + // Enable per-request in-memory entity cache + EnableL1Cache: true, + + // Enable external cross-request cache + EnableL2Cache: true, + + // Enable detailed cache analytics collection + EnableCacheAnalytics: true, + + // Optional: transform L2 cache keys (e.g., for tenant isolation) + L2CacheKeyInterceptor: func(ctx context.Context, key string, info resolve.L2CacheKeyInterceptorInfo) string { + if tenantID, ok := ctx.Value("tenant-id").(string); ok { + return tenantID + ":" + key + } + return key + }, +} +``` + +**L2CacheKeyInterceptor** receives: +```go +type L2CacheKeyInterceptorInfo struct { + SubgraphName string // e.g., "accounts" + CacheName string // e.g., "default" +} +``` + +The interceptor is applied **after** subgraph header prefix. It does NOT affect L1 keys. + +## 5. Cache Key Format + +### Entity Keys + +Generated by `EntityQueryCacheKeyTemplate` from `@key` fields: +```json +{"__typename":"User","key":{"id":"123"}} +{"__typename":"Product","key":{"upc":"top-1"}} +{"__typename":"Order","key":{"id":"1","orgId":"acme"}} +``` + +The `__typename` value comes from the response data. +When `__typename` is missing from the response, +the plan-time `TypeName` field on `EntityQueryCacheKeyTemplate` is used as fallback. + +### Root Field Keys + +Generated by `RootQueryCacheKeyTemplate` from field name and arguments: +```json +{"__typename":"Query","field":"topProducts"} +{"__typename":"Query","field":"user","args":{"id":"123"}} +{"__typename":"Query","field":"search","args":{"max":10,"term":"C3PO"}} +``` + +Arguments are sorted alphabetically for stable key generation. + +### Key Transformations (applied in order) + +1. **Global cache key prefix** (when `GlobalCacheKeyPrefix` is set on the request's `CachingOptions`): + ```text + v42:{"__typename":"User","key":{"id":"123"}} + ``` + +2. **Subgraph header hash prefix** (when `IncludeSubgraphHeaderPrefix = true`): + ```text + v42:{headerHash}:{"__typename":"User","key":{"id":"123"}} + ``` + +3. **L2CacheKeyInterceptor** (when set): + ```text + tenant-X:v42:{headerHash}:{"__typename":"User","key":{"id":"123"}} + ``` + +### Entity Field Argument-Aware Keys + +When entity fields have arguments (e.g., `greeting(style: "formal")`), the field argument values are hashed via xxhash and appended as a suffix to the cache key. Different argument values produce different cache entries. + +### EntityKeyMappings (Cache Sharing) + +When `EntityKeyMappings` is configured on a root field, the L2 cache key uses entity key format instead of root field format. This means: +- `Query.user(id: "123")` → cache key `{"__typename":"User","key":{"id":"123"}}` +- A subsequent `_entities` fetch for `User(id: "123")` hits the same cache entry + +**Multiple key mappings:** An entity with multiple `@key` directives can have multiple `EntityKeyMapping` entries. Each mapping independently generates a cache key when all its arguments are available. If a mapping's arguments are missing from the query variables, that mapping is skipped — the remaining mappings still produce keys. + +```go +// Example: Product has @key(fields: "id") and @key(fields: "sku region") +EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "Product", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "Product", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "sku", ArgumentPath: []string{"sku"}}, + {EntityKeyField: "region", ArgumentPath: []string{"region"}}, + }}, +} +// productByAll(id, sku, region) → 2 cache keys (both mappings resolve) +// productBySku(sku, region) → 1 cache key (only sku+region mapping resolves) +``` + +**Nested keys with structured arguments:** For entities with nested `@key` fields (e.g., `@key(fields: "store { id region }")`), use dot-notation for `EntityKeyField` and multi-element paths for `ArgumentPath`: + +```go +// Nested key with structured input: query productByStore(store: {id: "s1", region: "us"}) +EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "Product", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "store.id", ArgumentPath: []string{"store", "id"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"store", "region"}}, + }}, +} +// Produces: {"__typename":"Product","key":{"store":{"id":"s1","region":"us"}}} +``` + +**Write-side behavior:** L2 reads use the argument-derived key set. +L2 writes use smart cache key backfill to make precise per-key decisions based on +final entity data: + +- **Existing keys** that hit on read are refreshed only when the data changed + (multi-candidate writeback) or when a subgraph fetch returned fresh data. +- **Requested missing keys** (keys generated from arguments on read but absent in L2) + are backfilled only when the final entity value proves them — the mapped key field + must be present in the entity and render to the exact same key string. + Request arguments alone are not sufficient to prove a cache association on write. +- **Derived keys** beyond the original request are written when the final entity data + contains the mapped key fields for other `EntityKeyMapping` entries. + For example, if a root field is queried with `id` and the response contains `username`, + the `username` key is also written, enabling cross-lookup by `username` on subsequent requests. + +If a root field provides only a subset of arguments (e.g., only `sku` and `region` but +not `id`), the read uses only the matching keys. +The write may add the `id` key if the subgraph response contains `id`. + +**Variable remapping:** `RemapVariables` applies only to single-element argument paths. +Multi-element paths (structured argument navigation like `["store", "id"]`) are not remapped. + +### Batch Entity Key Mode + +When a root field takes a **list argument** that maps 1:1 to entities in the response +(e.g., `products(ids: ["1","2","3"])` returns exactly three products in order), +set `ArgumentIsEntityKey: true` on the corresponding `FieldMapping`. +This enables per-entity cache key construction from each list element, +rather than treating the entire list as a single opaque cache key. + +**Configuration:** +```go +plan.RootFieldCacheConfiguration{ + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 60 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + { + EntityKeyField: "id", + ArgumentPath: []string{"ids"}, + ArgumentIsEntityKey: true, + }, + }, + }, + }, + // Optional: enable partial fetch (only missing IDs fetched) + PartialBatchLoad: false, +} +``` + +**Behavior:** + +- **Cache key construction**: One cache key per list element. + `products(ids: ["1","2","3"])` produces three keys: + `{"__typename":"Product","key":{"id":"1"}}`, + `{"__typename":"Product","key":{"id":"2"}}`, + `{"__typename":"Product","key":{"id":"3"}}`. + Each key uses the same entity key format as `_entities` fetches, + enabling cache sharing between root fields and entity resolution. + +- **Positional correspondence**: The engine assumes the response array has the same + length and order as the input list argument. + Element `ids[0]` corresponds to response `data.products[0]`, etc. + `CacheKey.BatchIndex` records each key's position for response reassembly. + +- **Empty list short-circuit**: When the list argument is `[]` or `null`, + the engine returns an empty response (`[]`) immediately without calling the + resolver or the cache. + This avoids unnecessary work for trivially empty queries. + +- **Full fetch mode** (`PartialBatchLoad: false`, default): Any cache miss in the batch + causes the full list to be sent to the subgraph. + All returned entities are cached. + +- **Partial fetch mode** (`PartialBatchLoad: true`): Only missing IDs are sent to the + subgraph. + The input list variable is filtered to exclude IDs that were cache hits. + Cached entities are served directly and merged with fresh results in the correct + positional order. + +**Cache sharing with scalar root fields:** +Batch entity keys use the same format as scalar `EntityKeyMappings`. +A scalar root field `product(id: "1")` and a batch root field `products(ids: ["1","2"])` +both produce `{"__typename":"Product","key":{"id":"1"}}` for ID `"1"`, +so they share the same L2 cache entry. + +### TypeName Fallback + +Entity cache keys normally use `__typename` from the response data. +When `__typename` is missing from the response, +the plan-time `TypeName` on `EntityQueryCacheKeyTemplate` is used as fallback +instead of a hardcoded default. +This ensures cache keys always reflect the correct entity type. + +### CacheKeyTemplate Interface + +The `CacheKeyTemplate` interface (used by both `EntityQueryCacheKeyTemplate` and +`RootQueryCacheKeyTemplate`) exposes the following methods: + +```go +type CacheKeyTemplate interface { + RenderCacheKeys(ctx *Context, fetch *SingleFetch, keys *[]CacheKey) error + IsEntityFetch() bool + BatchEntityKeyArgumentPath() []string + EntityMergePath(postProcessing PostProcessingConfiguration) []string +} +``` + +- `IsEntityFetch()` — reports whether rendered keys describe entity fetch inputs. +- `BatchEntityKeyArgumentPath()` — returns the argument path for batch entity lookups. + Returns nil when the template does not support batch entity key construction. +- `EntityMergePath()` — returns the entity-level merge path for root-field entity mappings. + Returns nil when the template stores complete response payloads. + +**Constructor**: Use `NewRootQueryCacheKeyTemplate(rootFields, entityKeyMappings)` to create +`RootQueryCacheKeyTemplate` instances. +The constructor precomputes batch entity key metadata via `precomputeDerivedFields()`. + +## 6. Cache Behavior by Operation Type + +### Queries + +```text +L1 check (main thread, entity fetches only) + ↓ miss +L2 check (goroutine, entity + root fetches) + ↓ miss +Subgraph fetch (goroutine) + ↓ response +Populate L1 + L2 (main thread for L1, goroutine for L2) +``` + +L1 is checked first on the main thread. If it's a complete hit, the goroutine is not spawned (saves overhead). L2 and fetch happen in parallel goroutines. + +### Mutations + +- **Always skip L2 reads** — fetch fresh data from subgraph +- **Skip L2 writes by default** — unless `EnableEntityL2CachePopulation: true` on the mutation field +- **Optional invalidation** — with `MutationCacheInvalidationConfiguration`, delete L2 entry after mutation +- **Mutation impact detection** — when analytics enabled, compare mutation response against cached value + +### Subscriptions + +Based on `SubscriptionEntityPopulationConfiguration`: +- **Populate mode** (default): on each subscription event, write entity data to L2 +- **Invalidate mode** (`EnableInvalidationOnKeyOnly: true`): on each event with only `@key` fields, delete L2 entry + +## 7. Cache Invalidation + +### Mutation-Triggered Invalidation + +Configure via `MutationCacheInvalidationConfiguration`. After a mutation completes and returns an entity, the L2 cache entry for that entity is deleted. + +### Subgraph Response Extension Invalidation + +Subgraphs can signal cache invalidation through GraphQL response extensions: + +```json +{ + "data": { "updateUser": { "id": "1", "name": "Updated" } }, + "extensions": { + "cacheInvalidation": { + "keys": [ + { "typename": "User", "key": { "id": "1" } }, + { "typename": "User", "key": { "id": "2" } } + ] + } + } +} +``` + +The engine automatically: +1. Parses `extensions.cacheInvalidation.keys` from each subgraph response +2. Builds L2 cache keys matching entity type and key fields +3. Applies the full L2 key-transformation pipeline in order: `GlobalCacheKeyPrefix` → subgraph header prefix → `L2CacheKeyInterceptor` (same ordering as cache writes) +4. Calls `LoaderCache.Delete()` for each key +5. **Optimization**: skips delete if the same key is being written in the same fetch (no unnecessary round-trip) + +**Requirements for extension-based invalidation**: +- `EntityCacheConfigs` must be set on `ResolverOptions` (maps subgraph name → entity type → cache config) +- `EnableL2Cache` must be true on the request context + +### Subscription-Based Invalidation + +With `EnableInvalidationOnKeyOnly: true`, subscription events that only contain `@key` fields trigger L2 deletion. + +### Manual Invalidation + +Call `LoaderCache.Delete()` directly with cache keys. The key format is: +```text +[optional-global-prefix:][optional-interceptor-prefix:][optional-header-hash:]{"__typename":"TypeName","key":{...}} +``` + +If `GlobalCacheKeyPrefix` is configured on the router, reads and writes both prepend it +to every key. Manual invalidation callers must include the same global prefix, otherwise +`Delete()` will target a different key than the live reads/writes use and the entry will +remain in the cache. + +## 8. Partial Cache Loading + +Controls what happens when some entities in a batch are cached and others are not. + +**Default (`EnablePartialCacheLoad: false`)**: +Any cache miss in a batch → refetch ALL entities from the subgraph. This keeps the cache maximally fresh because every entity gets a fresh value on each batch miss. + +**Enabled (`EnablePartialCacheLoad: true`)**: +Only missing entities are fetched from the subgraph. Cached entities are served directly within their TTL window. This reduces subgraph load but cached entities may be slightly stale (within TTL). + +Choose based on your freshness vs. performance tradeoff. + +## 9. Shadow Mode + +Shadow mode lets you test caching in production without serving cached data to clients. + +**Behavior**: +- L2 cache reads and writes happen normally +- Cached data is **never served** — fresh data is always fetched from the subgraph +- Fresh and cached data are compared for staleness detection +- L1 cache works normally (not affected by shadow mode) + +**Configuration**: Set `ShadowMode: true` on `EntityCacheConfiguration` or `RootFieldCacheConfiguration`. + +**Staleness results** are available in `CacheAnalyticsSnapshot.ShadowComparisons`: +```go +type ShadowComparisonEvent struct { + CacheKey string // Cache key for correlation + EntityType string // Entity type name + IsFresh bool // true if cached data matches fresh data + CachedHash uint64 // xxhash of cached ProvidesData fields + FreshHash uint64 // xxhash of fresh ProvidesData fields + CachedBytes int // Size of cached ProvidesData + FreshBytes int // Size of fresh ProvidesData + DataSource string // Subgraph name + CacheAgeMs int64 // Age of cached entry (ms, 0 = unknown) + ConfiguredTTL time.Duration // TTL configured for this entity +} +``` + +## 10. Cache Analytics + +Enable via `EnableCacheAnalytics: true` in `CachingOptions`. After execution, collect stats: + +```go +snapshot := ctx.GetCacheStats() +``` + +### CacheAnalyticsSnapshot + +```go +type CacheAnalyticsSnapshot struct { + L1Reads []CacheKeyEvent // L1 read events (hit/miss) + L2Reads []CacheKeyEvent // L2 read events (hit/miss/partial-hit) + L1Writes []CacheWriteEvent // L1 write events + L2Writes []CacheWriteEvent // L2 write events + FetchTimings []FetchTimingEvent // Per-fetch timing with HTTP status + ErrorEvents []SubgraphErrorEvent // Subgraph errors + FieldHashes []EntityFieldHash // Field value hashes for staleness + EntityTypes []EntityTypeInfo // Entity counts by type + ShadowComparisons []ShadowComparisonEvent // Shadow mode results + MutationEvents []MutationEvent // Mutation impact on cache +} +``` + +### Convenience Methods + +```go +snapshot.L1HitRate() // float64 [0, 1] +snapshot.L2HitRate() // float64 [0, 1] +snapshot.L1HitCount() // int64 +snapshot.L2HitCount() // int64 +snapshot.CachedBytesServed() // int64 +snapshot.EventsByEntityType() // map[string]EntityTypeCacheStats +``` + +### Key Event Types + +**CacheKeyEvent** — per-key cache lookup: +```go +type CacheKeyEvent struct { + CacheKey string // Cache key + EntityType string // Entity type name + Kind CacheKeyEventKind // CacheKeyHit, CacheKeyMiss, CacheKeyPartialHit + DataSource string // Subgraph name + ByteSize int // Cached entry size + CacheAgeMs int64 // Age in ms (L2 only, 0 = unknown) + Shadow bool // Shadow mode event +} +``` + +**CacheWriteEvent** — per-key cache write: +```go +type CacheWriteEvent struct { + CacheKey string // Cache key + EntityType string // Entity type name + ByteSize int // Written entry size + DataSource string // Subgraph name + CacheLevel CacheLevel // CacheLevelL1 or CacheLevelL2 + TTL time.Duration // TTL used for this write + Shadow bool // Shadow mode event + Source CacheOperationSource // "query", "mutation", or "subscription" + WriteReason CacheWriteReason // "refresh", "backfill", "derived", or "" (see below) +} +``` + +`WriteReason` is set for root field `EntityKeyMappings` L2 writes: +- `"refresh"` — existing cached key rewritten with fresh or merged data +- `"backfill"` — missing requested key proven by final entity data +- `"derived"` — new key derived from entity data not in the original request + +Empty for entity fetches and non-EntityKeyMappings root field writes. + +**FetchTimingEvent** — per-fetch timing: +```go +type FetchTimingEvent struct { + DataSource string // Subgraph name + EntityType string // Entity type (empty for root fields) + DurationMs int64 // Fetch/lookup duration + Source FieldSource // FieldSourceSubgraph, FieldSourceL1, FieldSourceL2 + ItemCount int // Number of entities + IsEntityFetch bool // true for _entities queries + HTTPStatusCode int // HTTP status (0 for cache hits) + ResponseBytes int // Response body size (0 for cache hits) + TTFBMs int64 // Time to first byte +} +``` + +**MutationEvent** — mutation impact on cached entities: +```go +type MutationEvent struct { + MutationRootField string // e.g., "updateUser" + EntityType string // e.g., "User" + EntityCacheKey string // Display key JSON + HadCachedValue bool // true if L2 had an entry + IsStale bool // true if cached differs from mutation response + CachedHash uint64 // Hash of cached ProvidesData + FreshHash uint64 // Hash of mutation response ProvidesData + CachedBytes int // 0 when HadCachedValue=false + FreshBytes int +} +``` + +### Integration Pattern + +```go +// After each request: +snapshot := ctx.GetCacheStats() + +// Export to observability +metrics.RecordL1HitRate(snapshot.L1HitRate()) +metrics.RecordL2HitRate(snapshot.L2HitRate()) +metrics.RecordCachedBytesServed(snapshot.CachedBytesServed()) + +for _, timing := range snapshot.FetchTimings { + metrics.RecordFetchDuration(timing.DataSource, timing.DurationMs, timing.Source) +} + +for _, shadow := range snapshot.ShadowComparisons { + if !shadow.IsFresh { + log.Warn("stale cache entry", "entity", shadow.EntityType, "key", shadow.CacheKey, "age_ms", shadow.CacheAgeMs) + } +} + +for _, mutation := range snapshot.MutationEvents { + if mutation.IsStale { + log.Info("mutation updated stale cache", "field", mutation.MutationRootField, "entity", mutation.EntityType) + } +} +``` + +## 11. Cache Trace in Response Extensions + +When the trace feature is enabled (`TraceOptions.Enable = true` with `IncludeTraceOutputInResponseExtensions = true`), each fetch in the response's `extensions.trace` includes a `cache_trace` object with per-fetch caching details. This provides real-time visibility into cache behavior for each subgraph call. + +### Enabling Cache Trace + +Cache trace is included automatically when tracing is enabled. To exclude it (e.g., to reduce response size), set `ExcludeCacheStats: true`: + +```go +opts := engine.WithRequestTraceOptions(resolve.TraceOptions{ + Enable: true, + IncludeTraceOutputInResponseExtensions: true, + ExcludeCacheStats: false, // default: included +}) +``` + +**Zero overhead**: When `Enable` is false or `ExcludeCacheStats` is true, no cache trace data is collected — no timing calls, no allocations, no counting. + +### CacheTrace Structure + +Each fetch node in `extensions.trace.fetches` includes: + +```go +type CacheTrace struct { + L1Enabled bool `json:"l1_enabled"` // L1 enabled for this fetch (runtime state) + L2Enabled bool `json:"l2_enabled"` // L2 enabled for this fetch (runtime state) + CacheName string `json:"cache_name"` // Named cache instance + TTLSeconds int64 `json:"ttl_seconds"` // Configured TTL + + L1Hit int `json:"l1_hit"` // L1 cache hits + L1Miss int `json:"l1_miss"` // L1 cache misses + L2Hit int `json:"l2_hit"` // L2 cache hits + L2Miss int `json:"l2_miss"` // L2 cache misses + + NegativeCacheHits int `json:"negative_cache_hits,omitempty"` // Null entities from cache + + // L2 operation timing + L2GetDurationNano int64 `json:"l2_get_duration_nanoseconds,omitempty"` + L2SetDurationNano int64 `json:"l2_set_duration_nanoseconds,omitempty"` + L2SetNegativeDurationNano int64 `json:"l2_set_negative_duration_nanoseconds,omitempty"` + + // Configuration flags + PartialCacheLoad bool `json:"partial_cache_load,omitempty"` + ShadowMode bool `json:"shadow_mode,omitempty"` + ShadowHit bool `json:"shadow_hit,omitempty"` + IncludeSubgraphHeaderPrefix bool `json:"include_subgraph_header_prefix,omitempty"` + + // Per-entity details (entity/batch fetches only) + Entities []CacheTraceEntity `json:"entities,omitempty"` + + // Cache keys used (when ExcludeRawInputData is false) + Keys []string `json:"keys,omitempty"` + + // Errors from cache operations + L2GetError string `json:"l2_get_error,omitempty"` + L2SetError string `json:"l2_set_error,omitempty"` +} +``` + +### Example Response + +```json +{ + "data": { "topProducts": [...] }, + "extensions": { + "trace": { + "fetches": { + "kind": "Sequence", + "children": [{ + "kind": "Single", + "fetch": { + "kind": "Single", + "source_name": "accounts", + "trace": { + "duration_load_nanoseconds": 5000000, + "cache_trace": { + "l1_enabled": true, + "l2_enabled": true, + "cache_name": "default", + "ttl_seconds": 60, + "l1_hit": 0, + "l1_miss": 1, + "l2_hit": 1, + "l2_miss": 0, + "l2_get_duration_nanoseconds": 250000, + "keys": ["{\"__typename\":\"User\",\"key\":{\"id\":\"1\"}}"] + } + } + } + }] + } + } + } +} +``` + +## 12. Complete Integration Example + +```go +package main + +import ( + "context" + "time" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func setupCaching() { + // 1. Define subgraph caching configurations + cachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "User", + CacheName: "default", + TTL: 5 * time.Minute, + IncludeSubgraphHeaderPrefix: true, + }, + }, + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "me", + CacheName: "default", + TTL: 1 * time.Minute, + IncludeSubgraphHeaderPrefix: true, + }, + }, + MutationFieldCaching: plan.MutationFieldCacheConfigurations{ + { + FieldName: "updateUser", + EnableEntityL2CachePopulation: true, + }, + }, + MutationCacheInvalidation: plan.MutationCacheInvalidationConfigurations{ + { + FieldName: "deleteUser", + EntityTypeName: "User", + }, + }, + }, + { + SubgraphName: "products", + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "Product", + CacheName: "default", + TTL: 10 * time.Minute, + }, + }, + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "topProducts", + CacheName: "default", + TTL: 30 * time.Second, + }, + }, + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + { + TypeName: "Product", + CacheName: "default", + TTL: 10 * time.Minute, + EnableInvalidationOnKeyOnly: true, + }, + }, + }, + } + + // 2. Create engine configuration + factory := engine.NewFederationEngineConfigFactory( + context.Background(), + subgraphConfigs, // []engine.SubgraphConfiguration + engine.WithSubgraphEntityCachingConfigs(cachingConfigs), + ) + config, _ := factory.BuildEngineConfiguration() + + // 3. Create resolver with cache instances + resolver := resolve.New(context.Background(), resolve.ResolverOptions{ + MaxConcurrency: 64, + Caches: map[string]resolve.LoaderCache{ + "default": NewRedisCache("redis://localhost:6379"), + }, + EntityCacheConfigs: map[string]map[string]*resolve.EntityCacheInvalidationConfig{ + "accounts": { + "User": {CacheName: "default", IncludeSubgraphHeaderPrefix: true}, + }, + "products": { + "Product": {CacheName: "default"}, + }, + }, + }) + + // 4. Per-request: enable caching + execCtx := resolve.NewContext(context.Background()) + execCtx.ExecutionOptions.Caching = resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + EnableCacheAnalytics: true, + L2CacheKeyInterceptor: func(ctx context.Context, key string, info resolve.L2CacheKeyInterceptorInfo) string { + // Optional: add tenant isolation + if tenantID, ok := ctx.Value("tenant-id").(string); ok { + return tenantID + ":" + key + } + return key + }, + } + + // 5. Resolve (uses config from step 2) + resolveInfo, _ := resolver.ResolveGraphQLResponse(execCtx, response, initialData, writer) + + // 6. Collect cache analytics + snapshot := execCtx.GetCacheStats() + _ = snapshot.L1HitRate() + _ = snapshot.L2HitRate() + _ = snapshot.CachedBytesServed() + _ = config + _ = resolveInfo +} +``` + +## 13. Configuration Reference Summary + +| Configuration | Package | Purpose | +|--------------|---------|---------| +| `SubgraphCachingConfig` | `execution/engine` | Top-level per-subgraph config container | +| `EntityCacheConfiguration` | `v2/pkg/engine/plan` | L2 entity caching (TypeName, TTL, etc.) | +| `RootFieldCacheConfiguration` | `v2/pkg/engine/plan` | L2 root field caching (FieldName, EntityKeyMappings, PartialBatchLoad) | +| `FieldMapping.ArgumentIsEntityKey` | `v2/pkg/engine/plan` | Marks argument as direct entity key for batch cache key construction | +| `CacheKeyTemplate` | `v2/pkg/engine/resolve` | Interface for cache key rendering (entity + root field templates) | +| `NewRootQueryCacheKeyTemplate` | `v2/pkg/engine/resolve` | Constructor for root field cache key templates (precomputes batch metadata) | +| `MutationFieldCacheConfiguration` | `v2/pkg/engine/plan` | Mutation L2 write control | +| `MutationCacheInvalidationConfiguration` | `v2/pkg/engine/plan` | Mutation-triggered L2 deletion | +| `SubscriptionEntityPopulationConfiguration` | `v2/pkg/engine/plan` | Subscription L2 populate/invalidate | +| `CachingOptions` | `v2/pkg/engine/resolve` | Per-request L1/L2/analytics enable | +| `L2CacheKeyInterceptor` | `v2/pkg/engine/resolve` | Custom key transform (tenant isolation) | +| `LoaderCache` | `v2/pkg/engine/resolve` | Cache backend interface | +| `EntityCacheInvalidationConfig` | `v2/pkg/engine/resolve` | Extension-based invalidation lookup | +| `ResolverOptions.Caches` | `v2/pkg/engine/resolve` | Named cache instance registry | +| `TraceOptions.ExcludeCacheStats` | `v2/pkg/engine/resolve` | Exclude cache trace from response extensions | diff --git a/examples/federation/go.mod b/examples/federation/go.mod index 882ee7fd46..24fa11d039 100644 --- a/examples/federation/go.mod +++ b/examples/federation/go.mod @@ -49,10 +49,10 @@ require ( github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/sjson v1.2.5 // indirect github.com/urfave/cli/v2 v2.27.7 // indirect - github.com/wundergraph/astjson v1.0.0 // indirect + github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f // indirect github.com/wundergraph/cosmo/composition-go v0.0.0-20241020204711-78f240a77c99 // indirect github.com/wundergraph/cosmo/router v0.0.0-20251013094319-c611abf26b17 // indirect - github.com/wundergraph/go-arena v1.1.0 // indirect + github.com/wundergraph/go-arena v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.29.0 // indirect diff --git a/examples/federation/go.sum b/examples/federation/go.sum index 4bcbb63885..a09480ca6d 100644 --- a/examples/federation/go.sum +++ b/examples/federation/go.sum @@ -59,8 +59,7 @@ github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -154,15 +153,16 @@ github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= -github.com/wundergraph/astjson v0.0.0-20250106123708-be463c97e083 h1:8/D7f8gKxTBjW+SZK4mhxTTBVpxcqeBgWF1Rfmltbfk= -github.com/wundergraph/astjson v0.0.0-20250106123708-be463c97e083/go.mod h1:eOTL6acwctsN4F3b7YE+eE2t8zcJ/doLm9sZzsxxxrE= -github.com/wundergraph/astjson v1.0.0/go.mod h1:h12D/dxxnedtLzsKyBLK7/Oe4TAoGpRVC9nDpDrZSWw= +github.com/wundergraph/astjson v1.1.1-0.20260418181506-345133162d36 h1:xf9ZfqdSRYgqf2l2TYFGHXIzagWvFRefvbJW3StWSiM= +github.com/wundergraph/astjson v1.1.1-0.20260418181506-345133162d36/go.mod h1:uHSJv7uowLN/nIPvkTFqUDt1sXk4qQU0KNwHfwfDcQE= +github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f h1:MoVoeMlgY9Ej1aoF3Y/kniBZ8pv+WfIA3YSCnPBh+6M= +github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f/go.mod h1:uHSJv7uowLN/nIPvkTFqUDt1sXk4qQU0KNwHfwfDcQE= github.com/wundergraph/cosmo/composition-go v0.0.0-20241020204711-78f240a77c99 h1:TGXDYfDhwFLFTuNuCwkuqXT5aXGz47zcurXLfTBS9w4= github.com/wundergraph/cosmo/composition-go v0.0.0-20241020204711-78f240a77c99/go.mod h1:fUuOAUAXUFB/mlSkAaImGeE4A841AKR5dTMWhV4ibxI= github.com/wundergraph/cosmo/router v0.0.0-20251013094319-c611abf26b17 h1:GjO2E8LTf3U5JiQJCY4MmlRcAjVt7IvAbWFSgEjQdl8= github.com/wundergraph/cosmo/router v0.0.0-20251013094319-c611abf26b17/go.mod h1:7kt64e0LOLMBqOzrfu9PuLRn9cVT9YN1Bb3EennVtws= -github.com/wundergraph/go-arena v1.1.0 h1:9+wSRkJAkA2vbYHp6s8tEGhPViRGQNGXqPHT0QzhdIc= -github.com/wundergraph/go-arena v1.1.0/go.mod h1:ROOysEHWJjLQ8FSfNxZCziagb7Qw2nXY3/vgKRh7eWw= +github.com/wundergraph/go-arena v1.2.0 h1:6MlhEy0NBY3Z+BuK3rj0F9YoT3bM0SlahGkzK0lKRZ4= +github.com/wundergraph/go-arena v1.2.0/go.mod h1:ROOysEHWJjLQ8FSfNxZCziagb7Qw2nXY3/vgKRh7eWw= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= diff --git a/execution/engine/CLAUDE.md b/execution/engine/CLAUDE.md new file mode 100644 index 0000000000..2ebfd000d1 --- /dev/null +++ b/execution/engine/CLAUDE.md @@ -0,0 +1,126 @@ +# E2E Test Conventions for `execution/engine` + +## Inline everything + +No `const` blocks, no named variables for expected values. Put all literal values (cache keys, hashes, byte sizes, query strings, expected responses) directly inline in assertions and setup code. Duplicate values across subtests rather than sharing — each subtest must be fully self-contained and readable without scrolling up. + +```go +// CORRECT: literals inline in assertions +assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews"}, + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: `11945571715631340836:{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", ByteSize: 177, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second}, + }, +}), snap) + +// WRONG: named constants defined above the test logic +const ( + keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` + byteSizeProductTop1 = 177 +) +``` + +## Inline setup too + +Config structs (e.g. `SubgraphCachingConfigs`) should be defined inline in the setup call, not as named variables. Only keep variables for state that is mutated or referenced multiple times at runtime (e.g. `tracker`, `mockHeaders`, `setup`). + +```go +// CORRECT: config inline +setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": NewFakeLoaderCache()}), + withHTTPClient(&http.Client{Transport: tracker}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }}, + }), +)) + +// WRONG: named variable for config used only once +configs := engine.SubgraphCachingConfigs{...} +setup := federationtesting.NewFederationSetup(addCachingGateway( + withSubgraphEntityCachingConfigs(configs), +)) +``` + +## Self-contained subtests + +Each `t.Run` subtest must be independently readable. No shared constants, variables, or helpers defined in the parent test function. Duplication across subtests is preferred over sharing. + +## Inline queries + +Use `QueryStringWithHeaders` with inline GraphQL query strings. Do not load queries from files. + +```go +// CORRECT +resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body } } }`, nil, t) + +// WRONG +resp := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, + cachingTestQueryPath("queries/my_query.query"), nil, t) +``` + +## Full snapshot assertions + +Assert complete `CacheAnalyticsSnapshot` structs — not just the fields you care about. This catches unexpected events. + +## Snapshot comments + +Every event line in a snapshot assertion MUST have a brief comment explaining **why** that event occurred. + +```go +// CORRECT: explains causation +{CacheKey: `...`, Kind: resolve.CacheKeyMiss, Shadow: true}, // Shadow L2 miss: cache empty +{CacheKey: `...`, Kind: resolve.CacheKeyMiss, Shadow: false}, // L2 miss: shadow mode not implemented for root fields + +// WRONG: restates the field value +{CacheKey: `...`, Kind: resolve.CacheKeyMiss}, // this is a miss +``` + +## Subscription cleanup via t.Cleanup + +Always register subscription close functions with `t.Cleanup` immediately after creation. `t.Fatal`/`require` calls `runtime.Goexit()`, skipping any explicit close calls later in the test. `t.Cleanup` is guaranteed to run regardless of how the test exits. + +```go +// CORRECT: cleanup registered immediately, runs even on t.Fatal +messages1, close1 := gqlClient.Subscription(ctx, wsAddr, queryPath, vars, t) +t.Cleanup(close1) +messages2, close2 := gqlClient.Subscription(ctx, wsAddr, queryPath, vars, t) +t.Cleanup(close2) + +// Explicit close before assertions is still fine (double-close is safe) +close1() +close2() + +// WRONG: close only called explicitly — skipped if t.Fatal fires above +messages1, close1 := gqlClient.Subscription(ctx, wsAddr, queryPath, vars, t) +messages2, close2 := gqlClient.Subscription(ctx, wsAddr, queryPath, vars, t) +// ... t.Fatal("timeout") could fire here ... +close1() +close2() +``` + +## Always check every cache log + +Every `defaultCache.ClearLog()` MUST be followed by `defaultCache.GetLog()` with full assertions BEFORE the next `ClearLog()` or end of test. Never clear a log without verifying its contents — skipped checks hide regressions. + +## http.Header is a reference type + +When returning `http.Header` from mocks, always `.Clone()` before returning. The HTTP client mutates the header map in-place (adds `Accept`, `Content-Type`, `Accept-Encoding`), which corrupts the mock's stored state and causes different hashes on subsequent calls. + +```go +// CORRECT: clone before returning +func (m *mock) HeadersForSubgraph(name string) (http.Header, uint64) { + h := m.headers[name] + return h.Clone(), hashHeaders(h) +} + +// WRONG: returns the same map reference — will be mutated by HTTP client +func (m *mock) HeadersForSubgraph(name string) (http.Header, uint64) { + h := m.headers[name] + return h, hashHeaders(h) +} +``` \ No newline at end of file diff --git a/execution/engine/config_factory_federation.go b/execution/engine/config_factory_federation.go index fca8b342b1..9f5a39e2ee 100644 --- a/execution/engine/config_factory_federation.go +++ b/execution/engine/config_factory_federation.go @@ -29,6 +29,30 @@ type SubgraphConfiguration struct { SubscriptionProtocol SubscriptionProtocol } +// SubgraphCachingConfig defines L2 caching configuration for a specific subgraph. +// This allows fine-grained control over which entities and root fields are cached per subgraph. +type SubgraphCachingConfig struct { + SubgraphName string // Name of the subgraph (must match SubgraphConfiguration.Name) + EntityCaching plan.EntityCacheConfigurations // Caching config for entity types in this subgraph + RootFieldCaching plan.RootFieldCacheConfigurations // Caching config for root fields in this subgraph + MutationFieldCaching plan.MutationFieldCacheConfigurations // Caching config for mutation field behavior in this subgraph + SubscriptionEntityPopulation plan.SubscriptionEntityPopulationConfigurations // Caching config for subscription entity population/invalidation + MutationCacheInvalidation plan.MutationCacheInvalidationConfigurations // Caching config for mutation-triggered cache invalidation +} + +// SubgraphCachingConfigs is a list of per-subgraph caching configurations. +type SubgraphCachingConfigs []SubgraphCachingConfig + +// FindBySubgraphName returns the caching config for the given subgraph name, or nil if not found. +func (c SubgraphCachingConfigs) FindBySubgraphName(name string) *SubgraphCachingConfig { + for i := range c { + if c[i].SubgraphName == name { + return &c[i] + } + } + return nil +} + type SubscriptionProtocol string const ( @@ -43,6 +67,7 @@ type federationEngineConfigFactoryOptions struct { subscriptionClientFactory graphql_datasource.GraphQLSubscriptionClientFactory subscriptionType SubscriptionType customResolveMap map[string]resolve.CustomResolve + subgraphCachingConfigs SubgraphCachingConfigs grpcClient grpc.ClientConnInterface } @@ -79,6 +104,32 @@ func WithFederationSubscriptionType(subscriptionType SubscriptionType) Federatio } } +// WithSubgraphEntityCachingConfigs enables entity caching for specific subgraphs and entity types. +// Each SubgraphEntityCachingConfig specifies which entities to cache for a particular subgraph. +// This allows fine-grained control over caching behavior per subgraph and entity type. +// +// Example: +// +// WithSubgraphEntityCachingConfigs(SubgraphEntityCachingConfigs{ +// { +// SubgraphName: "products", +// EntityCaching: plan.EntityCacheConfigurations{ +// {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, +// }, +// }, +// { +// SubgraphName: "accounts", +// EntityCaching: plan.EntityCacheConfigurations{ +// {TypeName: "User", CacheName: "default", TTL: 60 * time.Second}, +// }, +// }, +// }) +func WithSubgraphEntityCachingConfigs(configs SubgraphCachingConfigs) FederationEngineConfigFactoryOption { + return func(options *federationEngineConfigFactoryOptions) { + options.subgraphCachingConfigs = configs + } +} + func NewFederationEngineConfigFactory(engineCtx context.Context, subgraphsConfigs []SubgraphConfiguration, opts ...FederationEngineConfigFactoryOption) *FederationEngineConfigFactory { options := federationEngineConfigFactoryOptions{ httpClient: &http.Client{ @@ -88,7 +139,6 @@ func NewFederationEngineConfigFactory(engineCtx context.Context, subgraphsConfig TLSHandshakeTimeout: 0 * time.Second, }, }, - // TODO grpcClient: nil, streamingClient: &http.Client{ Timeout: 0, @@ -109,6 +159,7 @@ func NewFederationEngineConfigFactory(engineCtx context.Context, subgraphsConfig subscriptionClientFactory: options.subscriptionClientFactory, subscriptionType: options.subscriptionType, customResolveMap: options.customResolveMap, + subgraphCachingConfigs: options.subgraphCachingConfigs, subgraphsConfigs: subgraphsConfigs, } } @@ -122,6 +173,7 @@ type FederationEngineConfigFactory struct { subscriptionClientFactory graphql_datasource.GraphQLSubscriptionClientFactory subscriptionType SubscriptionType customResolveMap map[string]resolve.CustomResolve + subgraphCachingConfigs SubgraphCachingConfigs subgraphsConfigs []SubgraphConfiguration grpcClient grpc.ClientConnInterface @@ -235,12 +287,20 @@ func (f *FederationEngineConfigFactory) createPlannerConfiguration(routerConfig }) } + // Create a mapping from datasource ID to subgraph name + // The composition library generates datasources in the same order as subgraphs are passed + dsIDToSubgraphName := make(map[string]string) + for i, subgraphConfig := range f.subgraphsConfigs { + dsIDToSubgraphName[fmt.Sprintf("%d", i)] = subgraphConfig.Name + } + for _, ds := range engineConfig.DatasourceConfigurations { if ds.Kind != nodev1.DataSourceKind_GRAPHQL { return nil, fmt.Errorf("invalid datasource kind %q", ds.Kind) } - dataSource, err := f.subgraphDataSourceConfiguration(engineConfig, ds) + subgraphName := dsIDToSubgraphName[ds.Id] + dataSource, err := f.subgraphDataSourceConfiguration(engineConfig, ds, subgraphName) if err != nil { return nil, fmt.Errorf("failed to create data source configuration for data source %s: %w", ds.Id, err) } @@ -251,7 +311,7 @@ func (f *FederationEngineConfigFactory) createPlannerConfiguration(routerConfig return &outConfig, nil } -func (f *FederationEngineConfigFactory) subgraphDataSourceConfiguration(engineConfig *nodev1.EngineConfiguration, in *nodev1.DataSourceConfiguration) (plan.DataSource, error) { +func (f *FederationEngineConfigFactory) subgraphDataSourceConfiguration(engineConfig *nodev1.EngineConfiguration, in *nodev1.DataSourceConfiguration, subgraphName string) (plan.DataSource, error) { var out plan.DataSource factory, err := f.graphqlDataSourceFactory() @@ -343,10 +403,11 @@ func (f *FederationEngineConfigFactory) subgraphDataSourceConfiguration(engineCo return nil, fmt.Errorf("error creating custom configuration for data source %s: %w", in.Id, err) } - out, err = plan.NewDataSourceConfiguration[graphql_datasource.Configuration]( + out, err = plan.NewDataSourceConfigurationWithName[graphql_datasource.Configuration]( in.Id, + subgraphName, factory, - f.dataSourceMetaData(in), + f.dataSourceMetaData(in, subgraphName), customConfiguration, ) if err != nil { @@ -356,7 +417,7 @@ func (f *FederationEngineConfigFactory) subgraphDataSourceConfiguration(engineCo return out, nil } -func (f *FederationEngineConfigFactory) dataSourceMetaData(in *nodev1.DataSourceConfiguration) *plan.DataSourceMetadata { +func (f *FederationEngineConfigFactory) dataSourceMetaData(in *nodev1.DataSourceConfiguration, subgraphName string) *plan.DataSourceMetadata { var d plan.DirectiveConfigurations = make([]plan.DirectiveConfiguration, 0, len(in.Directives)) out := &plan.DataSourceMetadata{ @@ -423,6 +484,17 @@ func (f *FederationEngineConfigFactory) dataSourceMetaData(in *nodev1.DataSource }) } + // Add caching configuration for this specific subgraph + // Look up the caching config by subgraph name for explicit per-subgraph configuration + subgraphCachingConfig := f.subgraphCachingConfigs.FindBySubgraphName(subgraphName) + if subgraphCachingConfig != nil { + out.FederationMetaData.EntityCaching = subgraphCachingConfig.EntityCaching + out.FederationMetaData.RootFieldCaching = subgraphCachingConfig.RootFieldCaching + out.FederationMetaData.MutationFieldCaching = subgraphCachingConfig.MutationFieldCaching + out.FederationMetaData.SubscriptionEntityPopulation = subgraphCachingConfig.SubscriptionEntityPopulation + out.FederationMetaData.MutationCacheInvalidation = subgraphCachingConfig.MutationCacheInvalidation + } + return out } diff --git a/execution/engine/config_factory_federation_test.go b/execution/engine/config_factory_federation_test.go index e445fafdcb..0e8834fcc5 100644 --- a/execution/engine/config_factory_federation_test.go +++ b/execution/engine/config_factory_federation_test.go @@ -16,9 +16,25 @@ import ( "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" ) +func mustGraphqlDataSourceConfigurationWithName(t *testing.T, id string, name string, factory plan.PlannerFactory[graphqlDataSource.Configuration], metadata *plan.DataSourceMetadata, customConfig graphqlDataSource.Configuration) plan.DataSourceConfiguration[graphqlDataSource.Configuration] { + t.Helper() + + cfg, err := plan.NewDataSourceConfigurationWithName[graphqlDataSource.Configuration]( + id, + name, + factory, + metadata, + customConfig, + ) + require.NoError(t, err) + + return cfg +} + func TestEngineConfigFactory_EngineConfiguration(t *testing.T) { + t.Parallel() engineCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + t.Cleanup(cancel) runWithoutError := func( t *testing.T, @@ -93,6 +109,7 @@ func TestEngineConfigFactory_EngineConfiguration(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + t.Parallel() tt.run(t, httpClient, streamingClient, []SubgraphConfiguration{ { Name: "users", @@ -131,8 +148,8 @@ func TestEngineConfigFactory_EngineConfiguration(t *testing.T) { require.NoError(t, err) conf.SetDataSources([]plan.DataSource{ - mustGraphqlDataSourceConfiguration(t, - "0", + mustGraphqlDataSourceConfigurationWithName(t, + "0", "users", gqlFactory, &plan.DataSourceMetadata{ RootNodes: []plan.TypeField{ @@ -177,8 +194,8 @@ func TestEngineConfigFactory_EngineConfiguration(t *testing.T) { CustomScalarTypeFields: []graphqlDataSource.SingleTypeField{}, }), ), - mustGraphqlDataSourceConfiguration(t, - "1", + mustGraphqlDataSourceConfigurationWithName(t, + "1", "products", gqlFactory, &plan.DataSourceMetadata{ RootNodes: []plan.TypeField{ @@ -223,8 +240,8 @@ func TestEngineConfigFactory_EngineConfiguration(t *testing.T) { CustomScalarTypeFields: []graphqlDataSource.SingleTypeField{}, }), ), - mustGraphqlDataSourceConfiguration(t, - "2", + mustGraphqlDataSourceConfigurationWithName(t, + "2", "reviews", gqlFactory, &plan.DataSourceMetadata{ RootNodes: []plan.TypeField{ diff --git a/execution/engine/config_factory_proxy_test.go b/execution/engine/config_factory_proxy_test.go index 4cddfef40f..7916871635 100644 --- a/execution/engine/config_factory_proxy_test.go +++ b/execution/engine/config_factory_proxy_test.go @@ -15,6 +15,7 @@ import ( ) func TestProxyEngineConfigFactory_EngineConfiguration(t *testing.T) { + t.Parallel() engineCtx := context.Background() schema, err := graphql.NewSchemaFromString(graphqlGeneratorSchema) @@ -57,6 +58,7 @@ func TestProxyEngineConfigFactory_EngineConfiguration(t *testing.T) { } t.Run("engine config with unknown subscription type", func(t *testing.T) { + t.Parallel() upstreamConfig := ProxyUpstreamConfig{ URL: "http://localhost:8080", Method: http.MethodGet, @@ -136,6 +138,7 @@ func TestProxyEngineConfigFactory_EngineConfiguration(t *testing.T) { }) t.Run("engine config with specific WS subscription type", func(t *testing.T) { + t.Parallel() upstreamConfig := ProxyUpstreamConfig{ URL: "http://localhost:8080", Method: http.MethodGet, @@ -216,6 +219,7 @@ func TestProxyEngineConfigFactory_EngineConfiguration(t *testing.T) { }) t.Run("engine config with SSE subscription type", func(t *testing.T) { + t.Parallel() upstreamConfig := ProxyUpstreamConfig{ URL: "http://localhost:8080", Method: http.MethodGet, diff --git a/execution/engine/engine_config_test.go b/execution/engine/engine_config_test.go index db6427d70b..45a4888a3c 100644 --- a/execution/engine/engine_config_test.go +++ b/execution/engine/engine_config_test.go @@ -15,7 +15,9 @@ import ( "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" ) +//nolint:tparallel // Subtests mutate shared engineConfig state within the parent test. func TestNewConfiguration(t *testing.T) { + t.Parallel() var engineConfig Configuration t.Run("should create a new engine v2 config", func(t *testing.T) { @@ -72,10 +74,11 @@ func TestNewConfiguration(t *testing.T) { } func TestGraphQLDataSourceGenerator_Generate(t *testing.T) { + t.Parallel() client := &http.Client{} streamingClient := &http.Client{} engineCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + t.Cleanup(cancel) doc, report := astparser.ParseGraphqlDocumentString(graphqlGeneratorSchema) require.Falsef(t, report.HasErrors(), "document parser report has errors") @@ -106,6 +109,7 @@ func TestGraphQLDataSourceGenerator_Generate(t *testing.T) { } t.Run("without subscription configuration", func(t *testing.T) { + t.Parallel() dataSourceConfig := mustConfiguration(t, graphqlDataSource.ConfigurationInput{ Fetch: &graphqlDataSource.FetchConfiguration{ URL: "http://localhost:8080", @@ -137,6 +141,7 @@ func TestGraphQLDataSourceGenerator_Generate(t *testing.T) { }) t.Run("with subscription configuration (SSE)", func(t *testing.T) { + t.Parallel() dataSourceConfig := mustConfiguration(t, graphqlDataSource.ConfigurationInput{ Fetch: &graphqlDataSource.FetchConfiguration{ URL: "http://localhost:8080", @@ -174,10 +179,12 @@ func TestGraphQLDataSourceGenerator_Generate(t *testing.T) { } func TestGraphqlFieldConfigurationsGenerator_Generate(t *testing.T) { + t.Parallel() schema, err := graphql.NewSchemaFromString(graphqlGeneratorSchema) require.NoError(t, err) t.Run("should generate field configs without predefined field configs", func(t *testing.T) { + t.Parallel() fieldConfigurations := newGraphQLFieldConfigsGenerator(schema).Generate() sort.Slice(fieldConfigurations, func(i, j int) bool { // make the resulting slice deterministic again return fieldConfigurations[i].TypeName < fieldConfigurations[j].TypeName @@ -218,6 +225,7 @@ func TestGraphqlFieldConfigurationsGenerator_Generate(t *testing.T) { }) t.Run("should generate field configs with predefined field configs", func(t *testing.T) { + t.Parallel() predefinedFieldConfigs := plan.FieldConfigurations{ { TypeName: "User", diff --git a/execution/engine/error_behavior_test.go b/execution/engine/error_behavior_test.go new file mode 100644 index 0000000000..cf86544031 --- /dev/null +++ b/execution/engine/error_behavior_test.go @@ -0,0 +1,876 @@ +package engine + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/jensneuse/abstractlogger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/graphql" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/graphql_datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/service_datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func compactJSONForAssert(t testing.TB, input string) string { + t.Helper() + + var value any + err := json.Unmarshal([]byte(input), &value) + require.NoError(t, err) + + normalized, err := json.Marshal(value) + require.NoError(t, err) + return string(normalized) +} + +// TestErrorBehavior_EndToEnd tests the onError request parameter behavior +// as specified in GraphQL spec PR #1163. +// +// Error Behavior Modes: +// - PROPAGATE (default): Null bubbles up to nearest nullable ancestor +// - NULL: Error yields null at site, no bubbling, errors are collected +// - HALT: First error stops execution, data becomes null +func TestErrorBehavior_EndToEnd(t *testing.T) { + t.Parallel() + // Set up a mock subgraph that returns data with null in non-nullable fields + setupErrorScenario := func(t *testing.T, subgraphResponse string) (*ExecutionEngine, *graphql.Schema) { + t.Helper() + + // Create a mock server that returns the subgraph response + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(subgraphResponse)) + })) + t.Cleanup(server.Close) + + // Schema with non-nullable fields that can trigger errors + schemaSDL := ` + type Query { + user: User + users: [User!]! + } + + type User { + id: ID! + name: String! + email: String + profile: Profile + posts: [Post!]! + } + + type Profile { + bio: String! + avatar: String + } + + type Post { + id: ID! + title: String! + content: String + } + ` + + schema, err := graphql.NewSchemaFromString(schemaSDL) + require.NoError(t, err) + + httpClient := http.DefaultClient + subscriptionClient := graphql_datasource.NewGraphQLSubscriptionClient(httpClient, httpClient, context.Background()) + + factory, err := graphql_datasource.NewFactory(context.Background(), httpClient, subscriptionClient) + require.NoError(t, err) + + schemaConfig, err := graphql_datasource.NewSchemaConfiguration(schemaSDL, nil) + require.NoError(t, err) + + customConfig, err := graphql_datasource.NewConfiguration(graphql_datasource.ConfigurationInput{ + Fetch: &graphql_datasource.FetchConfiguration{ + URL: server.URL, + Method: "POST", + }, + SchemaConfiguration: schemaConfig, + }) + require.NoError(t, err) + + dsConfig, err := plan.NewDataSourceConfiguration[graphql_datasource.Configuration]( + "graphql_datasource", + factory, + &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"user", "users"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "User", FieldNames: []string{"id", "name", "email", "profile", "posts"}}, + {TypeName: "Profile", FieldNames: []string{"bio", "avatar"}}, + {TypeName: "Post", FieldNames: []string{"id", "title", "content"}}, + }, + }, + customConfig, + ) + require.NoError(t, err) + + engineConfig := NewConfiguration(schema) + engineConfig.SetDataSources([]plan.DataSource{dsConfig}) + engineConfig.SetFieldConfigurations(plan.FieldConfigurations{ + {TypeName: "Query", FieldName: "user"}, + {TypeName: "Query", FieldName: "users"}, + }) + + eng, err := NewExecutionEngine(context.Background(), abstractlogger.NoopLogger, engineConfig, resolve.ResolverOptions{ + MaxConcurrency: 1, + }) + require.NoError(t, err) + + return eng, schema + } + + t.Run("PROPAGATE mode - null bubbles up to nearest nullable ancestor", func(t *testing.T) { + t.Parallel() + // Subgraph returns null for non-nullable `name` field + // In PROPAGATE mode, the null should bubble up to the nullable `user` field + subgraphResponse := `{"data":{"user":{"id":"1","name":null,"email":"test@example.com"}}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { user { id name email } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter, WithErrorBehavior(resolve.ErrorBehaviorPropagate)) + require.NoError(t, err) + + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]}],"data":{"user":null}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("NULL mode - error at site, no bubbling, errors collected", func(t *testing.T) { + // Subgraph returns null for non-nullable `name` field + // In NULL mode, the null should stay at `name`, not bubble up + subgraphResponse := `{"data":{"user":{"id":"1","name":null,"email":"test@example.com"}}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { user { id name email } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter, WithErrorBehavior(resolve.ErrorBehaviorNull)) + require.NoError(t, err) + + // In NULL mode: error at site, no bubbling - user object preserved with name=null + // Error included so client can distinguish error null from intentional null + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]}],"data":{"user":{"id":"1","name":null,"email":"test@example.com"}}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("HALT mode - first error stops execution, data becomes null", func(t *testing.T) { + // Subgraph returns null for non-nullable `name` field + // In HALT mode, the entire data should become null on first error + subgraphResponse := `{"data":{"user":{"id":"1","name":null,"email":"test@example.com"}}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { user { id name email } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter, WithErrorBehavior(resolve.ErrorBehaviorHalt)) + require.NoError(t, err) + + // In HALT mode: execution stops, data becomes null + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]}],"data":null}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("NULL mode with multiple errors - all errors collected", func(t *testing.T) { + t.Parallel() + // Subgraph returns multiple null values for non-nullable fields + subgraphResponse := `{"data":{"user":{"id":"1","name":null,"email":"test@example.com","profile":{"bio":null,"avatar":"pic.jpg"}}}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { user { id name email profile { bio avatar } } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter, WithErrorBehavior(resolve.ErrorBehaviorNull)) + require.NoError(t, err) + + // In NULL mode: both errors collected, objects preserved + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]},{"message":"Cannot return null for non-nullable field 'Query.user.profile.bio'.","path":["user","profile","bio"]}],"data":{"user":{"id":"1","name":null,"email":"test@example.com","profile":{"bio":null,"avatar":"pic.jpg"}}}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("PROPAGATE mode with nested non-nullable - bubble to correct level", func(t *testing.T) { + t.Parallel() + // Profile has non-nullable bio, profile itself is nullable + // Null bio should bubble up to profile becoming null + subgraphResponse := `{"data":{"user":{"id":"1","name":"Test","email":"test@example.com","profile":{"bio":null,"avatar":"pic.jpg"}}}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { user { id name email profile { bio avatar } } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter, WithErrorBehavior(resolve.ErrorBehaviorPropagate)) + require.NoError(t, err) + + // In PROPAGATE mode: null bio bubbles up to nullable profile + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.profile.bio'.","path":["user","profile","bio"]}],"data":{"user":{"id":"1","name":"Test","email":"test@example.com","profile":null}}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("NULL mode with array containing errors", func(t *testing.T) { + t.Parallel() + // Array of users where one has null non-nullable field + subgraphResponse := `{"data":{"users":[{"id":"1","name":"Alice","email":"alice@example.com","profile":null,"posts":[]},{"id":"2","name":null,"email":"bob@example.com","profile":null,"posts":[]}]}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { users { id name email } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter, WithErrorBehavior(resolve.ErrorBehaviorNull)) + require.NoError(t, err) + + // In NULL mode: array preserved, second user has null name with error + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.users.name'.","path":["users",1,"name"]}],"data":{"users":[{"id":"1","name":"Alice","email":"alice@example.com"},{"id":"2","name":null,"email":"bob@example.com"}]}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("default behavior without explicit mode is PROPAGATE", func(t *testing.T) { + t.Parallel() + subgraphResponse := `{"data":{"user":{"id":"1","name":null,"email":"test@example.com"}}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { user { id name email } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + // Execute WITHOUT specifying error behavior - should default to PROPAGATE + err := eng.Execute(ctx, req, &resultWriter) + require.NoError(t, err) + + // Default behavior is PROPAGATE: null bubbles up + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]}],"data":{"user":null}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("successful query - no difference between modes", func(t *testing.T) { + t.Parallel() + // No errors in the response + subgraphResponse := `{"data":{"user":{"id":"1","name":"Test User","email":"test@example.com"}}}` + + eng, _ := setupErrorScenario(t, subgraphResponse) + + query := `query { user { id name email } }` + expected := `{"data":{"user":{"id":"1","name":"Test User","email":"test@example.com"}}}` + + for _, mode := range []resolve.ErrorBehavior{ + resolve.ErrorBehaviorPropagate, + resolve.ErrorBehaviorNull, + resolve.ErrorBehaviorHalt, + } { + t.Run(mode.String(), func(t *testing.T) { + t.Parallel() + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter, WithErrorBehavior(mode)) + require.NoError(t, err) + + // All modes should return the same successful result + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + } + }) +} + +// TestErrorBehavior_RequestExtensions tests that error behavior can be set via request extensions +func TestErrorBehavior_RequestExtensions(t *testing.T) { + t.Parallel() + t.Run("parse NULL from extensions", func(t *testing.T) { + t.Parallel() + req := &graphql.Request{ + Query: `query { user { id name } }`, + Extensions: []byte(`{"onError":"NULL"}`), + } + + behavior, ok := req.GetOnErrorBehavior() + assert.True(t, ok) + assert.Equal(t, resolve.ErrorBehaviorNull, behavior) + }) + + t.Run("parse PROPAGATE from extensions", func(t *testing.T) { + t.Parallel() + req := &graphql.Request{ + Query: `query { user { id name } }`, + Extensions: []byte(`{"onError":"PROPAGATE"}`), + } + + behavior, ok := req.GetOnErrorBehavior() + assert.True(t, ok) + assert.Equal(t, resolve.ErrorBehaviorPropagate, behavior) + }) + + t.Run("parse HALT from extensions", func(t *testing.T) { + t.Parallel() + req := &graphql.Request{ + Query: `query { user { id name } }`, + Extensions: []byte(`{"onError":"HALT"}`), + } + + behavior, ok := req.GetOnErrorBehavior() + assert.True(t, ok) + assert.Equal(t, resolve.ErrorBehaviorHalt, behavior) + }) + + t.Run("invalid onError value returns false", func(t *testing.T) { + t.Parallel() + req := &graphql.Request{ + Query: `query { user { id name } }`, + Extensions: []byte(`{"onError":"INVALID"}`), + } + + behavior, ok := req.GetOnErrorBehavior() + assert.False(t, ok) + assert.Equal(t, resolve.ErrorBehaviorPropagate, behavior) // Default fallback + }) + + t.Run("missing onError returns false", func(t *testing.T) { + t.Parallel() + req := &graphql.Request{ + Query: `query { user { id name } }`, + Extensions: []byte(`{"persistedQuery":{"hash":"abc123"}}`), + } + + behavior, ok := req.GetOnErrorBehavior() + assert.False(t, ok) + assert.Equal(t, resolve.ErrorBehaviorPropagate, behavior) // Default fallback + }) + + t.Run("empty extensions returns false", func(t *testing.T) { + t.Parallel() + req := &graphql.Request{ + Query: `query { user { id name } }`, + } + + behavior, ok := req.GetOnErrorBehavior() + assert.False(t, ok) + assert.Equal(t, resolve.ErrorBehaviorPropagate, behavior) // Default fallback + }) +} + +// TestErrorBehavior_ServiceCapabilityIntrospection tests the __service query for onError capability discovery +func TestErrorBehavior_ServiceCapabilityIntrospection(t *testing.T) { + t.Parallel() + // Schema that includes the _Service type for introspection + schemaSDL := ` + type Query { + __service: _Service! + user: User + } + + type _Service { + capabilities: [_Capability!]! + } + + type _Capability { + identifier: String! + value: String + description: String + } + + type User { + id: ID! + name: String! + } + ` + + setupServiceIntrospection := func(t *testing.T, defaultBehavior string) *ExecutionEngine { + t.Helper() + + schema, err := graphql.NewSchemaFromString(schemaSDL) + require.NoError(t, err) + + // Create service datasource configuration + serviceFactory := service_datasource.NewServiceConfigFactory(service_datasource.ServiceOptions{ + DefaultErrorBehavior: defaultBehavior, + }) + + engineConfig := NewConfiguration(schema) + + // Add service datasource + dataSources := serviceFactory.BuildDataSourceConfigurations() + engineConfig.SetDataSources(dataSources) + + fieldConfigs := serviceFactory.BuildFieldConfigurations() + engineConfig.SetFieldConfigurations(fieldConfigs) + + eng, err := NewExecutionEngine(context.Background(), abstractlogger.NoopLogger, engineConfig, resolve.ResolverOptions{ + MaxConcurrency: 1, + }) + require.NoError(t, err) + + return eng + } + + t.Run("introspect onError capability with PROPAGATE default", func(t *testing.T) { + t.Parallel() + eng := setupServiceIntrospection(t, "PROPAGATE") + + query := `query { __service { capabilities { identifier value description } } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter) + require.NoError(t, err) + + expected := `{ + "data": { + "__service": { + "capabilities": [ + { + "identifier": "graphql.onError", + "value": null, + "description": "Supports the onError request extension for controlling error propagation behavior" + }, + { + "identifier": "graphql.defaultErrorBehavior", + "value": "PROPAGATE", + "description": "The default error behavior when onError is not specified in the request" + } + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("introspect onError capability with NULL default", func(t *testing.T) { + t.Parallel() + eng := setupServiceIntrospection(t, "NULL") + + query := `query { __service { capabilities { identifier value description } } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter) + require.NoError(t, err) + + expected := `{ + "data": { + "__service": { + "capabilities": [ + { + "identifier": "graphql.onError", + "value": null, + "description": "Supports the onError request extension for controlling error propagation behavior" + }, + { + "identifier": "graphql.defaultErrorBehavior", + "value": "NULL", + "description": "The default error behavior when onError is not specified in the request" + } + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("introspect onError capability with HALT default", func(t *testing.T) { + t.Parallel() + eng := setupServiceIntrospection(t, "HALT") + + query := `query { __service { capabilities { identifier value description } } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter) + require.NoError(t, err) + + expected := `{ + "data": { + "__service": { + "capabilities": [ + { + "identifier": "graphql.onError", + "value": null, + "description": "Supports the onError request extension for controlling error propagation behavior" + }, + { + "identifier": "graphql.defaultErrorBehavior", + "value": "HALT", + "description": "The default error behavior when onError is not specified in the request" + } + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("introspect without default behavior configured", func(t *testing.T) { + t.Parallel() + eng := setupServiceIntrospection(t, "") + + query := `query { __service { capabilities { identifier value description } } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter) + require.NoError(t, err) + + // Without default behavior configured, only onError capability is returned + expected := `{ + "data": { + "__service": { + "capabilities": [ + { + "identifier": "graphql.onError", + "value": null, + "description": "Supports the onError request extension for controlling error propagation behavior" + } + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + t.Run("introspect only identifiers", func(t *testing.T) { + t.Parallel() + eng := setupServiceIntrospection(t, "PROPAGATE") + + // Client can query only the fields they need + query := `query { __service { capabilities { identifier } } }` + req := &graphql.Request{ + Query: query, + } + + ctx := context.Background() + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(ctx, req, &resultWriter) + require.NoError(t, err) + + expected := `{ + "data": { + "__service": { + "capabilities": [ + {"identifier": "graphql.onError"}, + {"identifier": "graphql.defaultErrorBehavior"} + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) +} + +// TestServiceCapability_CosmoRouterIntegration tests the schema extension API +// that Cosmo router uses to add service capability types to a schema. +// +// This mimics the Cosmo router integration pattern: +// 1. Parse a user schema (no service types) +// 2. Merge with base schema (adds introspection types) +// 3. Extend with service types via NewServiceConfigFactoryWithSchema +// 4. Verify introspection shows _Service and _Capability types +// 5. Verify __service query works +func TestServiceCapability_CosmoRouterIntegration(t *testing.T) { + t.Parallel() + t.Run("schema extension and introspection", func(t *testing.T) { + t.Parallel() + // User's schema - does NOT include _Service, _Capability, or __service + userSchemaSDL := ` + type Query { + user(id: ID!): User + } + type User { + id: ID! + name: String! + } + ` + + // Create schema and extend with service types using the new API + schema, err := graphql.NewSchemaFromString(userSchemaSDL) + require.NoError(t, err) + + // Use NewServiceConfigFactoryWithSchema to extend schema AND create factory + serviceFactory, err := service_datasource.NewServiceConfigFactoryWithSchema( + schema.Document(), + service_datasource.ServiceOptions{ + DefaultErrorBehavior: "PROPAGATE", + }, + ) + require.NoError(t, err) + + // Build engine configuration + // NOTE: NewExecutionEngine automatically adds introspection datasources, + // so we don't need to add them manually here + engineConfig := NewConfiguration(schema) + + // Add service capabilities datasource + for _, ds := range serviceFactory.BuildDataSourceConfigurations() { + engineConfig.AddDataSource(ds) + } + for _, fc := range serviceFactory.BuildFieldConfigurations() { + engineConfig.AddFieldConfiguration(fc) + } + + eng, err := NewExecutionEngine(context.Background(), abstractlogger.NoopLogger, engineConfig, resolve.ResolverOptions{ + MaxConcurrency: 1, + }) + require.NoError(t, err) + + // Test __service query works + t.Run("__service query returns capabilities", func(t *testing.T) { + t.Parallel() + query := `{ __service { capabilities { identifier value description } } }` + req := &graphql.Request{Query: query} + + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(context.Background(), req, &resultWriter) + require.NoError(t, err) + + expected := `{ + "data": { + "__service": { + "capabilities": [ + { + "identifier": "graphql.onError", + "value": null, + "description": "Supports the onError request extension for controlling error propagation behavior" + }, + { + "identifier": "graphql.defaultErrorBehavior", + "value": "PROPAGATE", + "description": "The default error behavior when onError is not specified in the request" + } + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + // Test introspection shows _Service type + t.Run("introspection returns _Service type", func(t *testing.T) { + t.Parallel() + query := `{ + __type(name: "_Service") { + name + kind + fields { name } + } + }` + req := &graphql.Request{Query: query} + + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(context.Background(), req, &resultWriter) + require.NoError(t, err) + + expected := `{ + "data": { + "__type": { + "name": "_Service", + "kind": "OBJECT", + "fields": [ + {"name": "capabilities"} + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + // Test introspection shows _Capability type + t.Run("introspection returns _Capability type", func(t *testing.T) { + t.Parallel() + query := `{ + __type(name: "_Capability") { + name + kind + fields { name } + } + }` + req := &graphql.Request{Query: query} + + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(context.Background(), req, &resultWriter) + require.NoError(t, err) + + expected := `{ + "data": { + "__type": { + "name": "_Capability", + "kind": "OBJECT", + "fields": [ + {"name": "identifier"}, + {"name": "value"}, + {"name": "description"} + ] + } + } + }` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, buf.String())) + }) + + // Test __schema introspection shows user fields (but not __ prefixed fields per GraphQL spec) + // NOTE: Per GraphQL spec and standard behavior, fields starting with __ are not + // included in introspection results (like __schema, __type, and now __service). + // This is intentional - the query works, it's just hidden from field listings. + t.Run("schema introspection shows user-defined fields", func(t *testing.T) { + t.Parallel() + query := `{ + __schema { + queryType { + fields { + name + } + } + } + }` + req := &graphql.Request{Query: query} + + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err := eng.Execute(context.Background(), req, &resultWriter) + require.NoError(t, err) + + // Verify user-defined fields are present + result := buf.String() + assert.Equal(t, `{"data":{"__schema":{"queryType":{"fields":[{"name":"user"}]}}}}`, result) + + // NOTE: __service is NOT in the fields list (per GraphQL spec - __ prefixed fields + // are hidden from introspection). This matches __schema and __type behavior. + // The query still works (tested above), it's just hidden from field listings. + }) + }) + + t.Run("works with NULL default error behavior", func(t *testing.T) { + t.Parallel() + userSchemaSDL := ` + type Query { + hello: String + } + ` + + schema, err := graphql.NewSchemaFromString(userSchemaSDL) + require.NoError(t, err) + + serviceFactory, err := service_datasource.NewServiceConfigFactoryWithSchema( + schema.Document(), + service_datasource.ServiceOptions{ + DefaultErrorBehavior: "NULL", + }, + ) + require.NoError(t, err) + + engineConfig := NewConfiguration(schema) + for _, ds := range serviceFactory.BuildDataSourceConfigurations() { + engineConfig.AddDataSource(ds) + } + for _, fc := range serviceFactory.BuildFieldConfigurations() { + engineConfig.AddFieldConfiguration(fc) + } + + eng, err := NewExecutionEngine(context.Background(), abstractlogger.NoopLogger, engineConfig, resolve.ResolverOptions{ + MaxConcurrency: 1, + }) + require.NoError(t, err) + + query := `{ __service { capabilities { identifier value } } }` + req := &graphql.Request{Query: query} + + buf := new(bytes.Buffer) + resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) + + err = eng.Execute(context.Background(), req, &resultWriter) + require.NoError(t, err) + + // Verify NULL default is returned + result := buf.String() + assert.Equal(t, `{"data":{"__service":{"capabilities":[{"identifier":"graphql.onError","value":null},{"identifier":"graphql.defaultErrorBehavior","value":"NULL"}]}}}`, result) + }) +} diff --git a/execution/engine/execution_engine.go b/execution/engine/execution_engine.go index 9d441eee9e..0228b8dd49 100644 --- a/execution/engine/execution_engine.go +++ b/execution/engine/execution_engine.go @@ -28,15 +28,23 @@ import ( ) type internalExecutionContext struct { - resolveContext *resolve.Context - postProcessor *postprocess.Processor + resolveContext *resolve.Context + postProcessor *postprocess.Processor + cacheStatsOutput *resolve.CacheAnalyticsSnapshot // Optional pointer to capture cache stats after execution } func newInternalExecutionContext() *internalExecutionContext { - return &internalExecutionContext{ + ctx := &internalExecutionContext{ resolveContext: resolve.NewContext(context.Background()), postProcessor: postprocess.NewProcessor(), } + // Inbound request deduplication is opt-in here because the execution engine + // does not by default populate Request.ID and VariablesHash, and dedup with + // uninitialized values would collide every inbound request onto the same + // key — followers would receive an unrelated leader's response. + // Enable via WithInboundRequestDeduplication(), which also wires the hashes. + ctx.resolveContext.ExecutionOptions.DisableInboundRequestDeduplication = true + return ctx } func (e *internalExecutionContext) setRequest(request resolve.Request) { @@ -101,6 +109,73 @@ func WithRequestTraceOptions(options resolve.TraceOptions) ExecutionOptions { } } +func WithSubgraphHeadersBuilder(builder resolve.SubgraphHeadersBuilder) ExecutionOptions { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.SubgraphHeadersBuilder = builder + } +} + +func WithDebugMode() ExecutionOptions { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.Debug = true + } +} + +func WithCachingOptions(options resolve.CachingOptions) ExecutionOptions { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.ExecutionOptions.Caching = options + } +} + +// WithInboundRequestDeduplication enables inbound request deduplication for the +// execution engine. When enabled, the engine populates Request.ID (operation +// hash) and VariablesHash before resolving, so concurrent identical queries +// share a single leader fetch and followers reuse the leader's response bytes. +// Mutations and subscriptions are excluded automatically by SingleFlightAllowed. +func WithInboundRequestDeduplication() ExecutionOptions { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.ExecutionOptions.DisableInboundRequestDeduplication = false + } +} + +func WithRemapVariables(remap map[string]string) ExecutionOptions { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.RemapVariables = remap + } +} + +// WithCacheStatsOutput provides a pointer to a CacheAnalyticsSnapshot struct that will be +// populated with cache statistics after query execution completes. +// This is useful for monitoring, debugging, and testing cache effectiveness. +// +// Example usage: +// +// var stats resolve.CacheAnalyticsSnapshot +// err := engine.Execute(ctx, operation, writer, WithCacheStatsOutput(&stats)) +// if err == nil { +// fmt.Printf("L1 hits: %d, L1 misses: %d\n", stats.L1Hits, stats.L1Misses) +// } +func WithCacheStatsOutput(stats *resolve.CacheAnalyticsSnapshot) ExecutionOptions { + return func(ctx *internalExecutionContext) { + ctx.cacheStatsOutput = stats + } +} + +// WithErrorBehavior sets the error handling behavior for the request. +// This implements the GraphQL spec proposal for onError (PR #1163). +// +// Available behaviors: +// - ErrorBehaviorPropagate: Traditional null bubbling (default) +// - ErrorBehaviorNull: Errors yield null without bubbling +// - ErrorBehaviorHalt: First error stops execution, data becomes null +// +// Note: This option only has effect when OnErrorEnabled is true in ResolverOptions. +func WithErrorBehavior(behavior resolve.ErrorBehavior) ExecutionOptions { + return func(ctx *internalExecutionContext) { + ctx.resolveContext.ExecutionOptions.ErrorBehavior = behavior + } +} + func NewExecutionEngine(ctx context.Context, logger abstractlogger.Logger, engineConfig Configuration, resolverOptions resolve.ResolverOptions) (*ExecutionEngine, error) { executionPlanCache, err := lru.New(1024) if err != nil { @@ -233,9 +308,33 @@ func (e *ExecutionEngine) Execute(ctx context.Context, operation *graphql.Reques }) } + // Helper to capture cache stats after execution + captureStats := func() { + if execContext.cacheStatsOutput != nil { + *execContext.cacheStatsOutput = execContext.resolveContext.GetCacheStats() + } + } + + if !execContext.resolveContext.ExecutionOptions.DisableInboundRequestDeduplication { + // Populate the dedup key inputs the resolver needs. Operation hash goes + // into Request.ID, raw variables bytes into VariablesHash. Only paid for + // when the caller opted into inbound dedup via WithInboundRequestDeduplication. + opHash := pool.Hash64.Get() + if err := astprinter.Print(operation.Document(), opHash); err == nil { + execContext.resolveContext.Request.ID = opHash.Sum64() + } + opHash.Reset() + if len(operation.Variables) > 0 { + _, _ = opHash.Write(operation.Variables) + } + execContext.resolveContext.VariablesHash = opHash.Sum64() + pool.Hash64.Put(opHash) + } + switch p := cachedPlan.(type) { case *plan.SynchronousResponsePlan: - resp, err := e.resolver.ResolveGraphQLResponse(execContext.resolveContext, p.Response, nil, writer) + resp, err := e.resolver.ResolveGraphQLResponse(execContext.resolveContext, p.Response, writer) + captureStats() if err != nil { return err } @@ -244,7 +343,9 @@ func (e *ExecutionEngine) Execute(ctx context.Context, operation *graphql.Reques } return nil case *plan.SubscriptionResponsePlan: - return e.resolver.ResolveGraphQLSubscription(execContext.resolveContext, p.Response, writer) + err := e.resolver.ResolveGraphQLSubscription(execContext.resolveContext, p.Response, writer) + captureStats() + return err default: return errors.New("execution impossible: unknown type of operation") } diff --git a/execution/engine/execution_engine_cost_test.go b/execution/engine/execution_engine_cost_test.go index 6e490de61a..f083196388 100644 --- a/execution/engine/execution_engine_cost_test.go +++ b/execution/engine/execution_engine_cost_test.go @@ -12,7 +12,10 @@ import ( func TestExecutionEngine_Cost(t *testing.T) { + t.Parallel() + t.Run("common on star wars scheme", func(t *testing.T) { + t.Parallel() rootNodes := []plan.TypeField{ {TypeName: "Query", FieldNames: []string{"hero", "droid", "search"}}, {TypeName: "Human", FieldNames: []string{"name", "height", "friends"}}, @@ -877,6 +880,7 @@ func TestExecutionEngine_Cost(t *testing.T) { }) t.Run("union types", func(t *testing.T) { + t.Parallel() unionSchema := ` type Query { search(term: String!): [SearchResult!] @@ -1046,6 +1050,7 @@ func TestExecutionEngine_Cost(t *testing.T) { }) t.Run("listSize", func(t *testing.T) { + t.Parallel() listSchema := ` type Query { items(first: Int, last: Int): [Item!] @@ -1323,6 +1328,7 @@ func TestExecutionEngine_Cost(t *testing.T) { }) t.Run("nested lists with compounding multipliers", func(t *testing.T) { + t.Parallel() nestedSchema := ` type Query { users(first: Int): [User!] @@ -2111,6 +2117,7 @@ func TestExecutionEngine_Cost(t *testing.T) { }) t.Run("sizedFields", func(t *testing.T) { + t.Parallel() connSchema := ` type Query { users(first: Int, last: Int): UserConnection! @@ -2599,6 +2606,7 @@ func TestExecutionEngine_Cost(t *testing.T) { }) t.Run("sizedFields on abstract types", func(t *testing.T) { + t.Parallel() t.Run("parent returns interface, child via inline fragment", func(t *testing.T) { s2Schema := ` interface Connection { @@ -2787,6 +2795,7 @@ func TestExecutionEngine_Cost(t *testing.T) { }) t.Run("sizedFields on interface field", func(t *testing.T) { + t.Parallel() s4Schema := ` interface Paginated { items(first: Int): ItemConnection @@ -3023,6 +3032,7 @@ func TestExecutionEngine_Cost(t *testing.T) { }) t.Run("sizedField returns list of abstract type", func(t *testing.T) { + t.Parallel() s7Schema := ` interface Publishable { id: ID! diff --git a/execution/engine/execution_engine_grpc_test.go b/execution/engine/execution_engine_grpc_test.go index 09a1cf416e..636b2d253c 100644 --- a/execution/engine/execution_engine_grpc_test.go +++ b/execution/engine/execution_engine_grpc_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package engine @@ -226,9 +225,11 @@ func executeOperation(t *testing.T, grpcClient grpc.ClientConnInterface, operati } func TestGRPCSubgraphExecution(t *testing.T) { + t.Parallel() conn := setupGRPCTestGoPluginServer(t) t.Run("running simple query should work", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "UserQuery", Variables: nil, @@ -241,6 +242,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run query with variable", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "UserQuery", Variables: stringify(map[string]any{ @@ -262,6 +264,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run complex query", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "ComplexFilterTypeQuery", Variables: stringify(map[string]any{ @@ -289,6 +292,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run query with two arguments and no variables and mapping for field names", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "QueryWithTwoArguments", Query: `query QueryWithTwoArguments { typeFilterWithArguments(filterField1: "test1", filterField2: "test2") { id name filterField1 filterField2 } }`, @@ -300,6 +304,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run query with a complex input type and no variables and mapping for field names", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "ComplexFilterTypeQuery", Query: `query ComplexFilterTypeQuery { complexFilterType(filter: { filter: { name: "test", filterField1: "test1", filterField2: "test2", pagination: { page: 1, perPage: 10 } } }) { id name } }`, @@ -311,6 +316,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run query with a complex input type and variables with different name", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "ComplexFilterTypeQuery", Variables: stringify(map[string]any{ @@ -331,6 +337,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run query with a type filter with arguments and variables", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "TypeWithMultipleFilterFieldsQuery", Variables: stringify(map[string]any{ @@ -348,6 +355,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run query with a nested type", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "NestedTypeQuery", Query: `query NestedTypeQuery { nestedType { id name b { id name c { id name } } } }`, @@ -359,6 +367,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should run query with a recursive type", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "RecursiveTypeQuery", Query: `query RecursiveTypeQuery { recursiveType { id name recursiveType { id recursiveType { id name } name } } }`, @@ -371,6 +380,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should stop when no mapping is found for the operation request", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "UserQuery", Query: `query UserQuery { user(id: "1") { id name } }`, @@ -394,6 +404,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { // Category tests to verify enum handling t.Run("should correctly handle query for all categories with enum values", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "CategoriesQuery", Query: `query CategoriesQuery { categories { id name kind } }`, @@ -410,6 +421,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should correctly handle query for categories by specific enum kind", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "CategoriesByKindQuery", Variables: stringify(map[string]any{ @@ -435,6 +447,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should correctly handle filter categories with enum and pagination", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "FilterCategoriesQuery", Variables: stringify(map[string]any{ @@ -466,6 +479,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle all enum values with explicit mapping", func(t *testing.T) { + t.Parallel() // Test each enum value explicitly enumValues := []string{"BOOK", "ELECTRONICS", "FURNITURE", "OTHER"} @@ -502,6 +516,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle nullable fields", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "NullableFieldsTypeQuery", Query: `query NullableFieldsTypeQuery { nullableFieldsType { id optionalString optionalInt optionalFloat optionalBoolean requiredString requiredInt } }`, @@ -514,6 +529,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle nullable fields query by ID with full data", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "NullableFieldsTypeByIdQuery", Variables: stringify(map[string]any{ @@ -540,6 +556,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle nullable fields query by ID with partial data", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "NullableFieldsTypeByIdQuery", Variables: stringify(map[string]any{ @@ -566,6 +583,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle nullable fields query by ID with minimal data", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "NullableFieldsTypeByIdQuery", Variables: stringify(map[string]any{ @@ -592,6 +610,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle nullable fields query by ID returning null for not found", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "NullableFieldsTypeByIdQuery", Variables: stringify(map[string]any{ @@ -614,6 +633,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle query for all nullable fields types", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AllNullableFieldsTypesQuery", Query: `query AllNullableFieldsTypesQuery { @@ -637,6 +657,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle nullable fields query with filter", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "NullableFieldsTypeWithFilterQuery", Variables: stringify(map[string]any{ @@ -673,6 +694,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle create nullable fields type mutation", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "CreateNullableFieldsTypeMutation", Variables: stringify(map[string]any{ @@ -715,6 +737,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle create nullable fields type mutation with minimal input", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "CreateNullableFieldsTypeMutation", Variables: stringify(map[string]any{ @@ -753,6 +776,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle update nullable fields type mutation", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "UpdateNullableFieldsTypeMutation", Variables: stringify(map[string]any{ @@ -786,6 +810,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle update nullable fields type mutation returning null for non-existent ID", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "UpdateNullableFieldsTypeMutation", Variables: stringify(map[string]any{ @@ -814,6 +839,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { // BlogPost and Author list tests t.Run("should handle BlogPost query with scalar lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "BlogPostScalarListsQuery", Query: `query BlogPostScalarListsQuery { @@ -845,6 +871,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle BlogPost query with nested scalar lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "BlogPostNestedScalarListsQuery", Query: `query BlogPostNestedScalarListsQuery { @@ -866,6 +893,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle BlogPost query with complex lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "BlogPostComplexListsQuery", Query: `query BlogPostComplexListsQuery { @@ -907,6 +935,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle BlogPost query with nested complex lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "BlogPostNestedComplexListsQuery", Query: `query BlogPostNestedComplexListsQuery { @@ -936,6 +965,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle BlogPost query by ID", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "BlogPostByIdQuery", Variables: stringify(map[string]any{ @@ -968,6 +998,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle BlogPost filtered query", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "BlogPostFilteredQuery", Variables: stringify(map[string]any{ @@ -1004,6 +1035,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle Author query with scalar lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AuthorScalarListsQuery", Query: `query AuthorScalarListsQuery { @@ -1027,6 +1059,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle Author query with nested scalar lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AuthorNestedScalarListsQuery", Query: `query AuthorNestedScalarListsQuery { @@ -1046,6 +1079,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle Author query with complex lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AuthorComplexListsQuery", Query: `query AuthorComplexListsQuery { @@ -1089,6 +1123,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle Author query with nested complex lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AuthorNestedComplexListsQuery", Query: `query AuthorNestedComplexListsQuery { @@ -1123,6 +1158,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle Author query by ID", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AuthorByIdQuery", Variables: stringify(map[string]any{ @@ -1154,6 +1190,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle Author filtered query", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AuthorFilteredQuery", Variables: stringify(map[string]any{ @@ -1188,6 +1225,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle BlogPost creation mutation with complex input lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "CreateBlogPostMutation", Variables: stringify(map[string]any{ @@ -1294,6 +1332,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle Author creation mutation with complex input lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "CreateAuthorMutation", Variables: stringify(map[string]any{ @@ -1377,6 +1416,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle all BlogPosts query with lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AllBlogPostsQuery", Query: `query AllBlogPostsQuery { @@ -1408,6 +1448,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle all Authors query with lists", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "AllAuthorsQuery", Query: `query AllAuthorsQuery { @@ -1435,6 +1476,7 @@ func TestGRPCSubgraphExecution(t *testing.T) { }) t.Run("should handle empty and nullable list items", func(t *testing.T) { + t.Parallel() operation := graphql.Request{ OperationName: "EmptyAndNullableListItems", Query: `query EmptyAndNullableListItems { diff --git a/execution/engine/execution_engine_helpers_test.go b/execution/engine/execution_engine_helpers_test.go index 89b181d563..5fdf33f55d 100644 --- a/execution/engine/execution_engine_helpers_test.go +++ b/execution/engine/execution_engine_helpers_test.go @@ -92,7 +92,7 @@ func createConditionalTestRoundTripper(t *testing.T, testCase conditionalTestCas } } -func stringify(any interface{}) []byte { +func stringify(any any) []byte { out, _ := json.Marshal(any) return out } diff --git a/execution/engine/execution_engine_test.go b/execution/engine/execution_engine_test.go index b71aca6f8a..2122c30129 100644 --- a/execution/engine/execution_engine_test.go +++ b/execution/engine/execution_engine_test.go @@ -62,6 +62,7 @@ func mustFactory(t testing.TB, httpClient *http.Client) plan.PlannerFactory[grap func runExecutionTest(testCase ExecutionEngineTestCase, withError bool, expectedErrorMessage string, options ...executionTestOptions) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() t.Helper() if testCase.skipReason != "" { @@ -133,7 +134,7 @@ func runExecutionTest(testCase ExecutionEngineTestCase, withError bool, expected } if testCase.expectedJSONResponse != "" { - assert.JSONEq(t, testCase.expectedJSONResponse, actualResponse) + assert.Equal(t, compactJSONForAssert(t, testCase.expectedJSONResponse), compactJSONForAssert(t, actualResponse)) } if testCase.expectedResponse != "" { @@ -177,7 +178,9 @@ func mustGraphqlDataSourceConfiguration(t *testing.T, id string, factory plan.Pl } func TestEngineResponseWriter_AsHTTPResponse(t *testing.T) { + t.Parallel() t.Run("no compression", func(t *testing.T) { + t.Parallel() rw := graphql.NewEngineResultWriter() _, err := rw.Write([]byte(`{"key": "value"}`)) require.NoError(t, err) @@ -195,14 +198,16 @@ func TestEngineResponseWriter_AsHTTPResponse(t *testing.T) { }) t.Run("compression based on content encoding header", func(t *testing.T) { - rw := graphql.NewEngineResultWriter() - _, err := rw.Write([]byte(`{"key": "value"}`)) - require.NoError(t, err) - - headers := make(http.Header) - headers.Set("Content-Type", "application/json") + t.Parallel() t.Run("gzip", func(t *testing.T) { + t.Parallel() + rw := graphql.NewEngineResultWriter() + _, err := rw.Write([]byte(`{"key": "value"}`)) + require.NoError(t, err) + + headers := make(http.Header) + headers.Set("Content-Type", "application/json") headers.Set(httpclient.ContentEncodingHeader, "gzip") response := rw.AsHTTPResponse(http.StatusOK, headers) @@ -221,6 +226,13 @@ func TestEngineResponseWriter_AsHTTPResponse(t *testing.T) { }) t.Run("deflate", func(t *testing.T) { + t.Parallel() + rw := graphql.NewEngineResultWriter() + _, err := rw.Write([]byte(`{"key": "value"}`)) + require.NoError(t, err) + + headers := make(http.Header) + headers.Set("Content-Type", "application/json") headers.Set(httpclient.ContentEncodingHeader, "deflate") response := rw.AsHTTPResponse(http.StatusOK, headers) @@ -239,6 +251,7 @@ func TestEngineResponseWriter_AsHTTPResponse(t *testing.T) { } func TestWithAdditionalHttpHeaders(t *testing.T) { + t.Parallel() reqHeader := http.Header{ http.CanonicalHeaderKey("X-Other-Key"): []string{"x-other-value"}, http.CanonicalHeaderKey("Date"): []string{"date-value"}, @@ -249,6 +262,7 @@ func TestWithAdditionalHttpHeaders(t *testing.T) { } t.Run("should add all headers to request without excluded keys", func(t *testing.T) { + t.Parallel() c := resolve.NewContext(context.Background()) c.Request = resolve.Request{ Header: nil, @@ -265,6 +279,7 @@ func TestWithAdditionalHttpHeaders(t *testing.T) { }) t.Run("should only add headers that are not excluded", func(t *testing.T) { + t.Parallel() c := resolve.NewContext(context.Background()) c.Request = resolve.Request{ Header: nil, @@ -354,6 +369,7 @@ func relaxFieldSelectionMergingNullability() executionTestOptions { } func TestExecutionEngine_Execute(t *testing.T) { + t.Parallel() t.Run("apollo router compatibility subrequest HTTP error enabled", runWithoutError( ExecutionEngineTestCase{ schema: graphql.StarwarsSchema(t), @@ -542,6 +558,7 @@ func TestExecutionEngine_Execute(t *testing.T) { )) t.Run("introspection", func(t *testing.T) { + t.Parallel() schema := graphql.StarwarsSchema(t) t.Run("execute type introspection query", runWithoutError( @@ -1349,7 +1366,7 @@ func TestExecutionEngine_Execute(t *testing.T) { t.Run("execute operation with variables for arguments", runWithoutError( ExecutionEngineTestCase{ schema: graphql.StarwarsSchema(t), - operation: graphql.LoadStarWarsQuery(starwars.FileDroidWithArgAndVarQuery, map[string]interface{}{"droidID": "R2D2"}), + operation: graphql.LoadStarWarsQuery(starwars.FileDroidWithArgAndVarQuery, map[string]any{"droidID": "R2D2"}), dataSources: []plan.DataSource{ mustGraphqlDataSourceConfiguration(t, "id", @@ -1412,7 +1429,7 @@ func TestExecutionEngine_Execute(t *testing.T) { operation: func(t *testing.T) graphql.Request { return graphql.Request{ OperationName: "MyHeroes", - Variables: stringify(map[string]interface{}{ + Variables: stringify(map[string]any{ "heroNames": []string{"Luke Skywalker", "R2-D2"}, }), Query: `query MyHeroes($heroNames: [String!]!){ @@ -1666,7 +1683,7 @@ func TestExecutionEngine_Execute(t *testing.T) { operation: func(t *testing.T) graphql.Request { return graphql.Request{ OperationName: "", - Variables: stringify(map[string]interface{}{}), + Variables: stringify(map[string]any{}), Query: `query{ charactersByIds(ids: 1) { name @@ -1735,7 +1752,7 @@ func TestExecutionEngine_Execute(t *testing.T) { operation: func(t *testing.T) graphql.Request { return graphql.Request{ OperationName: "", - Variables: stringify(map[string]interface{}{ + Variables: stringify(map[string]any{ "ids": 1, }), Query: `query($ids: [Int]) { charactersByIds(ids: $ids) { name } }`, @@ -1858,6 +1875,7 @@ func TestExecutionEngine_Execute(t *testing.T) { )) t.Run("execute operation with default arguments", func(t *testing.T) { + t.Parallel() t.Run("query variables with default value", runWithoutError( ExecutionEngineTestCase{ schema: heroWithArgumentSchema(t), @@ -1924,7 +1942,7 @@ func TestExecutionEngine_Execute(t *testing.T) { operation: func(t *testing.T) graphql.Request { return graphql.Request{ OperationName: "queryVariables", - Variables: stringify(map[string]interface{}{ + Variables: stringify(map[string]any{ "name": "Luke", "nameOptional": "Skywalker", }), @@ -2328,6 +2346,7 @@ func TestExecutionEngine_Execute(t *testing.T) { )) t.Run("invalid and inaccessible enum values", func(t *testing.T) { + t.Parallel() schema, err := graphql.NewSchemaFromString(enumSDL) require.NoError(t, err) @@ -4457,7 +4476,9 @@ func TestExecutionEngine_Execute(t *testing.T) { }) t.Run("variables", func(t *testing.T) { + t.Parallel() t.Run("operation with optional input fields", func(t *testing.T) { + t.Parallel() schemaString := ` type Query { field(arg: Input): String @@ -4592,6 +4613,8 @@ func TestExecutionEngine_Execute(t *testing.T) { t.Run("execute operation with nested fetch on one of the types", func(t *testing.T) { + t.Parallel() + definition := ` type User implements Node { id: ID! @@ -4934,7 +4957,10 @@ func TestExecutionEngine_Execute(t *testing.T) { t.Run("validation of optional @requires dependencies", func(t *testing.T) { + t.Parallel() + t.Run("execute operation with @requires and @external", func(t *testing.T) { + t.Parallel() definition := ` type User { id: ID! @@ -5095,6 +5121,7 @@ func TestExecutionEngine_Execute(t *testing.T) { }) t.Run("do not validate non-nullable @requires dependencies", func(t *testing.T) { + t.Parallel() definition := ` type Query { accounts: [User!]! @@ -5262,6 +5289,7 @@ func TestExecutionEngine_Execute(t *testing.T) { }) t.Run("validate nullable @requires dependencies", func(t *testing.T) { + t.Parallel() definition := ` type Query { accounts: [User!]! @@ -5429,6 +5457,7 @@ func TestExecutionEngine_Execute(t *testing.T) { }) t.Run("validate nested nullable @requires dependencies", func(t *testing.T) { + t.Parallel() definition := ` type Query { accounts: [User!]! @@ -5632,6 +5661,7 @@ func TestExecutionEngine_Execute(t *testing.T) { }) t.Run("field merging with different nullability on non-overlapping union types", func(t *testing.T) { + t.Parallel() unionSchema := ` union Entity = User | Organization type Query { entity: Entity } @@ -5785,6 +5815,7 @@ func testConditionalNetHttpClient(t *testing.T, testCase conditionalTestCase) *h } func TestExecutionEngine_GetCachedPlan(t *testing.T) { + t.Parallel() schema, err := graphql.NewSchemaFromString(testSubscriptionDefinition) require.NoError(t, err) @@ -5848,12 +5879,20 @@ func TestExecutionEngine_GetCachedPlan(t *testing.T) { ), }) - engine, err := NewExecutionEngine(context.Background(), abstractlogger.NoopLogger, engineConfig, resolve.ResolverOptions{ - MaxConcurrency: 1024, - }) - require.NoError(t, err) + newEngine := func(t *testing.T) *ExecutionEngine { + t.Helper() + + engine, err := NewExecutionEngine(context.Background(), abstractlogger.NoopLogger, engineConfig, resolve.ResolverOptions{ + MaxConcurrency: 1024, + }) + require.NoError(t, err) + + return engine + } t.Run("should reuse cached plan", func(t *testing.T) { + t.Parallel() + engine := newEngine(t) t.Cleanup(engine.executionPlanCache.Purge) require.Equal(t, 0, engine.executionPlanCache.Len()) @@ -5882,6 +5921,8 @@ func TestExecutionEngine_GetCachedPlan(t *testing.T) { }) t.Run("should create new plan and cache it", func(t *testing.T) { + t.Parallel() + engine := newEngine(t) t.Cleanup(engine.executionPlanCache.Purge) require.Equal(t, 0, engine.executionPlanCache.Len()) @@ -5924,8 +5965,7 @@ func BenchmarkIntrospection(b *testing.B) { require.NoError(b, err) expectedResponse := buf.Bytes() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := b.Context() type benchCase struct { engine *ExecutionEngine writer *graphql.EngineResultWriter @@ -5956,7 +5996,7 @@ func BenchmarkIntrospection(b *testing.B) { require.Equal(b, string(expectedResponse), writer.String()) pool := sync.Pool{ - New: func() interface{} { + New: func() any { return newBenchCase() }, } @@ -5979,8 +6019,7 @@ func BenchmarkIntrospection(b *testing.B) { } func BenchmarkExecutionEngine(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := b.Context() type benchCase struct { engine *ExecutionEngine writer *graphql.EngineResultWriter @@ -6041,7 +6080,7 @@ func BenchmarkExecutionEngine(b *testing.B) { require.Equal(b, "{\"data\":{\"hello\":\"world\"}}", writer.String()) pool := sync.Pool{ - New: func() interface{} { + New: func() any { return newBenchCase() }, } diff --git a/execution/engine/extractor_test.go b/execution/engine/extractor_test.go index fa7111c2b7..4720c99978 100644 --- a/execution/engine/extractor_test.go +++ b/execution/engine/extractor_test.go @@ -11,6 +11,7 @@ import ( ) func TestExtractor_ExtractFieldsFromRequest(t *testing.T) { + t.Parallel() schema, err := graphql.NewSchemaFromString(testDefinition) require.NoError(t, err) diff --git a/execution/engine/federation_caching_analytics_test.go b/execution/engine/federation_caching_analytics_test.go new file mode 100644 index 0000000000..6550147963 --- /dev/null +++ b/execution/engine/federation_caching_analytics_test.go @@ -0,0 +1,2138 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestFederationCaching_Analytics verifies that cache analytics snapshots (L1/L2 reads, writes, +// field hashes, entity types) are correctly recorded and returned in response headers. +func TestFederationCaching_Analytics(t *testing.T) { + t.Parallel() + // Common cache key constants used across subtests + const ( + keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` + keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + keyUser1234 = `{"__typename":"User","key":{"id":"1234"}}` + keyMe = `{"__typename":"Query","field":"me"}` + dsAccounts = "accounts" + dsProducts = "products" + dsReviews = "reviews" + ) + + // Field hash constants — xxhash of the rendered scalar field values. + // These are deterministic because xxhash is seeded identically each time. + const ( + hashProductNameTrilby uint64 = 1032923585965781586 // xxhash("Trilby") + hashProductNameFedora uint64 = 2432227032303632641 // xxhash("Fedora") + hashUserUsernameMe uint64 = 4957449860898447395 // xxhash("Me") + ) + + // Entity key constants for field hash assertions + const ( + entityKeyProductTop1 = `{"upc":"top-1"}` + entityKeyProductTop2 = `{"upc":"top-2"}` + entityKeyUser1234 = `{"id":"1234"}` + ) + + // Byte sizes of cached entities (measured from actual JSON marshalling) + const ( + byteSizeProductTop1 = 177 // Product top-1 entity (reviews subgraph response) + byteSizeProductTop2 = 233 // Product top-2 entity (reviews subgraph response) + byteSizeTopProducts = 127 // Query.topProducts root field (products subgraph response) + byteSizeUser1234 = 49 // User 1234 entity (accounts subgraph response) + byteSizeUser1234Full = 105 // User 1234 entity from L1 (full accumulated entity with passthrough) + byteSizeQueryMe = 56 // Query.me root field (accounts subgraph response) + ) + + // Shared field hashes for the multi-upstream query (topProducts with reviews). + // Product.name: 2 products (Trilby, Fedora) → 2 distinct hashes + // User.username: 2 reviews both by "Me" → 2 identical hashes + // All FieldSourceSubgraph by default (overridden in specific tests) + multiUpstreamFieldHashes := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + } + + // L2 hit field hashes — same data but all sourced from L2 cache + multiUpstreamFieldHashesL2 := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + multiUpstreamEntityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + } + + // Standard subgraph caching configs used by L2 and L1+L2 tests + multiUpstreamCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + + t.Run("L2 miss then hit with analytics", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query — all L2 misses, populates L2 cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 deduplicated in batch) + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Written after subgraph fetch on miss + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Written after subgraph fetch on miss + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written to L2 after fetch + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // User entity written after accounts fetch + }, + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — all L2 hits from populated cache + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 deduplicated) + }, + // No L2Writes: all served from cache, no fetches needed + FieldHashes: multiUpstreamFieldHashesL2, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1 cache analytics with entity reuse", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + EnableCacheAnalytics: true, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Query that triggers L1 entity reuse: + // 1. Query.me -> accounts subgraph -> returns User 1234 -> populates L1 + // 2. User.sameUserReviewers -> reviews subgraph -> returns [User 1234] + // 3. Entity fetch for User 1234 -> L1 HIT (no subgraph call) + query := `query { + me { + id + username + sameUserReviewers { + id + username + } + } + }` + + tracker.Reset() + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`, string(resp)) + + expected := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L1Reads: []resolve.CacheKeyEvent{ + // L1 hit: User 1234 populated by accounts fetch, reused for sameUserReviewers entity resolution + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234Full}, + // L1 miss: reviews subgraph also checks L1 for User (union optimization enables L1 for reviews) + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, + }, + L1Writes: []resolve.CacheWriteEvent{ + // Query.me root field written to L1 after accounts subgraph fetch + {CacheKey: keyMe, EntityType: "Query", ByteSize: byteSizeQueryMe, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL1, Source: resolve.CacheSourceQuery}, + // Reviews entity fetch for User 1234 also writes to L1 (union optimization enables it) + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234Full, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL1, Source: resolve.CacheSourceQuery}, + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL1}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 2, UniqueKeys: 1}, + }, + }) + assert.Equal(t, expected, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1+L2 combined analytics", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + EnableCacheAnalytics: true, + }), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query — L2 misses (L1 is per-request, always fresh) + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts}, // L2 miss: User entity not yet cached (second review's User 1234 hits L1 after this fetch) + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Written after reviews subgraph fetch + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Written after reviews subgraph fetch + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written after products fetch + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // User entity written after accounts fetch + }, + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — L2 hits (L1 is per-request, reset between requests) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // L2 hit: populated by Request 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // L2 hit: populated by Request 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: root field cached by Request 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // L2 hit: User entity cached by Request 1 (second review's User 1234 hits L1) + }, + // No L2Writes: all entities served from L2 cache + FieldHashes: multiUpstreamFieldHashesL2, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("root field with args - L2 analytics", func(t *testing.T) { + t.Parallel() + // Tests that root field caching with arguments properly records L2 analytics events. + // This covers the root field path in tryL2CacheLoad (no L1 keys branch). + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + rootFieldArgsCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(rootFieldArgsCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + const ( + keyUserById1234 = `{"__typename":"Query","field":"user","args":{"id":"1234"}}` + keyUserById5678 = `{"__typename":"Query","field":"user","args":{"id":"5678"}}` + dsAccountsLocal = "accounts" + byteSizeUser1234 = 38 // {"user":{"id":"1234","username":"Me"}} + byteSizeUser5678 = 45 // {"user":{"id":"5678","username":"User 5678"}} + + hashUsernameMeLocal uint64 = 4957449860898447395 // xxhash("Me") + hashUsername5678Local uint64 = 15512417390573333165 // xxhash("User 5678") + entityKeyUser1234Local = `{"id":"1234"}` + entityKeyUser5678Local = `{"id":"5678"}` + ) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query (id=1234) — L2 miss, populates cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", ByteSize: byteSizeUser1234, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written after accounts fetch + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, // User returned by root field, data from subgraph + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // 1 User entity from root field response + }, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query (same id=1234) — L2 hit + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById1234, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsAccountsLocal, ByteSize: byteSizeUser1234}, // L2 hit: populated by first request + }, + // No L2Writes: data served from cache + FieldHashes: []resolve.EntityFieldHash{ + // Source is FieldSourceSubgraph (default) because entity source tracking operates at + // entity cache level, not root field cache level — no entity caching configured for User + {EntityType: "User", FieldName: "username", FieldHash: hashUsernameMeLocal, KeyRaw: entityKeyUser1234Local, Source: resolve.FieldSourceSubgraph}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, + }, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Third query (different id=5678) — L2 miss (different args = different cache key) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "5678"}, t) + assert.Equal(t, `{"data":{"user":{"id":"5678","username":"User 5678"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Third query should call accounts (different args)") + + expected3 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUserById5678, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsAccountsLocal}, // L2 miss: different args, not cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyUserById5678, EntityType: "Query", ByteSize: byteSizeUser5678, DataSource: dsAccountsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // New args written to L2 + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: hashUsername5678Local, KeyRaw: entityKeyUser5678Local, Source: resolve.FieldSourceSubgraph}, // User 5678 data from subgraph + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, + }, + }) + assert.Equal(t, expected3, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("root field only - L2 analytics without entity caching", func(t *testing.T) { + t.Parallel() + // Tests root field caching analytics in isolation — only root field caching configured, + // no entity caching. Verifies that only root field events appear in analytics. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Only configure root field caching for products — no entity caching at all + rootOnlyConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(rootOnlyConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + const ( + keyTopProductsLocal = `{"__typename":"Query","field":"topProducts"}` + dsProductsLocal = "products" + byteSizeTP = 127 // Query.topProducts root field response + ) + + // First query — L2 miss for root field, no events for entities (not configured) + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Products subgraph called (root field miss), reviews + accounts always called (no entity caching) + assert.Equal(t, 1, tracker.GetCount(productsHost), "First query should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + expected1 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProductsLocal}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", ByteSize: byteSizeTP, DataSource: dsProductsLocal, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written after products fetch + }, + // Only entity types tracked during resolution (not caching-dependent) + FieldHashes: multiUpstreamFieldHashes, + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected1, normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Second query — L2 hit for root field, entities still fetched (not cached) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Products subgraph skipped (root field cache hit), reviews + accounts still called + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products (root field cache hit)") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "Second query should call reviews (no entity caching)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should call accounts (no entity caching)") + + expected2 := normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProductsLocal, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProductsLocal, ByteSize: byteSizeTP}, // L2 hit: root field cached by first request + }, + // No L2Writes: root field served from cache, entities have no caching configured + FieldHashes: multiUpstreamFieldHashes, // Entity field hashes still tracked (resolution, not caching) + EntityTypes: multiUpstreamEntityTypes, + }) + assert.Equal(t, expected2, normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("subgraph fetch records HTTPStatusCode and ResponseBytes", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — all L2 misses, subgraph fetches happen + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + + // Filter to subgraph fetch events only (exclude L2 read events) + var subgraphTimings []resolve.FetchTimingEvent + for _, ft := range snap.FetchTimings { + if ft.Source == resolve.FieldSourceSubgraph { + subgraphTimings = append(subgraphTimings, ft) + } + } + timings := normalizeFetchTimings(subgraphTimings) + + assert.Equal(t, []resolve.FetchTimingEvent{ + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: true, HTTPStatusCode: 200, ResponseBytes: 62}, // _entities fetch for User 1234 + {DataSource: dsProducts, EntityType: "Query", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: false, HTTPStatusCode: 200, ResponseBytes: 136}, // topProducts root field fetch + {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceSubgraph, ItemCount: 1, IsEntityFetch: true, HTTPStatusCode: 200, ResponseBytes: 376}, // _entities fetch for Product top-1 and top-2 + }, timings) + }) + + t.Run("cache hit has zero HTTPStatusCode and ResponseBytes", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(multiUpstreamCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request — populates L2 cache + resp, _ := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + // Second request — all L2 hits + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + snap := parseCacheAnalytics(t, headers) + timings := normalizeFetchTimings(snap.FetchTimings) + + assert.Equal(t, []resolve.FetchTimingEvent{ + {DataSource: dsAccounts, EntityType: "User", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for User 1234 entity + {DataSource: dsProducts, EntityType: "Query", Source: resolve.FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, // L2 hit for topProducts root field + {DataSource: dsReviews, EntityType: "Product", Source: resolve.FieldSourceL2, ItemCount: 2, IsEntityFetch: true}, // L2 hit for Product top-1 and top-2 entities + }, timings) + }) +} + +// TestFederationCaching_ShadowMode verifies shadow mode: L2 reads/writes happen normally but +// cached data is never served. Fresh data is always fetched and compared for staleness detection. +func TestFederationCaching_ShadowMode(t *testing.T) { + t.Parallel() + // Cache key constants (same as TestCacheAnalyticsE2E — same federation setup) + const ( + keyProductTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` + keyProductTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + keyUser1234 = `{"__typename":"User","key":{"id":"1234"}}` + dsAccounts = "accounts" + dsProducts = "products" + dsReviews = "reviews" + ) + + // Field hash constants + const ( + hashProductNameTrilby uint64 = 1032923585965781586 + hashProductNameFedora uint64 = 2432227032303632641 + hashUserUsernameMe uint64 = 4957449860898447395 + ) + + // Entity key constants + const ( + entityKeyProductTop1 = `{"upc":"top-1"}` + entityKeyProductTop2 = `{"upc":"top-2"}` + entityKeyUser1234 = `{"id":"1234"}` + ) + + // Byte sizes + const ( + byteSizeProductTop1 = 177 + byteSizeProductTop2 = 233 + byteSizeTopProducts = 127 + byteSizeUser1234 = 49 + ) + + // Shadow comparison hash constants + const ( + shadowHashProductTop1 uint64 = 8656108128396512717 + shadowHashProductTop2 uint64 = 4671066427758823003 + shadowHashUser1234 uint64 = 188937276969638005 + shadowBytesProductTop1 = 124 + shadowBytesProductTop2 = 180 + shadowBytesUser1234 = 17 + ) + + // Shadow cached field hash constants (ProvidesData fields hashed from cached value during shadow comparison) + const ( + shadowFieldHashProductReviewsTop1 uint64 = 13894521258004960943 // xxhash of Product reviews field for top-1 + shadowFieldHashProductReviewsTop2 uint64 = 3182276346310063647 // xxhash of Product reviews field for top-2 + ) + + // Field hashes when all data comes from subgraph (first request, all misses) + fieldHashesSubgraph := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceSubgraph}, + } + + // Field hashes when all data comes from L2 (second request, all hits — no shadow entities) + fieldHashesL2 := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + // Field hashes when all entities are in shadow mode (second request): + // L2 source hashes from resolution + ShadowCached hashes from compareShadowValues + fieldHashesL2AllShadow := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop1, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection + {EntityType: "Product", FieldName: "reviews", FieldHash: shadowFieldHashProductReviewsTop2, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceShadowCached}, // Cached Product reviews field for per-field staleness detection + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + // Field hashes when only User is in shadow mode (mixed mode, second request): + // Product/root L2 source hashes + User L2 + User ShadowCached hashes + fieldHashesL2MixedShadow := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: entityKeyProductTop1, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: entityKeyProductTop2, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username for per-field staleness detection + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceShadowCached}, // Cached User username (second review) + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: hashUserUsernameMe, KeyRaw: entityKeyUser1234, Source: resolve.FieldSourceL2}, + } + + entityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + } + + expectedResponseBody := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + + t.Run("shadow all entities - always fetches", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Shadow mode for all entity types, real caching for root fields + shadowConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(shadowConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) + + // Request 1: All L2 misses → all 3 subgraphs called + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews, Shadow: true}, // Shadow L2 miss: cache empty, subgraph fetched + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not shadow, fetched normally + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Written to L2 even in shadow (populates for comparison) + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Written to L2 even in shadow + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written normally (not shadow) + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // User entity written for future shadow comparison + }, + // No ShadowComparisons: nothing cached yet to compare against + FieldHashes: fieldHashesSubgraph, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Request 2: Entity L2 hits (shadow) → entity subgraphs STILL called + // Root field L2 hit → products NOT called (real caching) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 2: reviews should be called (Product entity shadow)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (User entity shadow)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2, Shadow: true}, // Shadow L2 hit: cached by Req 1, but subgraph still called + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field served from cache (not shadow) + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison + }, + L2Writes: []resolve.CacheWriteEvent{ + // Only shadow entities re-written (refreshed from subgraph); root field NOT re-written (real cache hit) + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Shadow re-write: fresh data from subgraph + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Shadow re-write: fresh data from subgraph + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Shadow re-write: fresh User from accounts + }, + ShadowComparisons: []resolve.ShadowComparisonEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop1, FreshHash: shadowHashProductTop1, CachedBytes: shadowBytesProductTop1, FreshBytes: shadowBytesProductTop1, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) + {CacheKey: keyProductTop2, EntityType: "Product", IsFresh: true, CachedHash: shadowHashProductTop2, FreshHash: shadowHashProductTop2, CachedBytes: shadowBytesProductTop2, FreshBytes: shadowBytesProductTop2, DataSource: dsReviews, ConfiguredTTL: 30 * time.Second}, // Fresh: cached matches subgraph (data unchanged) + {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (no mutation) + }, + FieldHashes: fieldHashesL2AllShadow, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("mixed mode - shadow User, real cache Product", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Shadow mode for User only, real caching for Product and root fields + mixedConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, // real caching + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, // shadow + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(mixedConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) + + // Request 1: All L2 misses → all 3 subgraphs called + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(productsHost), "request 1: should call products exactly once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "request 1: should call reviews exactly once") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 1: should call accounts exactly once") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: Product entity not yet cached + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User entity not yet cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Product written for real caching + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Product written for real caching + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written for real caching + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // User written (shadow still populates L2) + }, + FieldHashes: fieldHashesSubgraph, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Request 2: Product real cache hit, User shadow → still fetched + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, 0, tracker.GetCount(productsHost), "request 2: products should NOT be called (root field real cache hit)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "request 2: reviews should NOT be called (Product entity real cache hit)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts SHOULD be called (User entity shadow)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache (no subgraph call) + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache (no subgraph call) + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field served from cache + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: accounts still called for comparison + }, + L2Writes: []resolve.CacheWriteEvent{ + // Only User re-written (shadow always fetches fresh); Product/root NOT re-written (real hit) + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Shadow re-write: fresh data from accounts + }, + ShadowComparisons: []resolve.ShadowComparisonEvent{ + // Only User has shadow comparisons; Product uses real caching + {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph + }, + FieldHashes: fieldHashesL2MixedShadow, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("shadow mode without analytics - safety only", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + shadowConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), // analytics NOT enabled + withSubgraphEntityCachingConfigs(shadowConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Populate cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + // No stats when analytics is disabled + assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") + + // Request 2: Shadow mode — accounts still fetched (data not served from cache) + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "request 2: accounts should be called (shadow mode)") + // No stats when analytics is disabled + assert.Empty(t, headers.Get("X-Cache-Analytics"), "analytics header should not be set when analytics disabled") + }) + + t.Run("graduation - shadow to real", func(t *testing.T) { + t.Parallel() + // Same FakeLoaderCache shared across both engine setups + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Phase 1: Shadow mode for User + shadowConfigs := engine.SubgraphCachingConfigs{ + {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, ShadowMode: true}, + }}, + } + + setup1 := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(shadowConfigs), + )) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost1 := mustParseHost(setup1.AccountsUpstreamServer.URL) + + // Phase 1, Request 1: Populate L2 cache + tracker.Reset() + resp, headers := gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: dsReviews}, // Real L2 miss: first request, cache empty + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // Real L2 miss: root field not yet cached + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: dsAccounts, Shadow: true}, // Shadow L2 miss: User not yet cached + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", ByteSize: byteSizeProductTop1, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Product written for real caching + {CacheKey: keyProductTop2, EntityType: "Product", ByteSize: byteSizeProductTop2, DataSource: dsReviews, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Product written for real caching + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written for real caching + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // User written (shadow still populates L2) + }, + FieldHashes: fieldHashesSubgraph, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Phase 1, Request 2: Shadow — accounts still called + tracker.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup1.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost1), "phase 1 request 2: accounts should be called (shadow mode)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: Product served from cache + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: Product served from cache + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field from cache + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234, Shadow: true}, // Shadow L2 hit: cached but accounts still called + }, + L2Writes: []resolve.CacheWriteEvent{ + // Only shadow User re-written; Product/root use real caching (no re-write on hit) + {CacheKey: keyUser1234, EntityType: "User", ByteSize: byteSizeUser1234, DataSource: dsAccounts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Shadow re-write with fresh data from accounts + }, + ShadowComparisons: []resolve.ShadowComparisonEvent{ + {CacheKey: keyUser1234, EntityType: "User", IsFresh: true, CachedHash: shadowHashUser1234, FreshHash: shadowHashUser1234, CachedBytes: shadowBytesUser1234, FreshBytes: shadowBytesUser1234, DataSource: dsAccounts, ConfiguredTTL: 30 * time.Second}, // Fresh: cached User matches subgraph (safe to graduate) + }, + FieldHashes: fieldHashesL2MixedShadow, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + setup1.Close() + + // Phase 2: Graduated to real caching (same cache, new engine) + realConfigs := engine.SubgraphCachingConfigs{ + {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, // No ShadowMode! + }}, + } + + tracker2 := newSubgraphCallTracker(http.DefaultTransport) + trackingClient2 := &http.Client{Transport: tracker2} + + setup2 := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), // SAME cache + withHTTPClient(trackingClient2), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(realConfigs), + )) + t.Cleanup(setup2.Close) + + accountsHost2 := mustParseHost(setup2.AccountsUpstreamServer.URL) + + // Phase 2, Request 3: Real L2 hit — accounts NOT called + tracker2.Reset() + resp, headers = gqlClient.QueryWithHeaders(ctx, setup2.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, expectedResponseBody, string(resp)) + assert.Equal(t, 0, tracker2.GetCount(accountsHost2), "phase 2: accounts should NOT be called (real L2 hit)") + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyProductTop1, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop1}, // Real L2 hit: cached by Phase 1 + {CacheKey: keyProductTop2, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: dsReviews, ByteSize: byteSizeProductTop2}, // Real L2 hit: cached by Phase 1 + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // Real L2 hit: root field cached by Phase 1 + {CacheKey: keyUser1234, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: dsAccounts, ByteSize: byteSizeUser1234}, // Real L2 hit: graduated from shadow, no longer calls accounts + }, + // No L2Writes: all real cache hits, no fetches needed + // No ShadowComparisons: User is no longer in shadow mode + FieldHashes: fieldHashesL2, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) +} + +// TestFederationCaching_MutationImpact verifies that mutation impact analytics correctly record +// entity cache key, freshness hash, and staleness detection for mutated entities. +func TestFederationCaching_MutationImpact(t *testing.T) { + t.Parallel() + + // Configure entity caching for User on accounts subgraph + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + t.Run("mutation with prior cache shows stale entity", func(t *testing.T) { + t.Parallel() + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + // Uses a query that triggers entity caching for User through authorWithoutProvides. + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Request 1: Query to populate L2 cache with User entity + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Request 2: Mutation — analytics must identify the mutation entity, + // but mutations are not allowed to read L2 for stale-value inspection. + tracker.Reset() + respMut, headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Equal(t, `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}`, string(respMut)) + + snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) + require.NotNil(t, snap.MutationEvents, "should have mutation impact events") + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + HadCachedValue: false, // Mutation analytics must not read L2 + IsStale: false, // No cache read means no stale comparison + CachedHash: event.CachedHash, + FreshHash: event.FreshHash, + CachedBytes: event.CachedBytes, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + }) + + t.Run("mutation without prior cache shows no-cache event", func(t *testing.T) { + t.Parallel() + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // NO prior query — L2 cache is empty + // Send mutation directly + tracker.Reset() + respMut, headersMut := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Equal(t, `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}`, string(respMut)) + + snap := normalizeSnapshot(parseCacheAnalytics(t, headersMut)) + require.NotNil(t, snap.MutationEvents, "should have mutation impact events") + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + HadCachedValue: false, // No prior query, L2 cache was empty + IsStale: false, // Cannot be stale without a cached value to compare + FreshHash: event.FreshHash, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + }) +} + +// TestFederationCachingAliases verifies that aliased fields produce correct cache analytics, +// ensuring field hashes and entity tracking work with GraphQL aliases. +func TestFederationCachingAliases(t *testing.T) { + t.Parallel() + // Helper to create a standard setup for alias caching tests + setupAliasCachingTest := func(t *testing.T) ( + *federationtesting.FederationSetup, + *GraphqlClient, + context.Context, + context.CancelFunc, + *subgraphCallTracker, + *FakeLoaderCache, + string, // accountsHost + ) { + t.Helper() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + return setup, gqlClient, ctx, cancel, tracker, defaultCache, accountsHost + } + + t.Run("L2 hit - alias then no alias", func(t *testing.T) { + t.Parallel() + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: Use alias userName for username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: No alias (original field name) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit from normalized cache)") + }) + + t.Run("L2 hit - two different aliases for same field", func(t *testing.T) { + t.Parallel() + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias u1 for username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: alias u2 for username + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { u2: username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u2":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u2":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") + }) + + t.Run("no collision - alias matches another field name", func(t *testing.T) { + t.Parallel() + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias realName for username (realName is another real field on User) + // This triggers an accounts entity fetch for username, stores normalized {"username":"Me"} in L2 + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { realName: username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"realName":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"realName":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once for username") + + // Request 2: actual username field (no alias) - same underlying field + // Should be an L2 hit because both resolve username from accounts + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field username)") + }) + + t.Run("no collision - field name used as alias for another field", func(t *testing.T) { + t.Parallel() + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: username field (no alias) - triggers accounts entity fetch for username + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: different alias (u1) for same field (username) + // Should be an L2 hit because the underlying field is the same + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { u1: username } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"u1":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"u1":"Me"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying field)") + }) + + t.Run("L2 hit - multiple fields some aliased some not", func(t *testing.T) { + t.Parallel() + setup, gqlClient, ctx, _, tracker, defaultCache, accountsHost := setupAliasCachingTest(t) + + // Request 1: alias username and include realName (realName comes from reviews, not accounts) + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { topProducts { name reviews { body authorWithoutProvides { userName: username realName } } } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"userName":"Me","realName":"User Usington"}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1 should call accounts subgraph once") + + // Request 2: no alias on username, different alias on realName + // accounts entity cache should be L2 hit (same username field) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name reviews { body authorWithoutProvides { username name: realName } } } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","name":"User Usington"}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2 should skip accounts (L2 hit - same underlying username field)") + }) + + t.Run("L1 hit within single request with aliases", func(t *testing.T) { + t.Parallel() + // Tests L1 cache with aliased fields across entity fetches within the same request. + // Flow: + // 1. topProducts -> products + // 2. reviews -> reviews (entity fetch for Products) + // 3. authorWithoutProvides -> accounts (entity fetch for User 1234, aliased userName: username) + // -> User 1234 stored in L1 with normalized field names + // 4. sameUserReviewers -> reviews (returns [User 1234] reference) + // 5. Entity resolution for sameUserReviewers -> accounts + // -> User 1234 is L1 HIT (already fetched in step 3), entire accounts call skipped + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Query with alias on username - sameUserReviewers returns same user, + // should be L1 hit from the first entity fetch + tracker.Reset() + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + userName: username + } + } + } + } + }` + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, + string(resp)) + + // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides + // sameUserReviewers entity resolution hits L1 -> accounts call skipped + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") + }) + + t.Run("L1 hit within single request with mixed alias and no alias", func(t *testing.T) { + t.Parallel() + // Same as above, but the nested sameUserReviewers uses the original field name (no alias) + // while the outer authorWithoutProvides uses an alias. L1 cache stores normalized data, + // so the nested fetch should still hit L1 despite the different field naming. + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Outer authorWithoutProvides uses alias "userName: username" + // Nested sameUserReviewers uses plain "username" (no alias) + // L1 should still hit because cache stores normalized (original) field names + tracker.Reset() + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + username + } + } + } + } + }` + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, + string(resp)) + + // With L1 enabled: first accounts call fetches User 1234 for authorWithoutProvides + // sameUserReviewers entity resolution hits L1 -> accounts call skipped + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "Should call accounts subgraph once (sameUserReviewers skipped via L1)") + }) + + t.Run("L2 hit - aliased root field then original root field", func(t *testing.T) { + t.Parallel() + setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + + // Request 1: alias the root field topProducts as tp + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { tp: topProducts { name } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls1 := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") + + // Request 2: same root field without alias — should L2 hit (same cache key) + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { topProducts { name } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls2 := tracker.GetCount(productsHost) + assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit from aliased root field)") + }) + + t.Run("L2 hit - two different root field aliases", func(t *testing.T) { + t.Parallel() + setup, gqlClient, ctx, _, tracker, defaultCache, _ := setupAliasCachingTest(t) + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + + // Request 1: alias p1 for topProducts + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { p1: topProducts { name } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"p1":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls1 := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCalls1, "Request 1 should call products subgraph once") + + // Request 2: different alias p2 for same root field + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { p2: topProducts { name } }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"p2":[{"name":"Trilby"},{"name":"Fedora"}]}}`, + string(resp)) + + productsCalls2 := tracker.GetCount(productsHost) + assert.Equal(t, 0, productsCalls2, "Request 2 should skip products (L2 hit - same underlying root field)") + }) + + t.Run("L1+L2 combined - alias entity caching across both layers", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: alias on username, sameUserReviewers triggers L1 hit within request + // L2 is also populated on the first entity fetch + defaultCache.ClearLog() + tracker.Reset() + query1 := `query { + topProducts { + reviews { + authorWithoutProvides { + id + userName: username + sameUserReviewers { + id + userName: username + } + } + } + } + }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","userName":"Me","sameUserReviewers":[{"id":"1234","userName":"Me"}]}}]}]}}`, + string(resp)) + + accountsCalls1 := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls1, "Request 1: accounts called once (sameUserReviewers skipped via L1)") + + // Request 2: same query without alias — L2 hit for User entity, no accounts calls + defaultCache.ClearLog() + tracker.Reset() + query2 := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}`, + string(resp)) + + accountsCalls2 := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls2, "Request 2: accounts skipped (L2 hit from normalized cache)") + }) + + t.Run("L2 analytics - aliased root field", func(t *testing.T) { + t.Parallel() + const ( + keyTopProducts = `{"__typename":"Query","field":"topProducts"}` + dsProducts = "products" + byteSizeTopProducts = 53 + hashProductNameTrilby = uint64(1032923585965781586) + hashProductNameFedora = uint64(2432227032303632641) + ) + + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Shared field hashes: Product.name for Trilby and Fedora from root field response + // Products are not entity-resolved (no @key fetch), so KeyRaw is empty + fieldHashes := []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameTrilby, KeyRaw: "{}"}, // xxhash("Trilby"), no entity key (root field) + {EntityType: "Product", FieldName: "name", FieldHash: hashProductNameFedora, KeyRaw: "{}"}, // xxhash("Fedora"), no entity key (root field) + } + entityTypes := []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 1}, // 2 products from root field, no entity keys + } + + // Request 1: aliased root field — L2 miss, populates cache + tracker.Reset() + query1 := `query { tp: topProducts { name } }` + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query1, nil, t) + assert.Equal(t, `{"data":{"tp":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + + // Cache key must use original field name "topProducts", NOT the alias "tp" + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: dsProducts}, // L2 miss: first request, cache empty + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", ByteSize: byteSizeTopProducts, DataSource: dsProducts, CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Root field written after products fetch + }, + FieldHashes: fieldHashes, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Request 2: original root field (no alias) — L2 hit from Request 1 + tracker.Reset() + query2 := `query { topProducts { name } }` + resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query2, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + + // Same cache key hit regardless of alias difference + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyTopProducts, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: dsProducts, ByteSize: byteSizeTopProducts}, // L2 hit: populated by aliased Request 1 + }, + // No L2Writes: served from cache + FieldHashes: fieldHashes, + EntityTypes: entityTypes, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("L1 dedup - two aliases for same entity field in single request", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL1Cache: true, EnableL2Cache: false}), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Two aliases (a1, a2) for the same entity field (authorWithoutProvides) + // Both resolve the same User 1234 — second should be L1 hit + tracker.Reset() + query := `query { + topProducts { + reviews { + a1: authorWithoutProvides { + id + username + } + a2: authorWithoutProvides { + id + username + } + } + } + }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]},{"reviews":[{"a1":{"id":"1234","username":"Me"},"a2":{"id":"1234","username":"Me"}}]}]}}`, + string(resp)) + + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "Should call accounts once (second alias L1 hit for same User entity)") + }) +} + +// TestFederationCaching_HeaderImpactAnalytics verifies that subgraph header prefix hashes +// are correctly applied to L2 cache keys and reflected in analytics events. +func TestFederationCaching_HeaderImpactAnalytics(t *testing.T) { + t.Parallel() + t.Run("shadow mode with header prefix - same response different headers", func(t *testing.T) { + t.Parallel() + mockHeaders := &headerForwardingMock{ + headers: map[string]http.Header{ + "products": {"Authorization": {"Bearer token-A"}}, + "reviews": {"Authorization": {"Bearer token-A"}}, + "accounts": {"Authorization": {"Bearer token-A"}}, + }, + } + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": NewFakeLoaderCache()}), + withHTTPClient(&http.Client{Transport: tracker}), + withSubgraphHeadersBuilder(mockHeaders), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true, ShadowMode: true}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true, ShadowMode: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true, ShadowMode: true}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Request 1: L2 miss → fetch → write with token-A header hash prefix + tracker.Reset() + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body authorWithoutProvides { username } } } }`, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + snap1 := normalizeSnapshot(parseCacheAnalytics(t, headers)) + + // Capture response hashes from first request (deterministic subgraph responses) + responseHashes := make(map[string]uint64, len(snap1.HeaderImpactEvents)) + for _, ev := range snap1.HeaderImpactEvents { + responseHashes[ev.BaseKey] = ev.ResponseHash + } + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews", Shadow: true}, // cache empty (first request) + {CacheKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews", Shadow: true}, // cache empty (first request) + {CacheKey: `{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: "products", Shadow: true}, // cache empty (first request) + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: "accounts", Shadow: true}, // cache empty (first request) + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: `11945571715631340836:{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", ByteSize: 177, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `11945571715631340836:{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", ByteSize: 233, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `11945571715631340836:{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", ByteSize: 127, DataSource: "products", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `11945571715631340836:{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", ByteSize: 49, DataSource: "accounts", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: 1032923585965781586, KeyRaw: `{"upc":"top-1"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: 2432227032303632641, KeyRaw: `{"upc":"top-2"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + }, + HeaderImpactEvents: []resolve.HeaderImpactEvent{ + // Authorization: Bearer token-A → header hash 11945571715631340836 + {BaseKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"Product","key":{"upc":"top-1"}}`], EntityType: "Product", DataSource: "reviews"}, + {BaseKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"Product","key":{"upc":"top-2"}}`], EntityType: "Product", DataSource: "reviews"}, + {BaseKey: `{"__typename":"Query","field":"topProducts"}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"Query","field":"topProducts"}`], EntityType: "Query", DataSource: "products"}, + {BaseKey: `{"__typename":"User","key":{"id":"1234"}}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"User","key":{"id":"1234"}}`], EntityType: "User", DataSource: "accounts"}, + }, + }), snap1) + + // Request 2: Switch to token-B headers (actually different headers forwarded to subgraphs) + mockHeaders.setAll(http.Header{"Authorization": {"Bearer token-B"}}) + + tracker.Reset() + resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body authorWithoutProvides { username } } } }`, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + snap2 := normalizeSnapshot(parseCacheAnalytics(t, headers)) + + // Key insight: different headers (token-B) → SAME ResponseHash → headers are irrelevant + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews", Shadow: true}, // token-B prefix not in cache + {CacheKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews", Shadow: true}, // token-B prefix not in cache + {CacheKey: `{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: "products", Shadow: true}, // token-B prefix not in cache + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: "accounts", Shadow: true}, // token-B prefix not in cache + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: `4753115417090238877:{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", ByteSize: 177, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `4753115417090238877:{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", ByteSize: 233, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `4753115417090238877:{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", ByteSize: 127, DataSource: "products", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `4753115417090238877:{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", ByteSize: 49, DataSource: "accounts", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: 1032923585965781586, KeyRaw: `{"upc":"top-1"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: 2432227032303632641, KeyRaw: `{"upc":"top-2"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + }, + HeaderImpactEvents: []resolve.HeaderImpactEvent{ + // Authorization: Bearer token-B → header hash 4753115417090238877; SAME ResponseHash → headers irrelevant + {BaseKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, HeaderHash: 4753115417090238877, ResponseHash: responseHashes[`{"__typename":"Product","key":{"upc":"top-1"}}`], EntityType: "Product", DataSource: "reviews"}, + {BaseKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, HeaderHash: 4753115417090238877, ResponseHash: responseHashes[`{"__typename":"Product","key":{"upc":"top-2"}}`], EntityType: "Product", DataSource: "reviews"}, + {BaseKey: `{"__typename":"Query","field":"topProducts"}`, HeaderHash: 4753115417090238877, ResponseHash: responseHashes[`{"__typename":"Query","field":"topProducts"}`], EntityType: "Query", DataSource: "products"}, + {BaseKey: `{"__typename":"User","key":{"id":"1234"}}`, HeaderHash: 4753115417090238877, ResponseHash: responseHashes[`{"__typename":"User","key":{"id":"1234"}}`], EntityType: "User", DataSource: "accounts"}, + }, + }), snap2) + }) + + t.Run("non-shadow mode - events on L2 miss, no events on L2 hit", func(t *testing.T) { + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": NewFakeLoaderCache()}), + withHTTPClient(&http.Client{Transport: tracker}), + withSubgraphHeadersBuilder(&headerForwardingMock{ + headers: map[string]http.Header{ + "products": {"Authorization": {"Bearer token-A"}}, + "reviews": {"Authorization": {"Bearer token-A"}}, + "accounts": {"Authorization": {"Bearer token-A"}}, + }, + }), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Request 1: L2 miss → fetch → HeaderImpactEvents recorded + tracker.Reset() + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body authorWithoutProvides { username } } } }`, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + snap1 := normalizeSnapshot(parseCacheAnalytics(t, headers)) + + // Capture response hashes (deterministic) + responseHashes := make(map[string]uint64, len(snap1.HeaderImpactEvents)) + for _, ev := range snap1.HeaderImpactEvents { + responseHashes[ev.BaseKey] = ev.ResponseHash + } + + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews"}, // L2 miss: cache empty + {CacheKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews"}, // L2 miss: cache empty + {CacheKey: `{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: "products"}, // L2 miss: root field not yet cached + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: "accounts"}, // L2 miss: User not yet cached + }, + L2Writes: []resolve.CacheWriteEvent{ + // Authorization: Bearer token-A → header hash prefix 11945571715631340836 + {CacheKey: `11945571715631340836:{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", ByteSize: 177, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `11945571715631340836:{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", ByteSize: 233, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `11945571715631340836:{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", ByteSize: 127, DataSource: "products", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `11945571715631340836:{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", ByteSize: 49, DataSource: "accounts", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: 1032923585965781586, KeyRaw: `{"upc":"top-1"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: 2432227032303632641, KeyRaw: `{"upc":"top-2"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + }, + HeaderImpactEvents: []resolve.HeaderImpactEvent{ + {BaseKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"Product","key":{"upc":"top-1"}}`], EntityType: "Product", DataSource: "reviews"}, + {BaseKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"Product","key":{"upc":"top-2"}}`], EntityType: "Product", DataSource: "reviews"}, + {BaseKey: `{"__typename":"Query","field":"topProducts"}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"Query","field":"topProducts"}`], EntityType: "Query", DataSource: "products"}, + {BaseKey: `{"__typename":"User","key":{"id":"1234"}}`, HeaderHash: 11945571715631340836, ResponseHash: responseHashes[`{"__typename":"User","key":{"id":"1234"}}`], EntityType: "User", DataSource: "accounts"}, + }, + }), snap1) + + // Request 2: Same headers → L2 hit → no fetch → empty analytics (except L2 reads) + tracker.Reset() + resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body authorWithoutProvides { username } } } }`, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + snap2 := normalizeSnapshot(parseCacheAnalytics(t, headers)) + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: "reviews", ByteSize: 177}, // L2 hit: populated by request 1 + {CacheKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", Kind: resolve.CacheKeyHit, DataSource: "reviews", ByteSize: 233}, // L2 hit: populated by request 1 + {CacheKey: `{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", Kind: resolve.CacheKeyHit, DataSource: "products", ByteSize: 127}, // L2 hit: root field cached by request 1 + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", Kind: resolve.CacheKeyHit, DataSource: "accounts", ByteSize: 49}, // L2 hit: User cached by request 1 + }, + // No L2Writes, no HeaderImpactEvents: all served from cache, no fresh fetches + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: 1032923585965781586, KeyRaw: `{"upc":"top-1"}`, Source: resolve.FieldSourceL2}, + {EntityType: "Product", FieldName: "name", FieldHash: 2432227032303632641, KeyRaw: `{"upc":"top-2"}`, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceL2}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceL2}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + }, + }), snap2) + }) + + t.Run("no events when IncludeSubgraphHeaderPrefix is false", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": NewFakeLoaderCache()}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + tracker.Reset() + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body authorWithoutProvides { username } } } }`, nil, t) + assert.Equal(t, + `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, + string(resp)) + + snap := normalizeSnapshot(parseCacheAnalytics(t, headers)) + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews"}, + {CacheKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", Kind: resolve.CacheKeyMiss, DataSource: "reviews"}, + {CacheKey: `{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", Kind: resolve.CacheKeyMiss, DataSource: "products"}, + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", Kind: resolve.CacheKeyMiss, DataSource: "accounts"}, + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, EntityType: "Product", ByteSize: 177, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, EntityType: "Product", ByteSize: 233, DataSource: "reviews", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `{"__typename":"Query","field":"topProducts"}`, EntityType: "Query", ByteSize: 127, DataSource: "products", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", ByteSize: 49, DataSource: "accounts", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "Product", FieldName: "name", FieldHash: 1032923585965781586, KeyRaw: `{"upc":"top-1"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "Product", FieldName: "name", FieldHash: 2432227032303632641, KeyRaw: `{"upc":"top-2"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`, Source: resolve.FieldSourceSubgraph}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "Product", Count: 2, UniqueKeys: 2}, + {TypeName: "User", Count: 2, UniqueKeys: 1}, + }, + // No HeaderImpactEvents: IncludeSubgraphHeaderPrefix is false + }), snap) + }) +} diff --git a/execution/engine/federation_caching_batch_test.go b/execution/engine/federation_caching_batch_test.go new file mode 100644 index 0000000000..9d81e3139e --- /dev/null +++ b/execution/engine/federation_caching_batch_test.go @@ -0,0 +1,922 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +const ( + productKeyTop1 = `{"__typename":"Product","key":{"upc":"top-1"}}` + productKeyTop2 = `{"__typename":"Product","key":{"upc":"top-2"}}` + productKeyTop3 = `{"__typename":"Product","key":{"upc":"top-3"}}` + + productValueTop1 = `{"upc":"top-1","name":"Trilby","price":11}` + productValueTop2 = `{"upc":"top-2","name":"Fedora","price":22}` + productValueTop3 = `{"upc":"top-3","name":"Boater","price":33}` +) + +func expectedBatchProductCache(upcs ...string) map[string]string { + expected := make(map[string]string, len(upcs)) + for _, upc := range upcs { + switch upc { + case "top-1": + expected[productKeyTop1] = productValueTop1 + case "top-2": + expected[productKeyTop2] = productValueTop2 + case "top-3": + expected[productKeyTop3] = productValueTop3 + } + } + return expected +} + +func assertFakeLoaderCacheContents(t *testing.T, cache *FakeLoaderCache, want map[string]string) { + t.Helper() + + cache.mu.RLock() + got := make(map[string]string, len(cache.storage)) + for key, entry := range cache.storage { + got[key] = string(entry.data) + } + cache.mu.RUnlock() + + assert.Equal(t, want, got) +} + +// TestBatchEntityCacheLookup_FullFetch_AllMiss tests batch entity cache with all cache misses. +// Query products(upcs: ["top-1","top-2","top-3"]) with ArgumentIsEntityKey=true. +// All entities are fetched from the subgraph and cached individually. +func TestBatchEntityCacheLookup_FullFetch_AllMiss(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // Request 1: all cache misses → subgraph called + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, string(resp)) + t.Logf("Request 1 tracker: %v", tracker.GetCounts()) + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph once") + + // Verify cache log: 1 get (batch miss) + 1 set (batch write) + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: false}, + {Key: productKeyTop2, Hit: false}, + {Key: productKeyTop3, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop1, TTL: 30 * time.Second}, + {Key: productKeyTop2, TTL: 30 * time.Second}, + {Key: productKeyTop3, TTL: 30 * time.Second}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2", "top-3")) +} + +// TestBatchEntityCacheLookup_FullFetch_AllHit tests that a second identical batch request +// serves all entities from cache without calling the subgraph. +func TestBatchEntityCacheLookup_FullFetch_AllHit(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // Request 1: populate cache + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: false}, + {Key: productKeyTop2, Hit: false}, + {Key: productKeyTop3, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop1, TTL: 30 * time.Second}, + {Key: productKeyTop2, TTL: 30 * time.Second}, + {Key: productKeyTop3, TTL: 30 * time.Second}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2", "top-3")) + defaultCache.ClearLog() + + // Request 2: should hit cache — no subgraph call + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + assert.Equal(t, string(resp1), string(resp2), "both requests should return identical responses") + assert.Equal(t, 0, tracker.GetCount(productsHost), "second request should NOT call products subgraph (all cache hits)") + + // Exact cache log: single GET with all 3 hits, no SET (served from cache) + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: true}, + {Key: productKeyTop2, Hit: true}, + {Key: productKeyTop3, Hit: true}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2", "top-3")) +} + +// TestBatchEntityCacheLookup_FullFetch_PartialMiss_FetchesAll tests that in full fetch mode, +// even when some entities are cached, the resolver is called with the full argument list. +func TestBatchEntityCacheLookup_FullFetch_PartialMiss_FetchesAll(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // Request 1: warm cache with just top-1 + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1"]) { upc name price } }`, nil, t) + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKeyTop1, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKeyTop1, TTL: 30 * time.Second}}}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1")) + + // Request 2: top-1 cached, top-2 not → full fetch mode fetches all + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22}]}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "full fetch mode should call products subgraph with the complete list") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: true}, + {Key: productKeyTop2, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop1, TTL: 30 * time.Second}, + {Key: productKeyTop2, TTL: 30 * time.Second}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2")) +} + +// TestBatchEntityCacheLookup_FullFetch_EmptyList tests that an empty list argument +// returns an empty array without calling the resolver. +func TestBatchEntityCacheLookup_FullFetch_EmptyList(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: []) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[]}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(productsHost), "empty list should not call products subgraph") + + // No cache operations should have occurred + assert.Equal(t, []CacheLogEntry{}, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache()) +} + +// TestBatchEntityCacheLookup_CacheKeySharing_ScalarAndBatch tests that scalar and batch +// lookups produce the same cache key format, enabling cache sharing. +func TestBatchEntityCacheLookup_CacheKeySharing_ScalarAndBatch(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "product", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + }, + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // Request 1: scalar product(upc: "top-1") populates cache + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { product(upc: "top-1") { upc name price } }`, nil, t) + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKeyTop1, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKeyTop1, TTL: 30 * time.Second}}}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1")) + + // Request 2: batch products(upcs: ["top-1", "top-2"]) — top-1 hits cache (from scalar), + // top-2 misses. Full fetch mode still calls subgraph with full list. + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22}]}}`, string(resp)) + // In full fetch mode, partial miss means subgraph is called + assert.Equal(t, 1, tracker.GetCount(productsHost), "full fetch mode with partial miss should call products subgraph") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: true}, + {Key: productKeyTop2, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop1, TTL: 30 * time.Second}, + {Key: productKeyTop2, TTL: 30 * time.Second}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2")) +} + +// TestBatchEntityCacheLookup_FullFetch_SingleElement tests that a single-element batch +// behaves identically to scalar lookup — same cache key format. +func TestBatchEntityCacheLookup_FullFetch_SingleElement(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // Request 1: single-element batch + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1"]) { upc name price } }`, nil, t) + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11}]}}`, string(resp1)) + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKeyTop1, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKeyTop1, TTL: 30 * time.Second}}}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1")) + + // Request 2: should hit cache + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1"]) { upc name price } }`, nil, t) + assert.Equal(t, string(resp1), string(resp2)) + assert.Equal(t, 0, tracker.GetCount(productsHost), "second request should hit cache") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKeyTop1, Hit: true}}}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1")) +} + +func TestBatchEntityCacheLookup_PartialFetch_SomeCached(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphRequestTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + PartialBatchLoad: true, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1"]) { upc name price } }`, nil, t) + + warmLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKeyTop1, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKeyTop1, TTL: 30 * time.Second}}}, + }, warmLog) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1")) + defaultCache.ClearLog() + + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, string(resp)) + + productsRequests := tracker.GetRequests(productsHost) + require.Equal(t, 1, len(productsRequests)) + assert.Equal(t, `{"query":"query($a: [String!]!){products(upcs: $a){upc name price}}","variables":{"a":["top-2","top-3"]}}`, productsRequests[0]) + + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: true}, + {Key: productKeyTop2, Hit: false}, + {Key: productKeyTop3, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop2, TTL: 30 * time.Second}, + {Key: productKeyTop3, TTL: 30 * time.Second}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2", "top-3")) +} + +func TestBatchEntityCacheLookup_PartialFetch_AllHit(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + PartialBatchLoad: true, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + warmLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: false}, + {Key: productKeyTop2, Hit: false}, + {Key: productKeyTop3, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop1, TTL: 30 * time.Second}, + {Key: productKeyTop2, TTL: 30 * time.Second}, + {Key: productKeyTop3, TTL: 30 * time.Second}, + }}, + }, warmLog) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2", "top-3")) + defaultCache.ClearLog() + + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(productsHost)) + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: true}, + {Key: productKeyTop2, Hit: true}, + {Key: productKeyTop3, Hit: true}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2", "top-3")) +} + +func TestBatchEntityCacheLookup_PartialFetch_AllMiss(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphRequestTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + PartialBatchLoad: true, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + tracker.Reset() + defaultCache.ClearLog() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, string(resp)) + + // Verify subgraph was called with full argument list (all miss) + assert.Equal(t, 1, tracker.GetRequestCount(productsHost)) + + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: false}, + {Key: productKeyTop2, Hit: false}, + {Key: productKeyTop3, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop1, TTL: 30 * time.Second}, + {Key: productKeyTop2, TTL: 30 * time.Second}, + {Key: productKeyTop3, TTL: 30 * time.Second}, + }}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-2", "top-3")) +} + +func TestBatchEntityCacheLookup_PartialFetch_OrderPreservation(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphRequestTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + PartialBatchLoad: true, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-3"]) { upc name price } }`, nil, t) + + warmLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKeyTop3, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKeyTop3, TTL: 30 * time.Second}}}, + }, warmLog) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-3")) + defaultCache.ClearLog() + + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-3", "top-1"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-3","name":"Boater","price":33},{"upc":"top-1","name":"Trilby","price":11}]}}`, string(resp)) + + productsRequests := tracker.GetRequests(productsHost) + require.Equal(t, 1, len(productsRequests)) + assert.Equal(t, `{"query":"query($a: [String!]!){products(upcs: $a){upc name price}}","variables":{"a":["top-1"]}}`, productsRequests[0]) + + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop3, Hit: true}, + {Key: productKeyTop1, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{{Key: productKeyTop1, TTL: 30 * time.Second}}}, + }, defaultCache.GetLog()) + assertFakeLoaderCacheContents(t, defaultCache, expectedBatchProductCache("top-1", "top-3")) +} + +// TestBatchEntityKeyCachingWithArgumentIsEntityKey tests that ArgumentIsEntityKey=true +// produces per-element cache keys (not a single batch key), enabling individual entity +// cache hits on a second identical request with zero subgraph calls. +func TestBatchEntityKeyCachingWithArgumentIsEntityKey(t *testing.T) { + t.Parallel() + productKeyTop1 := `{"__typename":"Product","key":{"upc":"top-1"}}` + productKeyTop2 := `{"__typename":"Product","key":{"upc":"top-2"}}` + productKeyTop3 := `{"__typename":"Product","key":{"upc":"top-3"}}` + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "products", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // Request 1: all cache misses — subgraph called, 3 per-element keys written + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, string(resp1)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph once") + + // Verify per-element cache contents were written + assertFakeLoaderCacheContents(t, defaultCache, map[string]string{ + productKeyTop1: `{"upc":"top-1","name":"Trilby","price":11}`, + productKeyTop2: `{"upc":"top-2","name":"Fedora","price":22}`, + productKeyTop3: `{"upc":"top-3","name":"Boater","price":33}`, + }) + + // Verify cache log: 1 get (batch miss) + 1 set (batch write) + assert.Equal(t, []CacheLogEntry{ + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Hit: false}, + }}, + {Operation: CacheOperationSet, Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, TTL: 30 * time.Second}, + }}, + }, defaultCache.GetLog()) + + // Request 2: all cache hits — zero subgraph calls + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { products(upcs: ["top-1", "top-2", "top-3"]) { upc name price } }`, nil, t) + + assert.Equal(t, string(resp1), string(resp2), "both requests should return identical responses") + assert.Equal(t, 0, tracker.GetCount(productsHost), "second request should NOT call products subgraph (all cache hits)") + + // Verify cache log: 1 get (all hits) — no SET needed + assert.Equal(t, []CacheLogEntry{ + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Hit: true}, + }}, + }, defaultCache.GetLog()) +} diff --git a/execution/engine/federation_caching_entity_field_args_test.go b/execution/engine/federation_caching_entity_field_args_test.go new file mode 100644 index 0000000000..53ba4c1818 --- /dev/null +++ b/execution/engine/federation_caching_entity_field_args_test.go @@ -0,0 +1,1965 @@ +package engine_test + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// queryWithRawVariables sends a GraphQL query with raw JSON variables (no key reordering by json.Marshal). +// This is needed to test that different JSON key orderings of the same input produce the same cache hash. +func queryWithRawVariables(t *testing.T, ctx context.Context, addr, query string, rawVariablesJSON string) []byte { + t.Helper() + + queryJSON, err := json.Marshal(query) + require.NoError(t, err) + + var bodyBytes []byte + if rawVariablesJSON != "" { + bodyBytes = []byte(`{"query":` + string(queryJSON) + `,"variables":` + rawVariablesJSON + `}`) + } else { + bodyBytes = []byte(`{"query":` + string(queryJSON) + `}`) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, addr, bytes.NewBuffer(bodyBytes)) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + return respBody +} + +// TestEntityFieldArgsCaching verifies that entity fields with arguments produce distinct +// cache entries (via xxhash suffix), so different argument values never share cached data. +func TestEntityFieldArgsCaching(t *testing.T) { + t.Parallel() + t.Run("same args - L2 miss then hit", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + query := `query EntityFieldArgsFormal { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "formal") + } + } + } + }` + + // Request 1: greeting(style: "formal") - should miss cache + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + expectedResp := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]}]}}` + assert.Equal(t, expectedResp, string(resp), "Response should contain formal greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 6, len(logAfterFirst), "Should have 6 cache operations (get+set for topProducts, Products, Users)") + + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (entity key unchanged by field args) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request cache log should show all misses") + + assert.Equal(t, 1, tracker.GetCount(productsHost), "First request should call products subgraph once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First request should call reviews subgraph once") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First request should call accounts subgraph once") + + // Request 2: same query - should hit cache + defaultCache.ClearLog() + tracker.Reset() + resp, _ = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expectedResp, string(resp), "Second request should return identical response from cache") + + // Cache content after Request 2 (unchanged - all hits): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 3, len(logAfterSecond), "Should have 3 cache get operations (all hits)") + + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity fetches - HIT (greeting_ found in cached entity) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request should show all cache hits") + + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second request should skip products subgraph") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second request should skip reviews subgraph") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second request should skip accounts subgraph") + }) + + t.Run("different args - no data mixing", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + queryFormal := `query EntityFieldArgsFormal { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "formal") + } + } + } + }` + + queryCasual := `query EntityFieldArgsCasual { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "casual") + } + } + } + }` + + // Request 1: greeting(style: "formal") + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryFormal, nil, t) + + expectedFormal := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]}]}}` + assert.Equal(t, expectedFormal, string(resp1), "First request should return formal greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 6, len(logAfterFirst), "Should have 6 cache operations for first request") + + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request cache log") + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First request should call accounts once") + + // Request 2: greeting(style: "casual") - different args, should miss User cache + // The entity key is the same, but the cached entity lacks greeting_ + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryCasual, nil, t) + + expectedCasual := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Hey, Me!"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Hey, Me!"}}]}]}}` + assert.Equal(t, expectedCasual, string(resp2), "Second request should return casual greeting, not formal") + + // Cache content after Request 2 (User merged: both formal and casual variants present): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User","greeting_e4956d127c0d173e":"Hey, Me!"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + + // The L2 cache GET returns the User entity (key exists → FakeLoaderCache reports HIT), + // but the Loader's validateItemHasRequiredData fails because greeting_ + // is missing from the cached entity. The Loader treats it as a miss, re-fetches from + // accounts, and merges the new data with the old cached entity. So we expect: GET (hit at L2 layer) + SET. + wantLogSecond := []CacheLogEntry{ + // topProducts root field - HIT + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entities - HIT + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - L2 returns data (HIT) but Loader rejects it (missing casual field) → re-fetch → SET + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request: User entity found in L2 but missing casual field → re-fetch + re-store") + + // Accounts must be called because the cached entity lacked the casual greeting variant + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called again for different args") + // topProducts and Products should still hit cache + assert.Equal(t, 0, tracker.GetCount(productsHost), "Products should hit cache") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Reviews should hit cache") + }) + + t.Run("aliases with different args - both cached together", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + query := `query EntityFieldArgsAliases { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + formalGreeting: greeting(style: "formal") + casualGreeting: greeting(style: "casual") + } + } + } + }` + + // Request 1: formalGreeting + casualGreeting aliases - both variants in single fetch + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + expectedAliases := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","formalGreeting":"Good day, Me","casualGreeting":"Hey, Me!"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","formalGreeting":"Good day, Me","casualGreeting":"Hey, Me!"}}]}]}}` + assert.Equal(t, expectedAliases, string(resp1), "First request should return both greeting variants") + + // Cache content after Request 1 (both alias variants stored with their respective arg-hash suffixes): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","greeting_e4956d127c0d173e":"Hey, Me!","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first request, L2 empty; entity stored with both arg-suffixed fields) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should show all misses") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called once (single entity batch)") + + // Request 2: same aliases query - should fully hit cache + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expectedAliases, string(resp2), "Second request should return identical response from cache") + + // Cache content after Request 2 (unchanged - all hits): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","greeting_e4956d127c0d173e":"Hey, Me!","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 3, len(logAfterSecond), "Should have 3 cache get operations (all hits)") + + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request should show all cache hits") + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Accounts should not be called on cache hit") + }) + + t.Run("aliases cached then single field hits cache", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + queryAliases := `query EntityFieldArgsAliases { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + formalGreeting: greeting(style: "formal") + casualGreeting: greeting(style: "casual") + } + } + } + }` + + queryFormal := `query EntityFieldArgsFormal { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "formal") + } + } + } + }` + + // Request 1: cache both variants via aliases + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryAliases, nil, t) + + expectedAliases := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","formalGreeting":"Good day, Me","casualGreeting":"Hey, Me!"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","formalGreeting":"Good day, Me","casualGreeting":"Hey, Me!"}}]}]}}` + assert.Equal(t, expectedAliases, string(resp1), "Aliases request should return both greeting variants") + + // Cache content after Request 1 (entity has both greeting variants): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","greeting_e4956d127c0d173e":"Hey, Me!","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should show all misses") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called once") + + // Request 2: single field greeting(style: "formal") - should hit cache + // The cached entity has both greeting_ and greeting_ + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryFormal, nil, t) + + expectedFormal := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]}]}}` + assert.Equal(t, expectedFormal, string(resp2), "Single field request should return formal greeting from cache") + + // Cache content after Request 2 (unchanged - entity still has both variants): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","greeting_e4956d127c0d173e":"Hey, Me!","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 3, len(logAfterSecond), "Should have 3 cache get operations (all hits)") + + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // Cached entity has both suffixed fields; formal variant found -> HIT + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Single field request should hit cache with entity that has both variants") + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Accounts should not be called when formal variant exists in cache") + }) + + t.Run("enum argument - miss then hit", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + query := `query EntityFieldArgsCustomGreeting($input: GreetingInput!) { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + customGreeting(input: $input) + } + } + } + }` + + vars := queryVariables{"input": map[string]any{"style": "FORMAL"}} + + // Request 1: customGreeting with enum FORMAL - should miss + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, vars, t) + + expectedResp := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"Good day, Me"}}]}]}}` + assert.Equal(t, expectedResp, string(resp1), "First request should return formal customGreeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_5c96b2bdff7784c6":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should show all misses") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called once") + + // Request 2: same enum value - should hit cache + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, vars, t) + assert.Equal(t, expectedResp, string(resp2), "Second request should return identical response from cache") + + // Cache content after Request 2 (unchanged - all hits): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_5c96b2bdff7784c6":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity fetches - HIT (customGreeting_ found in cached entity) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request should show all cache hits") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Accounts should not be called on cache hit") + }) + + t.Run("enum argument - different enum values different cache entries", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + query := `query EntityFieldArgsCustomGreeting($input: GreetingInput!) { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + customGreeting(input: $input) + } + } + } + }` + + varsFormal := queryVariables{"input": map[string]any{"style": "FORMAL"}} + varsCasual := queryVariables{"input": map[string]any{"style": "CASUAL"}} + + expectedFormal := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"Good day, Me"}}]}]}}` + expectedCasual := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"Hey, Me!"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"Hey, Me!"}}]}]}}` + + // Request 1: FORMAL enum + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, varsFormal, t) + assert.Equal(t, expectedFormal, string(resp1), "FORMAL should produce formal greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_5c96b2bdff7784c6":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should show all misses") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called once for FORMAL") + + // Request 2: CASUAL enum - different hash, should miss User cache + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, varsCasual, t) + assert.Equal(t, expectedCasual, string(resp2), "CASUAL should produce casual greeting, not formal") + + // Cache content after Request 2 (User merged: both FORMAL and CASUAL variants present): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_5c96b2bdff7784c6":"Good day, Me","__typename":"User","customGreeting_3fe84620597916f8":"Hey, Me!"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - L2 returns data (HIT) but Loader rejects it (missing casual enum hash) → re-fetch + merge → SET + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request: User entity found but missing casual enum variant → re-fetch + re-store") + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called again for different enum value") + assert.Equal(t, 0, tracker.GetCount(productsHost), "Products should hit cache") + }) + + t.Run("nested input object - changing nested field produces different hash", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + query := `query EntityFieldArgsCustomGreeting($input: GreetingInput!) { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + customGreeting(input: $input) + } + } + } + }` + + varsUppercase := queryVariables{"input": map[string]any{ + "style": "FORMAL", + "formatting": map[string]any{"uppercase": true}, + }} + varsNoUppercase := queryVariables{"input": map[string]any{ + "style": "FORMAL", + "formatting": map[string]any{"uppercase": false}, + }} + + expectedUppercase := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"GOOD DAY, ME"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"GOOD DAY, ME"}}]}]}}` + expectedNormal := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"Good day, Me"}}]}]}}` + + // Request 1: uppercase=true + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, varsUppercase, t) + assert.Equal(t, expectedUppercase, string(resp1), "uppercase=true should produce uppercased greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_f26a2578aca5e6a1":"GOOD DAY, ME","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should show all misses") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called once") + + // Request 2: uppercase=false - different nested field value, different hash + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, varsNoUppercase, t) + assert.Equal(t, expectedNormal, string(resp2), "uppercase=false should produce normal greeting") + + // Cache content after Request 2 (User merged: both uppercase=true and uppercase=false variants present): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_f26a2578aca5e6a1":"GOOD DAY, ME","__typename":"User","customGreeting_e5bb1eb0d1896f64":"Good day, Me"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - L2 returns data (HIT) but Loader rejects it (different nested field hash) → re-fetch + merge → SET + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request: User entity found but missing uppercase=false variant → re-fetch + re-store") + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called again for different nested field value") + }) + + t.Run("nested input object - different nested fields present", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + query := `query EntityFieldArgsCustomGreeting($input: GreetingInput!) { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + customGreeting(input: $input) + } + } + } + }` + + varsUppercase := queryVariables{"input": map[string]any{ + "style": "FORMAL", + "formatting": map[string]any{"uppercase": true}, + }} + varsPrefix := queryVariables{"input": map[string]any{ + "style": "FORMAL", + "formatting": map[string]any{"prefix": "Dr."}, + }} + + expectedUppercase := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"GOOD DAY, ME"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"GOOD DAY, ME"}}]}]}}` + expectedPrefix := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"Dr. Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"Dr. Good day, Me"}}]}]}}` + + // Request 1: formatting with uppercase + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, varsUppercase, t) + assert.Equal(t, expectedUppercase, string(resp1), "uppercase should produce uppercased greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_f26a2578aca5e6a1":"GOOD DAY, ME","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should show all misses") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called once") + + // Request 2: formatting with prefix - different fields present, different hash + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, varsPrefix, t) + assert.Equal(t, expectedPrefix, string(resp2), "prefix should produce prefixed greeting") + + // Cache content after Request 2 (User merged: both uppercase and prefix variants present): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_f26a2578aca5e6a1":"GOOD DAY, ME","__typename":"User","customGreeting_cc61634e04b7fbf6":"Dr. Good day, Me"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - L2 returns data (HIT) but Loader rejects it (different nested fields hash) → re-fetch + merge → SET + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request: User entity found but missing prefix variant → re-fetch + re-store") + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called again for different nested fields") + }) + + t.Run("nested input object - same fields different key order produces same hash", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + query := `query EntityFieldArgsCustomGreeting($input: GreetingInput!) { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + customGreeting(input: $input) + } + } + } + }` + + expectedResp := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","customGreeting":"GOOD DAY, ME"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","customGreeting":"GOOD DAY, ME"}}]}]}}` + + // Request 1: style first, then formatting (raw JSON to preserve key order) + defaultCache.ClearLog() + tracker.Reset() + resp1 := queryWithRawVariables(t, ctx, setup.GatewayServer.URL, + query, + `{"input":{"style":"FORMAL","formatting":{"uppercase":true}}}`) + assert.Equal(t, expectedResp, string(resp1), "Order 1 should produce uppercased greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_f26a2578aca5e6a1":"GOOD DAY, ME","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first request, L2 empty) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should show all misses") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Accounts should be called once for order 1") + + // Request 2: formatting first, then style (same logical input, different JSON key order) + // Raw JSON ensures the key order is preserved as-is (Go's json.Marshal would sort keys) + defaultCache.ClearLog() + tracker.Reset() + resp2 := queryWithRawVariables(t, ctx, setup.GatewayServer.URL, + query, + `{"input":{"formatting":{"uppercase":true},"style":"FORMAL"}}`) + assert.Equal(t, expectedResp, string(resp2), "Order 2 should produce same uppercased greeting") + + // Cache content after Request 2 (unchanged - canonical JSON hashing makes key order irrelevant): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","customGreeting_f26a2578aca5e6a1":"GOOD DAY, ME","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - HIT (canonical JSON hashing makes key order irrelevant) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request should show all cache hits (key order canonicalized)") + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Accounts should NOT be called when same input is sent with different key order") + }) + + t.Run("different args merge enables third request cache hit", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + queryFormal := `query EntityFieldArgsFormal { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "formal") + } + } + } + }` + + queryCasual := `query EntityFieldArgsCasual { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "casual") + } + } + } + }` + + // Request 1: greeting(style: "formal") → L2 miss → fetch → store + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryFormal, nil, t) + + expectedFormal := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]}]}}` + assert.Equal(t, expectedFormal, string(resp1), "Request 1 should return formal greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // All misses on first request - L2 empty + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Request 1: all misses, populate cache") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Request 1 should call accounts once") + + // Request 2: greeting(style: "casual") → L2 validation fails → fetch → merge-store + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryCasual, nil, t) + + expectedCasual := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Hey, Me!"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Hey, Me!"}}]}]}}` + assert.Equal(t, expectedCasual, string(resp2), "Request 2 should return casual greeting") + + // Cache content after Request 2 (merged: both formal and casual variants present): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User","greeting_e4956d127c0d173e":"Hey, Me!"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // topProducts and Products - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - L2 returns data (HIT) but Loader rejects it (missing casual field) → re-fetch + merge → SET + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Request 2: User entity found but missing casual field → re-fetch + merge") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Request 2 should call accounts once (casual variant missing)") + + // Request 3: greeting(style: "formal") again → L2 HIT (formal variant exists in merged entity) + defaultCache.ClearLog() + tracker.Reset() + resp3, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryFormal, nil, t) + assert.Equal(t, expectedFormal, string(resp3), "Request 3 should return formal greeting from cache") + + // Cache content after Request 3 (unchanged - full cache hit, no write): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User","greeting_e4956d127c0d173e":"Hey, Me!"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterThird := defaultCache.GetLog() + wantLogThird := []CacheLogEntry{ + // All GETs are hits - no SETs needed + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - HIT (formal variant exists in merged entity from Request 2) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogThird), sortCacheLogEntries(logAfterThird), "Request 3: all cache hits, no fetches needed") + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Request 3 should NOT call accounts (formal variant in merged cache)") + }) + + t.Run("different args merge enables combined alias cache hit", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + queryFormal := `query EntityFieldArgsFormal { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "formal") + } + } + } + }` + + queryCasual := `query EntityFieldArgsCasual { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + greeting(style: "casual") + } + } + } + }` + + queryBothAliases := `query EntityFieldArgsBothAliases { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + formalGreeting: greeting(style: "formal") + casualGreeting: greeting(style: "casual") + } + } + } + }` + + // Request 1: greeting(style: "formal") → L2 miss → fetch → store + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryFormal, nil, t) + + expectedFormal := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Good day, Me"}}]}]}}` + assert.Equal(t, expectedFormal, string(resp1), "Request 1 should return formal greeting") + + // Cache content after Request 1: + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // All misses on first request - L2 empty + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Request 1: all misses, populate cache") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Request 1 should call accounts once") + + // Request 2: greeting(style: "casual") → L2 validation fails → fetch → merge-store + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryCasual, nil, t) + + expectedCasual := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","greeting":"Hey, Me!"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","greeting":"Hey, Me!"}}]}]}}` + assert.Equal(t, expectedCasual, string(resp2), "Request 2 should return casual greeting") + + // Cache content after Request 2 (merged: both variants present): + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User","greeting_e4956d127c0d173e":"Hey, Me!"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // topProducts and Products - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - L2 returns data (HIT) but Loader rejects it (missing casual field) → re-fetch + merge → SET + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Request 2: User entity found but missing casual field → re-fetch + merge") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Request 2 should call accounts once (casual variant missing)") + + // Request 3: combined alias query with both variants → L2 HIT (both variants exist in merged entity) + defaultCache.ClearLog() + tracker.Reset() + resp3, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryBothAliases, nil, t) + + expectedBoth := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","formalGreeting":"Good day, Me","casualGreeting":"Hey, Me!"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","formalGreeting":"Good day, Me","casualGreeting":"Hey, Me!"}}]}]}}` + assert.Equal(t, expectedBoth, string(resp3), "Request 3 should return both greeting variants from cache") + + // Cache content after Request 3 (unchanged - full cache hit, no write): + assert.Equal(t, + `{"username":"Me","greeting_1dc2e714f80c47e8":"Good day, Me","__typename":"User","greeting_e4956d127c0d173e":"Hey, Me!"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterThird := defaultCache.GetLog() + wantLogThird := []CacheLogEntry{ + // All GETs are hits - no SETs needed + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - HIT (both variants exist in merged entity) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogThird), sortCacheLogEntries(logAfterThird), "Request 3: all cache hits, both variants served from merged entity") + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Request 3 should NOT call accounts (both variants in merged cache)") + }) + + t.Run("non-arg fields merge across fetches", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, err := url.Parse(setup.AccountsUpstreamServer.URL) + require.NoError(t, err) + accountsHost := accountsURLParsed.Host + peekCache := func(key string) string { + data, ok := defaultCache.Peek(key) + if !ok { + return "" + } + return string(data) + } + + queryUsernameOnly := `query UsernameOnly { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } + }` + + queryUsernameAndNickname := `query UsernameAndNickname { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + nickname + } + } + } + }` + + queryNicknameOnly := `query NicknameOnly { + topProducts { + name + reviews { + body + authorWithoutProvides { + nickname + } + } + } + }` + + // Request 1: username only → L2 miss → fetch → store + defaultCache.ClearLog() + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryUsernameOnly, nil, t) + + expectedUsernameOnly := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + assert.Equal(t, expectedUsernameOnly, string(resp1), "Request 1 should return username only") + + // Cache content after Request 1: + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"__typename":"User","id":"1234","username":"Me"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + // All misses on first request - L2 empty + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Request 1: all misses, populate cache") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Request 1 should call accounts once") + + // Request 2: username + nickname → L2 validation fails (missing nickname) → fetch → merge-store + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryUsernameAndNickname, nil, t) + + expectedUsernameAndNickname := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","nickname":"nick-Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","nickname":"nick-Me"}}]}]}}` + assert.Equal(t, expectedUsernameAndNickname, string(resp2), "Request 2 should return username and nickname") + + // Cache content after Request 2 (merged: both username and nickname present): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"__typename":"User","id":"1234","username":"Me","nickname":"nick-Me"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HIT (populated by Request 1) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - L2 returns data (HIT) but Loader rejects it (missing nickname) → re-fetch + merge → SET + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Request 2: User entity found but missing nickname → re-fetch + merge") + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Request 2 should call accounts once (nickname missing)") + + // Request 3: nickname only → L2 HIT (nickname exists in merged entity) + defaultCache.ClearLog() + tracker.Reset() + resp3, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, queryNicknameOnly, nil, t) + + expectedNicknameOnly := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"nickname":"nick-Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"nickname":"nick-Me"}}]}]}}` + assert.Equal(t, expectedNicknameOnly, string(resp3), "Request 3 should return nickname from cache") + + // Cache content after Request 3 (unchanged - full cache hit, no write): + assert.Equal(t, + `{"topProducts":[{"name":"Trilby","__typename":"Product","upc":"top-1"},{"name":"Fedora","__typename":"Product","upc":"top-2"}]}`, + peekCache(`{"__typename":"Query","field":"topProducts"}`)) + assert.Equal(t, + `{"name":"Trilby","__typename":"Product","upc":"top-1","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-1"}}`)) + assert.Equal(t, + `{"name":"Fedora","__typename":"Product","upc":"top-2","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}`, + peekCache(`{"__typename":"Product","key":{"upc":"top-2"}}`)) + assert.Equal(t, + `{"__typename":"User","id":"1234","username":"Me","nickname":"nick-Me"}`, + peekCache(`{"__typename":"User","key":{"id":"1234"}}`)) + + logAfterThird := defaultCache.GetLog() + wantLogThird := []CacheLogEntry{ + // All GETs are hits - no SETs needed + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity - HIT (nickname exists in merged entity from Request 2) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogThird), sortCacheLogEntries(logAfterThird), "Request 3: all cache hits, nickname served from merged entity") + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Request 3 should NOT call accounts (nickname in merged cache)") + }) +} diff --git a/execution/engine/federation_caching_ext_invalidation_test.go b/execution/engine/federation_caching_ext_invalidation_test.go new file mode 100644 index 0000000000..d99caa7d7c --- /dev/null +++ b/execution/engine/federation_caching_ext_invalidation_test.go @@ -0,0 +1,825 @@ +package engine_test + +import ( + "context" + "encoding/json" + "maps" + "net/http" + "net/http/httptest" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" + products "github.com/wundergraph/graphql-go-tools/execution/federationtesting/products/graph" + reviews "github.com/wundergraph/graphql-go-tools/execution/federationtesting/reviews/graph" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestFederationCaching_ExtensionsInvalidation verifies end-to-end extensions-based cache +// invalidation: a mutation response with cacheInvalidation extensions deletes the L2 entry. +func TestFederationCaching_ExtensionsInvalidation(t *testing.T) { + t.Parallel() + t.Run("mutation with extensions invalidation clears L2 cache", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + entityResponseMe := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + entityResponseUpdated := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"UpdatedMe"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"UpdatedMe"}}]}]}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // Verify that a mutation response with cacheInvalidation extensions + // deletes the corresponding L2 cache entry, forcing a re-fetch. + env := newExtInvalidationEnv(t) + + // Step 1: Query populates L2 cache. + resp := env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 1, env.accountsCalls(), "first request fetches from accounts") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: userKey, Hit: false}}}, // L2 empty on first request + {Operation: "set", Items: []CacheLogItem{{Key: userKey, TTL: 30 * time.Second}}}, // populate L2 after fetch + }), env.cacheLog()) + + // Step 2: Same query — L2 hit, no subgraph call. + resp = env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 0, env.accountsCalls(), "L2 cache hit") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: userKey, Hit: true}}}, // L2 hit from Step 1 + }), env.cacheLog()) + + // Step 3: Mutation with cacheInvalidation extensions deletes User:1234. + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + mutResp := env.mutate(mutationQuery) + assert.Equal(t, mutationResponse, mutResp) + env.clearModifier() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: userKey}}}, // extensions-based invalidation + }), env.cacheLog()) + + // Step 4: Re-query — L2 miss after invalidation, fetches updated username. + resp = env.queryEntity(entityQuery) + assert.Equal(t, entityResponseUpdated, resp) + assert.Equal(t, 1, env.accountsCalls(), "re-fetched after invalidation") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: userKey, Hit: false}}}, // L2 miss because Step 3 deleted it + {Operation: "set", Items: []CacheLogItem{{Key: userKey, TTL: 30 * time.Second}}}, // re-populate L2 after re-fetch + }), env.cacheLog()) + }) + + t.Run("invalidation of entity not in cache is a no-op", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + entityResponseMe := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // Invalidating a different entity (User:9999) should not affect + // the cached entity (User:1234). + env := newExtInvalidationEnv(t) + + // Populate cache with User:1234. + env.queryEntity(entityQuery) + + // Mutation invalidates User:9999 (never cached). + user9999Key := `{"__typename":"User","key":{"id":"9999"}}` + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"9999"}}]}`) + }) + mutResp := env.mutate(mutationQuery) + assert.Equal(t, mutationResponse, mutResp) + env.clearModifier() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: user9999Key}}}, // delete called even though entry doesn't exist + }), env.cacheLog()) + + // User:1234 should still be cached (unaffected by User:9999 invalidation). + resp := env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 0, env.accountsCalls(), "User:1234 still cached") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: userKey, Hit: true}}}, // User:1234 still in L2 + }), env.cacheLog()) + }) + + t.Run("multiple entities invalidated in single response", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // A single mutation response can invalidate multiple entities at once. + env := newExtInvalidationEnv(t) + + // Populate cache with User:1234. + env.queryEntity(entityQuery) + + // Mutation invalidates both User:1234 and User:2345 in one response. + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}},{"typename":"User","key":{"id":"2345"}}]}`) + }) + env.mutate(mutationQuery) + env.clearModifier() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`}, + {Key: `{"__typename":"User","key":{"id":"2345"}}`}, + }}, // both entities deleted in single batch + }), env.cacheLog()) + + // User:1234 must be re-fetched after invalidation. + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls(), "re-fetched after invalidation") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: userKey, Hit: false}}}, // L2 miss because mutation deleted it + {Operation: "set", Items: []CacheLogItem{{Key: userKey, TTL: 30 * time.Second}}}, // re-populate L2 + }), env.cacheLog()) + }) + + t.Run("mutation without extensions does not delete", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + entityResponseMe := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + + // A mutation without cacheInvalidation extensions should not + // trigger any cache deletes — cached data survives. + env := newExtInvalidationEnv(t) + + // Populate cache. + env.queryEntity(entityQuery) + + // Verify cache hit. + resp := env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 0, env.accountsCalls(), "L2 cache hit") + + // Mutation WITHOUT extensions — no cache operations. + env.mutate(mutationQuery) + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{}), env.cacheLog(), "no cache operations for mutation without extensions") + + // Cache should still be valid. + resp = env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 0, env.accountsCalls(), "cache still valid") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: userKey, Hit: true}}}, // L2 still valid + }), env.cacheLog()) + }) + + t.Run("coexistence with detectMutationEntityImpact", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // When BOTH config-based MutationCacheInvalidation AND extensions-based + // invalidation target the same key, the delete should be deduplicated + // to a single cache.Delete() call. + env := newExtInvalidationEnv(t, withMutationCacheInvalidation("updateUsername")) + + // Populate cache. + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls()) + + // Verify cache hit. + env.queryEntity(entityQuery) + assert.Equal(t, 0, env.accountsCalls(), "L2 cache hit") + + // Mutation triggers BOTH mechanisms on User:1234. + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + env.mutate(mutationQuery) + env.clearModifier() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: userKey}}}, // deduplicated: detectMutationEntityImpact fires, extensions-based skipped + }), env.cacheLog(), "single delete despite both mechanisms targeting same key") + + // Cache invalidated — query should re-fetch. + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls(), "re-fetched after combined invalidation") + }) + + t.Run("query response triggers invalidation", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + entityResponseMe := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + entitiesSubgraphRespMe := `{"data":{"_entities":[{"__typename":"User","username":"Me"}]}}` + + // Cache invalidation via extensions is NOT restricted to mutations. + // A query (e.g. _entities) response can also carry invalidation extensions. + env := newExtInvalidationEnv(t) + + // Step 1: Populate L2 cache. + resp := env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 1, env.accountsCalls()) + + // Step 2: Verify cache hit. + env.queryEntity(entityQuery) + assert.Equal(t, 0, env.accountsCalls(), "L2 cache hit") + + // Step 3: Manually delete cache entry, then inject invalidation into the + // _entities query response. This proves invalidation works on queries too. + env.deleteFromCache(userKey) + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, entitiesSubgraphRespMe, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + + resp = env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 1, env.accountsCalls(), "re-fetched after manual delete") + env.clearModifier() + + // Extensions-based delete is skipped because updateL2Cache will set the same + // key with fresh data — only get(miss) + set remain. + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: userKey, Hit: false}}}, // L2 miss because we manually deleted it + {Operation: "set", Items: []CacheLogItem{{Key: userKey, TTL: 30 * time.Second}}}, // re-populate L2 (delete skipped: same key about to be set) + }), env.cacheLog()) + }) + + t.Run("with subgraph header prefix", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // When IncludeSubgraphHeaderPrefix is enabled, cache keys include a + // hash prefix (e.g. "55555:"). Invalidation must use the same prefix. + env := newExtInvalidationEnv(t, withHeaderPrefix(55555)) + prefixedKey := `55555:` + userKey + + // Populate cache (keys include header prefix). + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls()) + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: prefixedKey, Hit: false}}}, // L2 miss, prefixed key + {Operation: "set", Items: []CacheLogItem{{Key: prefixedKey, TTL: 30 * time.Second}}}, // populate L2 with prefixed key + }), env.cacheLog()) + + // Verify cache hit. + env.queryEntity(entityQuery) + assert.Equal(t, 0, env.accountsCalls(), "L2 cache hit") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: prefixedKey, Hit: true}}}, // L2 hit with prefixed key + }), env.cacheLog()) + + // Mutation with extensions invalidation. + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + env.mutate(mutationQuery) + env.clearModifier() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: prefixedKey}}}, // delete key includes header prefix + }), env.cacheLog()) + + // Cache invalidated — re-fetch. + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls(), "re-fetched after invalidation") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: prefixedKey, Hit: false}}}, // L2 miss after delete + {Operation: "set", Items: []CacheLogItem{{Key: prefixedKey, TTL: 30 * time.Second}}}, // re-populate L2 + }), env.cacheLog()) + }) + + t.Run("with L2CacheKeyInterceptor", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // When an L2CacheKeyInterceptor is configured, cache keys are transformed + // (e.g. "tenant-X:" prefix). Invalidation must use the same transformation. + env := newExtInvalidationEnv(t, withExtInvL2KeyInterceptor( + func(_ context.Context, key string, _ resolve.L2CacheKeyInterceptorInfo) string { + return "tenant-X:" + key + }, + )) + interceptedKey := `tenant-X:` + userKey + + // Populate cache (keys include interceptor prefix). + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls()) + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: interceptedKey, Hit: false}}}, // L2 miss, intercepted key + {Operation: "set", Items: []CacheLogItem{{Key: interceptedKey, TTL: 30 * time.Second}}}, // populate L2 with intercepted key + }), env.cacheLog()) + + // Verify cache hit. + env.queryEntity(entityQuery) + assert.Equal(t, 0, env.accountsCalls(), "L2 cache hit") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: interceptedKey, Hit: true}}}, // L2 hit with intercepted key + }), env.cacheLog()) + + // Mutation with extensions invalidation. + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + env.mutate(mutationQuery) + env.clearModifier() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: interceptedKey}}}, // delete key includes interceptor prefix + }), env.cacheLog()) + + // Cache invalidated — re-fetch. + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls(), "re-fetched after invalidation") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: interceptedKey, Hit: false}}}, // L2 miss after delete + {Operation: "set", Items: []CacheLogItem{{Key: interceptedKey, TTL: 30 * time.Second}}}, // re-populate L2 + }), env.cacheLog()) + }) + + // ------------------------------------------------------------------------- + // Error handling: cache invalidation must run even when errors are present. + // ------------------------------------------------------------------------- + + t.Run("error response with invalidation extensions still invalidates cache", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + entityResponseMe := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}` + entityResponseUpdated := `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"UpdatedMe"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"UpdatedMe"}}]}]}}` + + // When a mutation returns BOTH errors AND extensions.cacheInvalidation, + // the cache invalidation should still run despite the errors. + env := newExtInvalidationEnv(t) + + // Populate L2 cache. + resp := env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 1, env.accountsCalls()) + + // Verify cache hit. + resp = env.queryEntity(entityQuery) + assert.Equal(t, entityResponseMe, resp) + assert.Equal(t, 0, env.accountsCalls(), "L2 cache hit") + + // Mutation returns errors alongside cacheInvalidation extensions. + env.onAccountsResponse(func(body []byte) []byte { + return injectErrorsAndCacheInvalidation(t, body, + `[{"message":"partial error"}]`, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + env.mutate(mutationQuery) + env.clearModifier() + + // Cache should be invalidated despite errors in response. + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: userKey}}}, // invalidation runs despite errors + }), env.cacheLog()) + + // Re-query — L2 miss after invalidation, re-fetches updated data. + resp = env.queryEntity(entityQuery) + assert.Equal(t, entityResponseUpdated, resp) + assert.Equal(t, 1, env.accountsCalls(), "re-fetched after invalidation") + }) + + // ------------------------------------------------------------------------- + // Analytics: MutationEvent correctness with cache invalidation. + // ------------------------------------------------------------------------- + + t.Run("coexistence with analytics reports correct staleness", func(t *testing.T) { + t.Parallel() + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // When both config-based and extensions-based invalidation target the same + // entity, analytics should correctly report the entity was cached and stale. + env := newExtInvalidationEnv(t, + withMutationCacheInvalidation("updateUsername"), + withExtInvAnalytics(), + ) + + // Populate L2 cache with User:1234 (username="Me"). + env.queryEntity(entityQuery) + assert.Equal(t, 1, env.accountsCalls()) + + // Mutation with BOTH mechanisms targeting User:1234. + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + mutResp, headers := env.mutateWithHeaders(mutationQuery) + assert.Equal(t, mutationResponse, mutResp) + env.clearModifier() + + // Analytics should still identify the mutation entity, but must not read L2. + snap := normalizeSnapshot(parseCacheAnalytics(t, headers)) + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: userKey, + HadCachedValue: false, // Mutation analytics must not read L2 + IsStale: false, // No cache read means no stale comparison + CachedHash: event.CachedHash, + FreshHash: event.FreshHash, + CachedBytes: event.CachedBytes, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + + // Verify dedup still works — single delete despite both mechanisms. + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: userKey}}}, // config-based delete (extensions-based skipped via dedup) + }), env.cacheLog(), "single delete despite both mechanisms; analytics must not read cache") + }) + + t.Run("analytics without prior cache reports no-cache event", func(t *testing.T) { + t.Parallel() + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + userKey := `{"__typename":"User","key":{"id":"1234"}}` + mutationResponse := `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}` + + // When mutation triggers invalidation but entity was never cached, + // MutationEvent should show HadCachedValue=false, IsStale=false. + env := newExtInvalidationEnv(t, + withMutationCacheInvalidation("updateUsername"), + withExtInvAnalytics(), + ) + + // No prior query — L2 cache is empty. + // Mutation with extensions invalidation targeting User:1234. + env.onAccountsResponse(func(body []byte) []byte { + assert.Equal(t, mutationResponse, string(body)) + return injectCacheInvalidation(t, body, + `{"keys":[{"typename":"User","key":{"id":"1234"}}]}`) + }) + mutResp, headers := env.mutateWithHeaders(mutationQuery) + assert.Equal(t, mutationResponse, mutResp) + env.clearModifier() + + // Analytics should report no cached value. + snap := normalizeSnapshot(parseCacheAnalytics(t, headers)) + require.Equal(t, 1, len(snap.MutationEvents), "should have exactly 1 mutation impact event") + + event := snap.MutationEvents[0] + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + FieldHashes: []resolve.EntityFieldHash{ + // Hash of "UpdatedMe" (post-mutation username) + {EntityType: "User", FieldName: "username", FieldHash: 16932466035575627600, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation returned 1 User entity + }, + MutationEvents: []resolve.MutationEvent{ + { + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: userKey, + HadCachedValue: false, // No prior query, L2 cache was empty + IsStale: false, // Cannot be stale without a cached value to compare + FreshHash: event.FreshHash, + FreshBytes: event.FreshBytes, + }, + }, + }), snap) + }) +} + +// injectCacheInvalidation injects a raw JSON cacheInvalidation object into a subgraph +// response's extensions field and returns the modified response body. +func injectCacheInvalidation(t *testing.T, body []byte, cacheInvalidationJSON string) []byte { + t.Helper() + var resp map[string]json.RawMessage + require.NoError(t, json.Unmarshal(body, &resp)) + resp["extensions"] = json.RawMessage(`{"cacheInvalidation":` + cacheInvalidationJSON + `}`) + modified, err := json.Marshal(resp) + require.NoError(t, err) + return modified +} + +// injectErrorsAndCacheInvalidation injects both errors and cacheInvalidation extensions +// into a subgraph response body. Used to test that invalidation runs even when errors are present. +func injectErrorsAndCacheInvalidation(t *testing.T, body []byte, errorsJSON string, cacheInvalidationJSON string) []byte { + t.Helper() + var resp map[string]json.RawMessage + require.NoError(t, json.Unmarshal(body, &resp)) + resp["errors"] = json.RawMessage(errorsJSON) + resp["extensions"] = json.RawMessage(`{"cacheInvalidation":` + cacheInvalidationJSON + `}`) + modified, err := json.Marshal(resp) + require.NoError(t, err) + return modified +} + +// subgraphResponseInterceptor wraps a subgraph HTTP handler and applies a modifier +// function to every response body when set. When modifier is nil, responses pass through. +type subgraphResponseInterceptor struct { + handler http.Handler + mu sync.RWMutex + modifier func(body []byte) []byte +} + +func newSubgraphResponseInterceptor(handler http.Handler) *subgraphResponseInterceptor { + return &subgraphResponseInterceptor{handler: handler} +} + +func (s *subgraphResponseInterceptor) SetModifier(fn func(body []byte) []byte) { + s.mu.Lock() + defer s.mu.Unlock() + s.modifier = fn +} + +func (s *subgraphResponseInterceptor) ClearModifier() { + s.mu.Lock() + defer s.mu.Unlock() + s.modifier = nil +} + +func (s *subgraphResponseInterceptor) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.mu.RLock() + mod := s.modifier + s.mu.RUnlock() + + if mod == nil { + s.handler.ServeHTTP(w, r) + return + } + + rec := httptest.NewRecorder() + s.handler.ServeHTTP(rec, r) + + modified := mod(rec.Body.Bytes()) + + maps.Copy(w.Header(), rec.Header()) + w.Header().Set("Content-Length", strconv.Itoa(len(modified))) + w.WriteHeader(rec.Code) + _, _ = w.Write(modified) +} + +// newFederationSetupWithInterceptor creates a FederationSetup where the accounts subgraph +// is wrapped with the response interceptor. +func newFederationSetupWithInterceptor( + interceptor *subgraphResponseInterceptor, + gatewayFn func(*federationtesting.FederationSetup) *httptest.Server, +) *federationtesting.FederationSetup { + accountsServer := httptest.NewServer(interceptor) + productsServer := httptest.NewServer(products.GraphQLEndpointHandler(products.TestOptions)) + reviewsServer := httptest.NewServer(reviews.GraphQLEndpointHandler(reviews.TestOptions)) + + setup := &federationtesting.FederationSetup{ + AccountsUpstreamServer: accountsServer, + ProductsUpstreamServer: productsServer, + ReviewsUpstreamServer: reviewsServer, + } + + setup.GatewayServer = gatewayFn(setup) + return setup +} + +// newFederationSetupWithReviewInterceptor creates a FederationSetup where the reviews +// subgraph is wrapped with the response interceptor. +func newFederationSetupWithReviewInterceptor( + interceptor *subgraphResponseInterceptor, + gatewayFn func(*federationtesting.FederationSetup) *httptest.Server, +) *federationtesting.FederationSetup { + accountsServer := httptest.NewServer(accounts.GraphQLEndpointHandler(accounts.TestOptions)) + productsServer := httptest.NewServer(products.GraphQLEndpointHandler(products.TestOptions)) + reviewsServer := httptest.NewServer(interceptor) + + setup := &federationtesting.FederationSetup{ + AccountsUpstreamServer: accountsServer, + ProductsUpstreamServer: productsServer, + ReviewsUpstreamServer: reviewsServer, + } + + setup.GatewayServer = gatewayFn(setup) + return setup +} + +// --------------------------------------------------------------------------- +// extInvalidationEnv — test environment for extensions cache invalidation tests +// --------------------------------------------------------------------------- + +type extInvalidationOption func(*extInvalidationConfig) + +type extInvalidationConfig struct { + mutationCacheInvalidationField string + headerPrefixHash uint64 + useHeaderPrefix bool + l2KeyInterceptor func(ctx context.Context, key string, info resolve.L2CacheKeyInterceptorInfo) string + enableAnalytics bool +} + +// withMutationCacheInvalidation enables the config-based MutationCacheInvalidation +// mechanism for the given mutation field (e.g. "updateUsername"). +func withMutationCacheInvalidation(fieldName string) extInvalidationOption { + return func(c *extInvalidationConfig) { + c.mutationCacheInvalidationField = fieldName + } +} + +// withHeaderPrefix enables IncludeSubgraphHeaderPrefix on the User entity config +// and sets up a mockSubgraphHeadersBuilder with the given hash for "accounts". +func withHeaderPrefix(hash uint64) extInvalidationOption { + return func(c *extInvalidationConfig) { + c.useHeaderPrefix = true + c.headerPrefixHash = hash + } +} + +// withExtInvAnalytics enables cache analytics collection on the gateway, +// allowing tests to assert on MutationEvent and other analytics data. +func withExtInvAnalytics() extInvalidationOption { + return func(c *extInvalidationConfig) { + c.enableAnalytics = true + } +} + +// withL2KeyInterceptor sets an L2CacheKeyInterceptor on the caching options. +func withExtInvL2KeyInterceptor(fn func(ctx context.Context, key string, info resolve.L2CacheKeyInterceptorInfo) string) extInvalidationOption { + return func(c *extInvalidationConfig) { + c.l2KeyInterceptor = fn + } +} + +type extInvalidationEnv struct { + t *testing.T + cache *FakeLoaderCache + tracker *subgraphCallTracker + interceptor *subgraphResponseInterceptor + setup *federationtesting.FederationSetup + gqlClient *GraphqlClient + accountsHost string + ctx context.Context +} + +// newExtInvalidationEnv creates a fully wired test environment for extensions +// cache invalidation E2E tests. All boilerplate (cache, tracker, interceptor, +// federation setup, gateway, cleanup) is handled here. +func newExtInvalidationEnv(t *testing.T, opts ...extInvalidationOption) *extInvalidationEnv { + t.Helper() + + var cfg extInvalidationConfig + for _, opt := range opts { + opt(&cfg) + } + + // Build entity cache config. + entityCfg := plan.EntityCacheConfiguration{ + TypeName: "User", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: cfg.useHeaderPrefix, + } + + subgraphCfg := engine.SubgraphCachingConfig{ + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{entityCfg}, + } + if cfg.mutationCacheInvalidationField != "" { + subgraphCfg.MutationCacheInvalidation = plan.MutationCacheInvalidationConfigurations{ + {FieldName: cfg.mutationCacheInvalidationField}, + } + } + + cachingOpts := resolve.CachingOptions{EnableL2Cache: true} + if cfg.enableAnalytics { + cachingOpts.EnableCacheAnalytics = true + } + if cfg.l2KeyInterceptor != nil { + cachingOpts.L2CacheKeyInterceptor = cfg.l2KeyInterceptor + } + + cache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": cache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + interceptor := newSubgraphResponseInterceptor(accounts.GraphQLEndpointHandler(accounts.TestOptions)) + + gatewayOpts := []cachingGatewayOptionsToFunc{ + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{subgraphCfg}), + } + if cfg.useHeaderPrefix { + gatewayOpts = append(gatewayOpts, withSubgraphHeadersBuilder(&mockSubgraphHeadersBuilder{ + hashes: map[string]uint64{"accounts": cfg.headerPrefixHash}, + })) + } + + setup := newFederationSetupWithInterceptor(interceptor, addCachingGateway(gatewayOpts...)) + t.Cleanup(setup.Close) + + return &extInvalidationEnv{ + t: t, + cache: cache, + tracker: tracker, + interceptor: interceptor, + setup: setup, + gqlClient: NewGraphqlClient(http.DefaultClient), + accountsHost: mustParseHost(setup.AccountsUpstreamServer.URL), + ctx: t.Context(), + } +} + +// resetCounters resets the subgraph call tracker and clears the cache operation log. +func (e *extInvalidationEnv) resetCounters() { + e.tracker.Reset() + e.cache.ClearLog() +} + +// queryEntity sends an entity query, resets counters first. +func (e *extInvalidationEnv) queryEntity(query string) string { + e.t.Helper() + e.resetCounters() + return string(e.gqlClient.QueryString(e.ctx, e.setup.GatewayServer.URL, query, nil, e.t)) +} + +// mutate sends a mutation, resets counters first. +func (e *extInvalidationEnv) mutate(mutation string) string { + e.t.Helper() + e.resetCounters() + return string(e.gqlClient.QueryString(e.ctx, e.setup.GatewayServer.URL, mutation, nil, e.t)) +} + +// mutateWithHeaders sends a mutation and returns both the response body +// and HTTP headers (for cache analytics inspection). Resets counters first. +func (e *extInvalidationEnv) mutateWithHeaders(mutation string) (string, http.Header) { + e.t.Helper() + e.resetCounters() + resp, headers := e.gqlClient.QueryStringWithHeaders(e.ctx, e.setup.GatewayServer.URL, mutation, nil, e.t) + return string(resp), headers +} + +// onAccountsResponse sets a modifier on the accounts subgraph interceptor. +func (e *extInvalidationEnv) onAccountsResponse(fn func(body []byte) []byte) { + e.interceptor.SetModifier(fn) +} + +// clearModifier removes the interceptor modifier. +func (e *extInvalidationEnv) clearModifier() { + e.interceptor.ClearModifier() +} + +// cacheLog returns the current cache log with keys sorted for deterministic comparison. +func (e *extInvalidationEnv) cacheLog() []CacheLogEntry { + return sortCacheLogEntries(e.cache.GetLog()) +} + +// accountsCalls returns the number of HTTP calls made to the accounts subgraph. +func (e *extInvalidationEnv) accountsCalls() int { + return e.tracker.GetCount(e.accountsHost) +} + +// deleteFromCache manually deletes keys from the L2 cache. +func (e *extInvalidationEnv) deleteFromCache(keys ...string) { + e.t.Helper() + err := e.cache.Delete(e.ctx, keys) + require.NoError(e.t, err) +} diff --git a/execution/engine/federation_caching_helpers_test.go b/execution/engine/federation_caching_helpers_test.go new file mode 100644 index 0000000000..e7d97eeb83 --- /dev/null +++ b/execution/engine/federation_caching_helpers_test.go @@ -0,0 +1,1062 @@ +package engine_test + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "maps" + "net/http" + "net/http/httptest" + "net/url" + "path" + "slices" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/jensneuse/abstractlogger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// subgraphCallTracker tracks HTTP requests made to subgraph servers +type subgraphCallTracker struct { + mu sync.RWMutex + counts map[string]int // Maps subgraph URL to call count + original http.RoundTripper +} + +func newSubgraphCallTracker(original http.RoundTripper) *subgraphCallTracker { + return &subgraphCallTracker{ + counts: make(map[string]int), + original: original, + } +} + +func (t *subgraphCallTracker) RoundTrip(req *http.Request) (*http.Response, error) { + t.mu.Lock() + host := req.URL.Host + t.counts[host]++ + t.mu.Unlock() + return t.original.RoundTrip(req) +} + +func (t *subgraphCallTracker) GetCount(url string) int { + t.mu.RLock() + defer t.mu.RUnlock() + return t.counts[url] +} + +func (t *subgraphCallTracker) Reset() { + t.mu.Lock() + defer t.mu.Unlock() + t.counts = make(map[string]int) +} + +func (t *subgraphCallTracker) GetCounts() map[string]int { + t.mu.RLock() + defer t.mu.RUnlock() + result := make(map[string]int) + maps.Copy(result, t.counts) + return result +} + +func (t *subgraphCallTracker) DebugPrint() string { + t.mu.RLock() + defer t.mu.RUnlock() + return fmt.Sprintf("%v", t.counts) +} + +// Helper functions for gateway setup with HTTP client support +type cachingGatewayOptions struct { + enableART bool + withLoaderCache map[string]resolve.LoaderCache + httpClient *http.Client + subgraphHeadersBuilder resolve.SubgraphHeadersBuilder + cachingOptions resolve.CachingOptions + subgraphEntityCachingConfigs engine.SubgraphCachingConfigs + debugMode bool + resolverOptionsFns []func(*resolve.ResolverOptions) + remapVariables map[string]string +} + +func withCachingEnableART(enableART bool) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.enableART = enableART + } +} + +func withCachingLoaderCache(loaderCache map[string]resolve.LoaderCache) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.withLoaderCache = loaderCache + } +} + +func withHTTPClient(client *http.Client) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.httpClient = client + } +} + +func withSubgraphHeadersBuilder(builder resolve.SubgraphHeadersBuilder) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.subgraphHeadersBuilder = builder + } +} + +func withCachingOptionsFunc(cachingOpts resolve.CachingOptions) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.cachingOptions = cachingOpts + } +} + +func withSubgraphEntityCachingConfigs(configs engine.SubgraphCachingConfigs) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.subgraphEntityCachingConfigs = configs + } +} + +func withDebugMode(enabled bool) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.debugMode = enabled + } +} + +func withResolverOptions(fn func(*resolve.ResolverOptions)) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.resolverOptionsFns = append(opts.resolverOptionsFns, fn) + } +} + +func withRemapVariables(remap map[string]string) func(*cachingGatewayOptions) { + return func(opts *cachingGatewayOptions) { + opts.remapVariables = remap + } +} + +type cachingGatewayOptionsToFunc func(opts *cachingGatewayOptions) + +func addCachingGateway(options ...cachingGatewayOptionsToFunc) func(setup *federationtesting.FederationSetup) *httptest.Server { + opts := &cachingGatewayOptions{} + for _, option := range options { + option(opts) + } + return func(setup *federationtesting.FederationSetup) *httptest.Server { + httpClient := opts.httpClient + if httpClient == nil { + httpClient = http.DefaultClient + } + + poller := gateway.NewDatasource([]gateway.ServiceConfig{ + {Name: "accounts", URL: setup.AccountsUpstreamServer.URL}, + {Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")}, + {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, + }, httpClient) + + var gatewayOpts []gateway.GatewayOption + for _, fn := range opts.resolverOptionsFns { + gatewayOpts = append(gatewayOpts, gateway.WithResolverOptions(fn)) + } + if len(opts.remapVariables) > 0 { + gatewayOpts = append(gatewayOpts, gateway.WithRemapVariables(opts.remapVariables)) + } + gtw := gateway.HandlerWithCachingAndOpts(abstractlogger.NoopLogger, poller, httpClient, opts.enableART, opts.withLoaderCache, opts.subgraphHeadersBuilder, opts.cachingOptions, opts.subgraphEntityCachingConfigs, opts.debugMode, gatewayOpts...) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + poller.Run(ctx) + return httptest.NewServer(gtw) + } +} + +func waitForGatewayReady(t *testing.T, gatewayURL string) { + t.Helper() + + require.Eventually(t, func() bool { + resp, err := http.Post(gatewayURL, "application/json", bytes.NewBufferString(`{"query":"query { __typename }"}`)) + if err != nil { + return false + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return false + } + + return resp.StatusCode == http.StatusOK && bytes.Contains(body, []byte(`"__typename":"Query"`)) + }, time.Second, 10*time.Millisecond) +} + +// mockSubgraphHeadersBuilder is a mock implementation of SubgraphHeadersBuilder +type mockSubgraphHeadersBuilder struct { + hashes map[string]uint64 +} + +func (m *mockSubgraphHeadersBuilder) HeadersForSubgraph(subgraphName string) (http.Header, uint64) { + hash := m.hashes[subgraphName] + if hash == 0 { + // Return default hash if not found + return nil, 99999 + } + return nil, hash +} + +func (m *mockSubgraphHeadersBuilder) HashAll() uint64 { + // Return a simple hash of all subgraph hashes combined + var result uint64 + for _, hash := range m.hashes { + result ^= hash + } + return result +} + +// headerForwardingMock implements SubgraphHeadersBuilder with actual HTTP headers. +// Unlike mockSubgraphHeadersBuilder (which returns nil headers + manual hashes), +// this returns real HTTP headers and computes hashes from their content. +type headerForwardingMock struct { + mu sync.RWMutex + headers map[string]http.Header +} + +func (m *headerForwardingMock) HeadersForSubgraph(subgraphName string) (http.Header, uint64) { + m.mu.RLock() + defer m.mu.RUnlock() + h := m.headers[subgraphName] + if h == nil { + return nil, 0 + } + hash := hashHeaders(h) + // Clone to prevent mutation by downstream code (makeHTTPRequest adds Accept, Content-Type, etc.) + clone := h.Clone() + return clone, hash +} + +func (m *headerForwardingMock) HashAll() uint64 { + m.mu.RLock() + defer m.mu.RUnlock() + var result uint64 + for _, h := range m.headers { + result ^= hashHeaders(h) + } + return result +} + +func (m *headerForwardingMock) setAll(h http.Header) { + m.mu.Lock() + defer m.mu.Unlock() + for sg := range m.headers { + m.headers[sg] = h + } +} + +// hashHeaders computes a deterministic hash of HTTP headers using sorted key-value pairs. +func hashHeaders(h http.Header) uint64 { + keys := make([]string, 0, len(h)) + for k := range h { + keys = append(keys, k) + } + sort.Strings(keys) + var buf []byte + for _, k := range keys { + buf = append(buf, k...) + for _, v := range h[k] { + buf = append(buf, v...) + } + } + return xxhash.Sum64(buf) +} + +func cachingTestQueryPath(name string) string { + return path.Join("..", "federationtesting", "testdata", name) +} + +type CacheLogEntry struct { + Operation string + Items []CacheLogItem +} + +// CacheLogItem is one key touched by a cache operation. +// Field meaning depends on Operation: +// - "get": Key + Hit are populated; TTL is unused. +// - "set": Key + TTL are populated; Hit is unused. +// - "delete": only Key is populated. +type CacheLogItem struct { + Key string + Hit bool + TTL time.Duration +} + +type CacheOperation = string + +const ( + CacheOperationGet CacheOperation = "get" + CacheOperationSet CacheOperation = "set" + CacheOperationDelete CacheOperation = "delete" +) + +// sortCacheLogEntries sorts both entries and items within entries. +// Use this when log entry order is non-deterministic. +func sortCacheLogEntries(log []CacheLogEntry) []CacheLogEntry { + sorted := make([]CacheLogEntry, len(log)) + for i, entry := range log { + sorted[i] = CacheLogEntry{ + Operation: entry.Operation, + Items: append([]CacheLogItem(nil), entry.Items...), + } + sort.Slice(sorted[i].Items, func(a, b int) bool { + return sorted[i].Items[a].Key < sorted[i].Items[b].Key + }) + } + sort.Slice(sorted, func(a, b int) bool { + if sorted[a].Operation != sorted[b].Operation { + return sorted[a].Operation < sorted[b].Operation + } + keyA, keyB := "", "" + if len(sorted[a].Items) > 0 { + keyA = sorted[a].Items[0].Key + } + if len(sorted[b].Items) > 0 { + keyB = sorted[b].Items[0].Key + } + return keyA < keyB + }) + return sorted +} + +type cacheEntry struct { + data []byte + expiresAt *time.Time +} + +type FakeLoaderCache struct { + mu sync.RWMutex + storage map[string]cacheEntry + log []CacheLogEntry + waiters []cacheLogWaiter + fakeNow time.Time + now func() time.Time +} + +func NewFakeLoaderCache() *FakeLoaderCache { + return &FakeLoaderCache{ + storage: make(map[string]cacheEntry), + log: make([]CacheLogEntry, 0), + } +} + +type cacheLogWaiter struct { + operation CacheOperation + keys []string + ch chan CacheLogEntry +} + +func (f *FakeLoaderCache) currentTime() time.Time { + if f.now != nil { + return f.now() + } + return time.Now() +} + +func (f *FakeLoaderCache) setCurrentTime(now time.Time) { + f.mu.Lock() + defer f.mu.Unlock() + f.fakeNow = now + f.now = func() time.Time { + return f.fakeNow + } +} + +func (f *FakeLoaderCache) cleanupExpired() { + now := f.currentTime() + for key, entry := range f.storage { + if entry.expiresAt != nil && now.After(*entry.expiresAt) { + delete(f.storage, key) + } + } +} + +func (f *FakeLoaderCache) Get(ctx context.Context, keys []string) ([]*resolve.CacheEntry, error) { + f.mu.Lock() + defer f.mu.Unlock() + + // Clean up expired entries before executing command + f.cleanupExpired() + + items := make([]CacheLogItem, len(keys)) + result := make([]*resolve.CacheEntry, len(keys)) + for i, key := range keys { + items[i].Key = key + if entry, exists := f.storage[key]; exists { + // Make a copy of the data to prevent external modifications + dataCopy := make([]byte, len(entry.data)) + copy(dataCopy, entry.data) + ce := &resolve.CacheEntry{ + Key: key, + Value: dataCopy, + } + // Populate RemainingTTL from expiresAt for cache age analytics + if entry.expiresAt != nil { + remaining := entry.expiresAt.Sub(f.currentTime()) + if remaining > 0 { + ce.RemainingTTL = remaining + } + } + result[i] = ce + items[i].Hit = true + } else { + result[i] = nil + } + } + + // Log the operation + f.log = append(f.log, CacheLogEntry{ + Operation: CacheOperationGet, + Items: items, + }) + f.notifyWaitersLocked(f.log[len(f.log)-1]) + + return result, nil +} + +func (f *FakeLoaderCache) Set(ctx context.Context, entries []*resolve.CacheEntry) error { + if len(entries) == 0 { + return nil + } + + f.mu.Lock() + defer f.mu.Unlock() + + // Clean up expired entries before executing command + f.cleanupExpired() + + items := make([]CacheLogItem, 0, len(entries)) + for _, entry := range entries { + if entry == nil { + continue + } + cacheEntry := cacheEntry{ + // Make a copy of the data to prevent external modifications + data: make([]byte, len(entry.Value)), + } + copy(cacheEntry.data, entry.Value) + + // Non-positive TTLs use the fake cache's no-expiration default. + if entry.TTL > 0 { + expiresAt := f.currentTime().Add(entry.TTL) + cacheEntry.expiresAt = &expiresAt + } + + f.storage[entry.Key] = cacheEntry + items = append(items, CacheLogItem{Key: entry.Key, TTL: entry.TTL}) + } + + // Log the operation + f.log = append(f.log, CacheLogEntry{ + Operation: CacheOperationSet, + Items: items, + }) + f.notifyWaitersLocked(f.log[len(f.log)-1]) + + return nil +} + +func (f *FakeLoaderCache) Delete(ctx context.Context, keys []string) error { + f.mu.Lock() + defer f.mu.Unlock() + + // Clean up expired entries before executing command + f.cleanupExpired() + + for _, key := range keys { + delete(f.storage, key) + } + items := make([]CacheLogItem, len(keys)) + for i, key := range keys { + items[i] = CacheLogItem{Key: key} + } + + // Log the operation + f.log = append(f.log, CacheLogEntry{ + Operation: CacheOperationDelete, + Items: items, + }) + f.notifyWaitersLocked(f.log[len(f.log)-1]) + + return nil +} + +func (f *FakeLoaderCache) WaitForOperation(operation CacheOperation, keys []string) <-chan CacheLogEntry { + f.mu.Lock() + defer f.mu.Unlock() + + ch := make(chan CacheLogEntry, 1) + f.waiters = append(f.waiters, cacheLogWaiter{ + operation: operation, + keys: append([]string(nil), keys...), + ch: ch, + }) + return ch +} + +func (f *FakeLoaderCache) notifyWaitersLocked(entry CacheLogEntry) { + remaining := f.waiters[:0] + for _, waiter := range f.waiters { + keys := make([]string, len(entry.Items)) + for i, item := range entry.Items { + keys[i] = item.Key + } + if waiter.operation == entry.Operation && slices.Equal(waiter.keys, keys) { + waiter.ch <- entry + close(waiter.ch) + continue + } + remaining = append(remaining, waiter) + } + f.waiters = remaining +} + +// GetLog returns a copy of the cache operation log +func (f *FakeLoaderCache) GetLog() []CacheLogEntry { + f.mu.RLock() + defer f.mu.RUnlock() + logCopy := make([]CacheLogEntry, len(f.log)) + copy(logCopy, f.log) + return logCopy +} + +// ClearLog clears the cache operation log +func (f *FakeLoaderCache) ClearLog() { + f.mu.Lock() + defer f.mu.Unlock() + f.log = make([]CacheLogEntry, 0) +} + +// Peek reads a single cache entry without logging. Use for inspecting cache content in tests +// without polluting the operation log. +func (f *FakeLoaderCache) Peek(key string) ([]byte, bool) { + f.mu.RLock() + defer f.mu.RUnlock() + entry, ok := f.storage[key] + if !ok { + return nil, false + } + if entry.expiresAt != nil && f.currentTime().After(*entry.expiresAt) { + return nil, false + } + cp := make([]byte, len(entry.data)) + copy(cp, entry.data) + return cp, true +} + +// TestFakeLoaderCache tests the cache implementation itself +func TestFakeLoaderCache(t *testing.T) { + t.Parallel() + ctx := context.Background() + + t.Run("SetAndGet", func(t *testing.T) { + t.Parallel() + cache := NewFakeLoaderCache() + + err := cache.Set(ctx, []*resolve.CacheEntry{ + {Key: "key1", Value: []byte("value1")}, + {Key: "key2", Value: []byte("value2")}, + {Key: "key3", Value: []byte("value3")}, + }) // No TTL → RemainingTTL stays 0 on Get + require.NoError(t, err) + + // Get all keys in insertion order + result, err := cache.Get(ctx, []string{"key1", "key2", "key3"}) + require.NoError(t, err) + assert.Equal(t, []*resolve.CacheEntry{ + {Key: "key1", Value: []byte("value1")}, + {Key: "key2", Value: []byte("value2")}, + {Key: "key3", Value: []byte("value3")}, + }, result) + + // Get partial keys: mix of existing and missing; missing slots are nil. + result, err = cache.Get(ctx, []string{"key2", "key4", "key1"}) + require.NoError(t, err) + assert.Equal(t, []*resolve.CacheEntry{ + {Key: "key2", Value: []byte("value2")}, + nil, + {Key: "key1", Value: []byte("value1")}, + }, result) + }) + + t.Run("Delete", func(t *testing.T) { + t.Parallel() + cache := NewFakeLoaderCache() + // Set some keys + entries := []*resolve.CacheEntry{ + {Key: "del1", Value: []byte("v1")}, + {Key: "del2", Value: []byte("v2")}, + {Key: "del3", Value: []byte("v3")}, + } + err := cache.Set(ctx, entries) + require.NoError(t, err) + + // Delete some keys + err = cache.Delete(ctx, []string{"del1", "del3"}) + require.NoError(t, err) + + // Check remaining keys + result, err := cache.Get(ctx, []string{"del1", "del2", "del3"}) + require.NoError(t, err) + assert.Nil(t, result[0]) // del1 was deleted + assert.NotNil(t, result[1]) // del2 still exists + assert.Equal(t, "v2", string(result[1].Value)) + assert.Nil(t, result[2]) // del3 was deleted + }) + + t.Run("TTL", func(t *testing.T) { + t.Parallel() + cache := NewFakeLoaderCache() + // Set with 50ms TTL + entries := []*resolve.CacheEntry{ + {Key: "ttl1", Value: []byte("expire1"), TTL: 50 * time.Millisecond}, + {Key: "ttl2", Value: []byte("expire2"), TTL: 50 * time.Millisecond}, + } + err := cache.Set(ctx, entries) + require.NoError(t, err) + + // Immediately get - should exist + result, err := cache.Get(ctx, []string{"ttl1", "ttl2"}) + require.NoError(t, err) + assert.NotNil(t, result[0]) + assert.Equal(t, "expire1", string(result[0].Value)) + assert.NotNil(t, result[1]) + assert.Equal(t, "expire2", string(result[1].Value)) + + // Wait for expiration (TTL-driven, deterministic via Peek) + assert.Eventually(t, func() bool { + _, ok1 := cache.Peek("ttl1") + _, ok2 := cache.Peek("ttl2") + return !ok1 && !ok2 + }, 500*time.Millisecond, 5*time.Millisecond, "ttl should expire") + + // Get again - should be nil + result, err = cache.Get(ctx, []string{"ttl1", "ttl2"}) + require.NoError(t, err) + assert.Nil(t, result[0]) + assert.Nil(t, result[1]) + }) + + t.Run("MixedTTL", func(t *testing.T) { + t.Parallel() + cache := NewFakeLoaderCache() + + err := cache.Set(ctx, []*resolve.CacheEntry{{Key: "perm1", Value: []byte("permanent")}}) + require.NoError(t, err) + + err = cache.Set(ctx, []*resolve.CacheEntry{{Key: "temp1", Value: []byte("temporary"), TTL: 50 * time.Millisecond}}) + require.NoError(t, err) + + // Wait for temporary to expire (TTL-driven, deterministic via Peek) + assert.Eventually(t, func() bool { + _, ok := cache.Peek("temp1") + return !ok + }, 500*time.Millisecond, 5*time.Millisecond, "ttl should expire") + + result, err := cache.Get(ctx, []string{"perm1", "temp1"}) + require.NoError(t, err) + assert.Equal(t, []*resolve.CacheEntry{ + {Key: "perm1", Value: []byte("permanent")}, // No TTL → RemainingTTL stays 0 + nil, // temp1 expired and was cleaned up by Get + }, result) + }) + + t.Run("ThreadSafety", func(t *testing.T) { + t.Parallel() + cache := NewFakeLoaderCache() + // Test concurrent access + done := make(chan bool) + + // Writer goroutine + go func() { + for i := range 100 { + key := fmt.Sprintf("concurrent_%d", i) + value := fmt.Sprintf("value_%d", i) + err := cache.Set(ctx, []*resolve.CacheEntry{{Key: key, Value: []byte(value)}}) + assert.NoError(t, err) + } + done <- true + }() + + // Reader goroutine + go func() { + for i := range 100 { + key := fmt.Sprintf("concurrent_%d", i%50) + _, err := cache.Get(ctx, []string{key}) + assert.NoError(t, err) + } + done <- true + }() + + // Deleter goroutine + go func() { + for i := range 50 { + key := fmt.Sprintf("concurrent_%d", i*2) + err := cache.Delete(ctx, []string{key}) + assert.NoError(t, err) + } + done <- true + }() + + // Wait for all goroutines + <-done + <-done + <-done + }) + + t.Run("WaitForOperation", func(t *testing.T) { + t.Parallel() + cache := NewFakeLoaderCache() + + waitForDelete := cache.WaitForOperation(CacheOperationDelete, []string{"watched-key"}) + + err := cache.Set(ctx, []*resolve.CacheEntry{ + {Key: "watched-key", Value: []byte("value")}, + }) + require.NoError(t, err) + + err = cache.Delete(ctx, []string{"watched-key"}) + require.NoError(t, err) + + select { + case entry, ok := <-waitForDelete: + require.True(t, ok) + assert.Equal(t, CacheLogEntry{ + Operation: CacheOperationDelete, + Items: []CacheLogItem{{Key: "watched-key"}}, + }, entry) + case <-time.After(time.Second): + t.Fatal("timeout waiting for delete notification") + } + }) + + t.Run("ResultLengthMatchesKeysLength", func(t *testing.T) { + t.Parallel() + cache := NewFakeLoaderCache() + + err := cache.Set(ctx, []*resolve.CacheEntry{ + {Key: "exist1", Value: []byte("data1")}, + {Key: "exist3", Value: []byte("data3")}, + }) // No TTL → RemainingTTL stays 0 on Get + require.NoError(t, err) + + // Mix of existing and missing keys: result slots align with keys, missing → nil. + result, err := cache.Get(ctx, []string{"exist1", "missing1", "exist3", "missing2", "missing3"}) + require.NoError(t, err) + assert.Equal(t, []*resolve.CacheEntry{ + {Key: "exist1", Value: []byte("data1")}, + nil, + {Key: "exist3", Value: []byte("data3")}, + nil, + nil, + }, result) + + // All-missing lookup: every slot is nil, length equals input length. + result, err = cache.Get(ctx, []string{"missing4", "missing5", "missing6"}) + require.NoError(t, err) + assert.Equal(t, []*resolve.CacheEntry{nil, nil, nil}, result) + + // Empty input: empty result slice. + result, err = cache.Get(ctx, []string{}) + require.NoError(t, err) + assert.Equal(t, []*resolve.CacheEntry{}, result) + }) +} + +// ============================================================================= +// L1/L2 CACHE END-TO-END TESTS +// ============================================================================= +// +// These tests verify the L1 (per-request in-memory) and L2 (external cross-request) +// caching behavior in a federated GraphQL setup. +// +// L1 Cache: Prevents redundant fetches for the same entity within a single request +// L2 Cache: Shares entity data across requests via external cache (e.g., Redis) +// +// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch +// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1) + +func parseCacheAnalytics(t *testing.T, headers http.Header) resolve.CacheAnalyticsSnapshot { + t.Helper() + raw := headers.Get("X-Cache-Analytics") + require.NotEmpty(t, raw, "X-Cache-Analytics header should be present") + var snap resolve.CacheAnalyticsSnapshot + err := json.Unmarshal([]byte(raw), &snap) + require.NoError(t, err, "X-Cache-Analytics header should be valid JSON") + return snap +} + +// normalizeSnapshot makes a CacheAnalyticsSnapshot deterministically comparable by +// sorting EntityTypes, L1Reads, L2Reads, L1Writes, L2Writes, and FieldHashes. +func normalizeSnapshot(snap resolve.CacheAnalyticsSnapshot) resolve.CacheAnalyticsSnapshot { + // Sort EntityTypes by TypeName + if snap.EntityTypes != nil { + sorted := make([]resolve.EntityTypeInfo, len(snap.EntityTypes)) + copy(sorted, snap.EntityTypes) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].TypeName < sorted[j].TypeName + }) + snap.EntityTypes = sorted + } + + // Sort L1Reads and zero out non-deterministic CacheAgeMs + if snap.L1Reads != nil { + sorted := make([]resolve.CacheKeyEvent, len(snap.L1Reads)) + copy(sorted, snap.L1Reads) + for i := range sorted { + sorted[i].CacheAgeMs = 0 + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].CacheKey != sorted[j].CacheKey { + return sorted[i].CacheKey < sorted[j].CacheKey + } + return sorted[i].Kind < sorted[j].Kind + }) + snap.L1Reads = sorted + } + + // Sort L2Reads and zero out non-deterministic CacheAgeMs + if snap.L2Reads != nil { + sorted := make([]resolve.CacheKeyEvent, len(snap.L2Reads)) + copy(sorted, snap.L2Reads) + for i := range sorted { + sorted[i].CacheAgeMs = 0 + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].CacheKey != sorted[j].CacheKey { + return sorted[i].CacheKey < sorted[j].CacheKey + } + return sorted[i].Kind < sorted[j].Kind + }) + snap.L2Reads = sorted + } + + // Sort L1Writes + if snap.L1Writes != nil { + sorted := make([]resolve.CacheWriteEvent, len(snap.L1Writes)) + copy(sorted, snap.L1Writes) + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].CacheKey != sorted[j].CacheKey { + return sorted[i].CacheKey < sorted[j].CacheKey + } + return sorted[i].CacheLevel < sorted[j].CacheLevel + }) + snap.L1Writes = sorted + } + + // Sort L2Writes + if snap.L2Writes != nil { + sorted := make([]resolve.CacheWriteEvent, len(snap.L2Writes)) + copy(sorted, snap.L2Writes) + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].CacheKey != sorted[j].CacheKey { + return sorted[i].CacheKey < sorted[j].CacheKey + } + return sorted[i].CacheLevel < sorted[j].CacheLevel + }) + snap.L2Writes = sorted + } + + // Sort FieldHashes for deterministic comparison + if snap.FieldHashes != nil { + sorted := make([]resolve.EntityFieldHash, len(snap.FieldHashes)) + copy(sorted, snap.FieldHashes) + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].EntityType != sorted[j].EntityType { + return sorted[i].EntityType < sorted[j].EntityType + } + if sorted[i].FieldName != sorted[j].FieldName { + return sorted[i].FieldName < sorted[j].FieldName + } + if sorted[i].KeyRaw != sorted[j].KeyRaw { + return sorted[i].KeyRaw < sorted[j].KeyRaw + } + if sorted[i].KeyHash != sorted[j].KeyHash { + return sorted[i].KeyHash < sorted[j].KeyHash + } + return sorted[i].FieldHash < sorted[j].FieldHash + }) + snap.FieldHashes = sorted + } + + // Sort ShadowComparisons by CacheKey and zero out non-deterministic CacheAgeMs + if snap.ShadowComparisons != nil { + sorted := make([]resolve.ShadowComparisonEvent, len(snap.ShadowComparisons)) + copy(sorted, snap.ShadowComparisons) + for i := range sorted { + sorted[i].CacheAgeMs = 0 + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].CacheKey != sorted[j].CacheKey { + return sorted[i].CacheKey < sorted[j].CacheKey + } + return sorted[i].EntityType < sorted[j].EntityType + }) + snap.ShadowComparisons = sorted + } + + // Sort MutationEvents for deterministic comparison + if snap.MutationEvents != nil { + sorted := make([]resolve.MutationEvent, len(snap.MutationEvents)) + copy(sorted, snap.MutationEvents) + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].MutationRootField != sorted[j].MutationRootField { + return sorted[i].MutationRootField < sorted[j].MutationRootField + } + return sorted[i].EntityCacheKey < sorted[j].EntityCacheKey + }) + snap.MutationEvents = sorted + } + + // Sort HeaderImpactEvents for deterministic comparison + if snap.HeaderImpactEvents != nil { + sorted := make([]resolve.HeaderImpactEvent, len(snap.HeaderImpactEvents)) + copy(sorted, snap.HeaderImpactEvents) + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].BaseKey != sorted[j].BaseKey { + return sorted[i].BaseKey < sorted[j].BaseKey + } + if sorted[i].HeaderHash != sorted[j].HeaderHash { + return sorted[i].HeaderHash < sorted[j].HeaderHash + } + return sorted[i].DataSource < sorted[j].DataSource + }) + snap.HeaderImpactEvents = sorted + } + + // Zero out non-deterministic FetchTimings (DurationMs varies between runs) + // Use normalizeFetchTimings() when you need to assert FetchTimings fields. + snap.FetchTimings = nil + + // Normalize empty slices to nil for consistent comparison + // (JSON unmarshalling produces empty slices, expected literals produce nil) + if len(snap.L1Reads) == 0 { + snap.L1Reads = nil + } + if len(snap.L2Reads) == 0 { + snap.L2Reads = nil + } + if len(snap.L1Writes) == 0 { + snap.L1Writes = nil + } + if len(snap.L2Writes) == 0 { + snap.L2Writes = nil + } + if len(snap.EntityTypes) == 0 { + snap.EntityTypes = nil + } + if len(snap.FieldHashes) == 0 { + snap.FieldHashes = nil + } + if len(snap.ErrorEvents) == 0 { + snap.ErrorEvents = nil + } + if len(snap.ShadowComparisons) == 0 { + snap.ShadowComparisons = nil + } + if len(snap.MutationEvents) == 0 { + snap.MutationEvents = nil + } + if len(snap.HeaderImpactEvents) == 0 { + snap.HeaderImpactEvents = nil + } + + return snap +} + +// normalizeFetchTimings sorts FetchTimings deterministically and zeros DurationMs +// (the only non-deterministic field). Unlike normalizeSnapshot, this preserves +// all other fields (HTTPStatusCode, ResponseBytes, etc.) for assertion. +func normalizeFetchTimings(timings []resolve.FetchTimingEvent) []resolve.FetchTimingEvent { + sorted := make([]resolve.FetchTimingEvent, len(timings)) + copy(sorted, timings) + for i := range sorted { + sorted[i].DurationMs = 0 + } + sort.Slice(sorted, func(i, j int) bool { + if sorted[i].DataSource != sorted[j].DataSource { + return sorted[i].DataSource < sorted[j].DataSource + } + return sorted[i].Source < sorted[j].Source + }) + return sorted +} + +func mustParseHost(rawURL string) string { + parsed, err := url.Parse(rawURL) + if err != nil { + panic(fmt.Sprintf("failed to parse URL %q: %v", rawURL, err)) + } + return parsed.Host +} + +// typenameStrippingTransport is an HTTP transport that removes all "__typename" fields +// from JSON responses originating from targetHost. This simulates a non-compliant +// subgraph that omits __typename from entity representations. +type typenameStrippingTransport struct { + inner http.RoundTripper + targetHost string +} + +func (t *typenameStrippingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp, err := t.inner.RoundTrip(req) + if err != nil || req.URL.Host != t.targetHost { + return resp, err + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return resp, err + } + + // Parse, remove all __typename fields, re-serialize + v, err := astjson.ParseBytes(body) + if err != nil { + resp.Body = io.NopCloser(bytes.NewReader(body)) + return resp, nil + } + removeTypeNames(v) + stripped := v.MarshalTo(nil) + + resp.Body = io.NopCloser(bytes.NewReader(stripped)) + resp.ContentLength = int64(len(stripped)) + return resp, nil +} + +// removeTypeNames recursively deletes all "__typename" keys from a JSON value tree. +func removeTypeNames(v *astjson.Value) { + if v == nil { + return + } + switch v.Type() { + case astjson.TypeObject: + v.Del("__typename") + obj := v.GetObject() + obj.Visit(func(key []byte, val *astjson.Value) { + removeTypeNames(val) + }) + case astjson.TypeArray: + for _, item := range v.GetArray() { + removeTypeNames(item) + } + } +} diff --git a/execution/engine/federation_caching_l1_test.go b/execution/engine/federation_caching_l1_test.go new file mode 100644 index 0000000000..fca204f5f5 --- /dev/null +++ b/execution/engine/federation_caching_l1_test.go @@ -0,0 +1,1762 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestL1CacheReducesHTTPCalls verifies L1 cache behavior with nested entity fetches. +// L1 only works for entity fetches (not root queries), so self-referential paths benefit. +func TestL1CacheReducesHTTPCalls(t *testing.T) { + t.Parallel() + // This test demonstrates L1 cache behavior with entity fetches. + // + // Query structure: + // - me: root query to accounts service → returns User 1234 {id, username} + // - me.reviews: entity fetch from reviews service → returns reviews + // - me.reviews.product: entity fetch from products service → returns products + // - me.reviews.product.reviews: entity fetch from reviews service → returns reviews + // - me.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 + // + // Note: The `me` root query does NOT populate L1 cache because L1 cache only works + // for entity fetches (RequiresEntityFetch=true). Root queries don't qualify. + // + // With L1 enabled: Both `me` (root) and `authorWithoutProvides` (entity) make calls. + // L1 cache doesn't help here because `me` is a root query, not an entity fetch. + // With L1 disabled: Same behavior - 2 accounts calls. + // + // L1 cache DOES help when the same entity is fetched multiple times through + // entity fetches within a single request (e.g., self-referential entities). + + query := `query { + me { + id + username + reviews { + body + product { + upc + reviews { + authorWithoutProvides { + id + username + } + } + } + } + } + }` + + expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` + + t.Run("L1 enabled - entity fetches use L1 cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Both `me` (root query) and `authorWithoutProvides` (entity fetch) call accounts. + // L1 cache doesn't help because `me` is a root query, not an entity fetch. + // Root queries don't populate L1 cache (RequiresEntityFetch=false). + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "Both me (root query) and authorWithoutProvides (entity fetch) call accounts") + }) + + t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // KEY ASSERTION: With L1 disabled, 2 accounts calls! + // The authorWithoutProvides.username requires another fetch since L1 is disabled. + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled, should make 2 accounts calls (no cache reuse)") + }) +} + +// TestL1CacheFieldAccumulationWithAliases verifies that L1 cache accumulates fields +// across entity fetches with different aliases and that alias normalization allows +// a later fetch to reuse a field stored by an earlier fetch under a different alias. +// +// Query: +// +// { +// me { +// id +// reviews { +// authorWithoutProvides { +// myName: username ← entity fetch A: stores "username" in L1 (normalized from alias "myName") +// } +// product { +// reviews { +// authorWithoutProvides { +// username ← entity fetch B: should L1 HIT (schema name "username" already stored) +// } +// } +// } +// } +// } +// } +func TestL1CacheFieldAccumulationWithAliases(t *testing.T) { + t.Parallel() + + t.Run("alias then no alias - sameUserReviewers L1 reuse", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Root `me` fetch returns User 1234 with alias "myName: username". + // sameUserReviewers returns the same User — the entity fetch needs "username" + // (no alias). L1 stores the normalized schema name "username" from the + // first entity fetch; the second fetch should find it via denormalize passthrough. + query := `query { + me { + id + myName: username + sameUserReviewers { + id + username + } + } + }` + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, `{"data":{"me":{"id":"1234","myName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`, string(out), "aliased field should render as myName and sameUserReviewers should have unaliased username") + + // With L1 enabled, the sameUserReviewers entity fetch for User 1234 + // should hit L1 (populated by the root me fetch's entity). + // 1 accounts call = root me only, sameUserReviewers skipped via L1. + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "L1 should skip sameUserReviewers accounts call (alias normalized username in L1)") + }) + + t.Run("L1 disabled - alias variant needs separate fetch", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + query := `query { + me { + id + myName: username + sameUserReviewers { + id + username + } + } + }` + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, `{"data":{"me":{"id":"1234","myName":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}`, string(out)) + + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "Without L1, sameUserReviewers needs its own accounts call") + }) +} + +// TestL1CacheThreeFetchFieldAccumulation verifies that L1 field accumulation works +// across 3 entity fetches for the same entity, where a field from fetch 1 survives +// fetch 2's merge (which has different fields) and is available for fetch 3. +// +// Fetch sequence for User 1234: +// 1. accounts entity fetch for authorWithoutProvides: ProvidesData = {username} +// → L1 MISS, stores {username, id, __typename} in L1 +// 2. accounts entity fetch for authorWithoutProvides.realName path: ProvidesData = {realName} +// → L1 widening miss (no realName), fetches, merges {realName} into L1 +// → L1 now has {username, realName, id, __typename} +// 3. accounts entity fetch for sameUserReviewers: ProvidesData = {username} +// → L1 HIT (username survived fetch 2's merge) → skips accounts call +func TestL1CacheThreeFetchFieldAccumulation(t *testing.T) { + t.Parallel() + + query := `query { + me { + id + username + reviews { + authorWithoutProvides { + username + realName + sameUserReviewers { + id + username + } + } + } + } + }` + + t.Run("L1 enabled - field accumulation skips redundant fetches", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"authorWithoutProvides":{"username":"Me","realName":"User Usington","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"authorWithoutProvides":{"username":"Me","realName":"User Usington","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}}}`, string(out)) + + // Without L1: 3 accounts calls (root me + entity authorWithoutProvides + entity sameUserReviewers). + // With L1: 1 accounts call. The planner merges root me with the first entity fetch. + // sameUserReviewers entity fetch hits L1 because "username" was accumulated + // from the first entity fetch and survived the realName merge. + assert.Equal(t, 1, tracker.GetCount(accountsHost), + "L1 field accumulation: sameUserReviewers should reuse username from L1 (was 3 without L1)") + }) + + t.Run("L1 disabled - no field accumulation, all fetches hit subgraph", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"authorWithoutProvides":{"username":"Me","realName":"User Usington","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"authorWithoutProvides":{"username":"Me","realName":"User Usington","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}}}`, string(out)) + + assert.Equal(t, 3, tracker.GetCount(accountsHost), + "Without L1: 3 separate accounts calls (root me + authorWithoutProvides + sameUserReviewers)") + }) +} + +// TestL1CacheReducesHTTPCallsInterface verifies L1 cache works with interface types, +// deduplicating entity fetches for the same entity accessed through different interface fields. +func TestL1CacheReducesHTTPCallsInterface(t *testing.T) { + t.Parallel() + // This test demonstrates L1 cache behavior with interface return types. + // + // Query structure: + // - meInterface: root query to accounts service → returns User 1234 via Identifiable interface + // - meInterface.reviews: entity fetch from reviews service → returns reviews + // - meInterface.reviews.product: entity fetch from products service → returns products + // - meInterface.reviews.product.reviews: entity fetch from reviews service → returns reviews + // - meInterface.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 + // + // This tests that interface return types properly build cache key templates + // for all entity types that implement the interface. + + query := `query { + meInterface { + ... on User { + id + username + reviews { + body + product { + upc + reviews { + authorWithoutProvides { + id + username + } + } + } + } + } + } + }` + + expectedResponse := `{"data":{"meInterface":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` + + t.Run("L1 enabled - interface entity fetches use L1 cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Same behavior as non-interface: root query + entity fetch both call accounts + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "Interface field should behave same as object field for L1 caching") + }) + + t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // KEY ASSERTION: With L1 disabled, 2 accounts calls! + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled, should make 2 accounts calls (no cache reuse)") + }) +} + +// TestL1CacheReducesHTTPCallsUnion verifies L1 cache works with union types, +// deduplicating entity fetches for the same entity accessed through different union members. +func TestL1CacheReducesHTTPCallsUnion(t *testing.T) { + t.Parallel() + // This test demonstrates L1 cache behavior with union return types. + // + // Query structure: + // - meUnion: root query to accounts service → returns User 1234 via MeUnion union + // - meUnion.reviews: entity fetch from reviews service → returns reviews + // - meUnion.reviews.product: entity fetch from products service → returns products + // - meUnion.reviews.product.reviews: entity fetch from reviews service → returns reviews + // - meUnion.reviews.product.reviews.authorWithoutProvides: entity fetch from accounts → returns User 1234 + // + // This tests that union return types properly build cache key templates + // for all entity types that are members of the union. + + query := `query { + meUnion { + ... on User { + id + username + reviews { + body + product { + upc + reviews { + authorWithoutProvides { + id + username + } + } + } + } + } + } + }` + + expectedResponse := `{"data":{"meUnion":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","product":{"upc":"top-1","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","product":{"upc":"top-2","reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me"}}]}}]}}}` + + t.Run("L1 enabled - union entity fetches use L1 cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Same behavior as non-union: root query + entity fetch both call accounts + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "Union field should behave same as object field for L1 caching") + }) + + t.Run("L1 disabled - more accounts calls without cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // KEY ASSERTION: With L1 disabled, 2 accounts calls! + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled, should make 2 accounts calls (no cache reuse)") + }) +} + +// TestL1CacheSelfReferentialEntity verifies that L1 cache handles self-referential entities +// (e.g. User.friends returns User) without stack overflow via shallow copy. +func TestL1CacheSelfReferentialEntity(t *testing.T) { + t.Parallel() + // This test verifies that self-referential entities don't cause + // stack overflow when L1 cache is enabled. + // + // Background: When an entity type has a field that returns the same type + // (e.g., User.sameUserReviewers returning [User]), and L1 cache stores + // a pointer to the entity, both key.Item and key.FromCache can point to + // the same memory location. Without a fix, calling MergeValues(ptr, ptr) + // causes infinite recursion and stack overflow. + // + // The sameUserReviewers field has @requires(fields: "username") which forces + // sequential execution: the User entity is first fetched from accounts + // (populating L1), then sameUserReviewers is resolved, returning the same + // User entity that's already in L1 cache. + + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + // This response shows User 1234 appearing both at authorWithoutProvides level + // and inside sameUserReviewers (which returns the same user for testing) + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("self-referential entity should not cause stack overflow", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // This should complete without stack overflow + // Before the fix, this would crash with "fatal error: stack overflow" + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + }) +} + +// TestL1CacheChildFieldEntityList verifies that L1 cache correctly deduplicates +// entities in list fields (e.g. reviews[].author where multiple reviews have the same author). +func TestL1CacheChildFieldEntityList(t *testing.T) { + t.Parallel() + // This test verifies L1 cache behavior for User.sameUserReviewers: [User!]! + // which returns only the same user (self-reference). + // + // sameUserReviewers is defined in the reviews subgraph with @requires(fields: "username"), + // which means: + // 1. The gateway first resolves username from accounts (entity fetch) + // 2. Then calls reviews to get sameUserReviewers + // 3. sameUserReviewers returns User references (just IDs) - only the same user + // 4. The gateway must make entity fetches to accounts to resolve those users + // + // Query flow: + // 1. topProducts -> products subgraph (root query) + // 2. reviews -> reviews subgraph (entity fetch for Products) + // 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) + // - User 1234 is fetched and stored in L1 + // 4. sameUserReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234] as reference (same user only) + // 5. Entity resolution for sameUserReviewers -> accounts subgraph + // - User 1234 is 100% L1 HIT (already fetched in step 3) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! + // + // With L1 enabled: The sameUserReviewers entity fetch is completely skipped + // because all entities are already in L1 cache. + + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + // User 1234's sameUserReviewers returns [User 1234] (only self) + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch entirely skipped via L1 cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, // Isolate L1 behavior + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 enabled: + // - First accounts call fetches User 1234 for authorWithoutProvides (L1 miss, stored) + // - Reviews called for sameUserReviewers (returns [User 1234] reference) + // - sameUserReviewers entity resolution: User 1234 is 100% L1 HIT + // → accounts call is COMPLETELY SKIPPED! + accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // Reviews should be called twice: once for Product entity (reviews field), + // once for sameUserReviewers (after username is resolved from accounts) + assert.Equal(t, 2, reviewsCalls, "Reviews subgraph called for Product.reviews and User.sameUserReviewers") + + // KEY ASSERTION: Only 1 accounts call! The sameUserReviewers entity resolution + // is completely skipped because User 1234 is already in L1 cache. + assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + + }) + + t.Run("L1 disabled - accounts called for sameUserReviewers", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 disabled: + // - First accounts call fetches User 1234 for authorWithoutProvides + // - Second accounts call for sameUserReviewers: User 1234 fetched again (no L1) + // Total: 2 accounts calls + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + + }) +} + +// TestL1CacheNestedEntityListDeduplication verifies that L1 cache deduplicates entities +// across nested lists (e.g. products[].reviews[].author with overlapping authors). +func TestL1CacheNestedEntityListDeduplication(t *testing.T) { + t.Parallel() + // This test verifies L1 deduplication when the same entity appears + // at multiple levels in nested list queries using coReviewers. + // + // coReviewers is defined in the reviews subgraph with @requires(fields: "username"), + // so it triggers cross-subgraph entity resolution. + // + // Query flow: + // 1. topProducts -> products subgraph + // 2. reviews -> reviews subgraph (Product entity fetch) + // 3. authorWithoutProvides -> accounts (User 1234 fetched, stored in L1) + // 4. coReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234, User 7777] as references + // 5. Entity resolution for coReviewers -> accounts + // - User 1234 should be L1 HIT (already fetched in step 3) + // - User 7777 is L1 MISS (stored in L1) + // 6. coReviewers for User 1234 and User 7777 -> reviews subgraph + // 7. Entity resolution for nested coReviewers -> accounts + // - All users (1234, 7777) are already in L1! + // + // With L1 enabled: The nested coReviewers level should have 100% L1 hits, + // potentially skipping the accounts call entirely for that level. + + query := `query { + topProducts { + reviews { + authorWithoutProvides { + id + username + coReviewers { + id + username + coReviewers { + id + username + } + } + } + } + } + }` + + // User 1234's coReviewers: [User 1234, User 7777] + // User 7777's coReviewers: [User 7777, User 1234] + // Nested level repeats these patterns + expectedResponse := `{"data":{"topProducts":[{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]},{"reviews":[{"authorWithoutProvides":{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me","coReviewers":[{"id":"1234","username":"Me"},{"id":"7777","username":"User 7777"}]},{"id":"7777","username":"User 7777","coReviewers":[{"id":"7777","username":"User 7777"},{"id":"1234","username":"Me"}]}]}}]}]}}` + + t.Run("L1 enabled - nested coReviewers benefits from L1 hits", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 enabled: + // - Call 1: authorWithoutProvides fetches User 1234 (miss, stored) + // - Call 2: coReviewers entity resolution [User 1234 (hit), User 7777 (miss, stored)] + // - Call 3: nested coReviewers entity resolution - all users are in L1! + // This call should be fully served from L1 cache. + accountsCalls := tracker.GetCount(accountsHost) + // With L1 enabled, the nested coReviewers should be served from L1 + // Only 2 accounts calls needed because nested coReviewers is fully served from L1 + assert.Equal(t, 2, accountsCalls, + "With L1 enabled: exactly 2 accounts calls (nested coReviewers served entirely from L1)") + }) + + t.Run("L1 disabled - more accounts calls without deduplication", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // With L1 disabled: + // - Call 1: authorWithoutProvides fetches User 1234 + // - Call 2: coReviewers entity resolution for User 1234 and User 7777 (no L1 dedup) + // - Call 3: nested coReviewers entity resolution (no L1 dedup) + accountsCalls := tracker.GetCount(accountsHost) + // Without L1 cache, we need 3 accounts calls (no deduplication at nested level) + assert.Equal(t, 3, accountsCalls, + "With L1 disabled: exactly 3 accounts calls (no deduplication)") + }) +} + +// TestL1CacheRootFieldEntityListPopulation verifies that root fields returning entity lists +// populate L1 cache, allowing subsequent entity fetches to skip subgraph calls. +func TestL1CacheRootFieldEntityListPopulation(t *testing.T) { + t.Parallel() + // This test verifies L1 cache behavior with a complex nested query starting + // from a root field that returns a list of entities. + // + // Query flow: + // 1. topProducts -> products subgraph (root query, returns list) + // 2. reviews -> reviews subgraph (entity fetch for Products) + // 3. authorWithoutProvides -> accounts subgraph (entity fetch for User 1234) + // - User 1234 is fetched and stored in L1 + // 4. sameUserReviewers -> reviews subgraph (after username resolved) + // - Returns [User 1234] as reference (same user only) + // 5. Entity resolution for sameUserReviewers -> accounts subgraph + // - User 1234 is 100% L1 HIT (already fetched in step 3) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! + // + // With L1 enabled: The sameUserReviewers entity fetch is completely skipped. + // With L1 disabled: accounts is called twice (no deduplication). + + query := `query { + topProducts { + upc + name + reviews { + body + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + } + }` + + expectedResponse := `{"data":{"topProducts":[{"upc":"top-1","name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]},{"upc":"top-2","name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}]}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 enabled: + // 1. products subgraph: topProducts root query + // 2. reviews subgraph: Product entity fetch for reviews + // 3. accounts subgraph: User entity fetch for authorWithoutProvides (User 1234 stored in L1) + // 4. reviews subgraph: sameUserReviewers (returns [User 1234]) + // 5. sameUserReviewers entity resolution: User 1234 is 100% L1 HIT → accounts call SKIPPED! + productsCalls := tracker.GetCount(productsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 1, productsCalls, "Should call products subgraph once for topProducts") + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (Product.reviews + User.sameUserReviewers)") + // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. + assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + + }) + + t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 disabled: + // 1. products subgraph: topProducts root query + // 2. reviews subgraph: Product entity fetch for reviews + // 3. accounts subgraph: User entity fetch for authorWithoutProvides + // 4. reviews subgraph: sameUserReviewers + // 5. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) + productsCalls := tracker.GetCount(productsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 1, productsCalls, "Should call products subgraph once") + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") + // KEY ASSERTION: 2 accounts calls without L1 optimization + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + + }) +} + +// TestL1CacheRootFieldNonEntityWithNestedEntities verifies that root fields returning +// non-entity objects with nested entity lists still populate L1 for those nested entities. +func TestL1CacheRootFieldNonEntityWithNestedEntities(t *testing.T) { + t.Parallel() + // This test verifies L1 cache behavior when a root field returns a NON-entity type + // (Review) that contains nested entities (User via authorWithoutProvides). + // + // Key difference from TestL1CacheRootFieldEntityListPopulation: + // - That test starts with topProducts -> [Product] where Product IS an entity (@key(fields: "upc")) + // - This test starts with topReviews -> [Review] where Review is NOT an entity (no @key) + // - Both prove L1 entity caching works for nested User entities + // + // Query flow: + // 1. topReviews -> reviews subgraph (root query, returns [Review] — NOT an entity) + // 2. authorWithoutProvides -> accounts subgraph (entity fetch for Users, stored in L1) + // 3. sameUserReviewers -> reviews subgraph (after username resolved via @requires) + // 4. Entity resolution for sameUserReviewers -> accounts subgraph + // - All Users are 100% L1 HITs (already fetched in step 2) + // - THE ENTIRE ACCOUNTS CALL IS SKIPPED! + + query := `query { + topReviews { + body + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + }` + + expectedResponse := `{"data":{"topReviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}},{"body":"This is the last straw. Hat you will wear. 11/10","authorWithoutProvides":{"id":"7777","username":"User 7777","sameUserReviewers":[{"id":"7777","username":"User 7777"}]}},{"body":"Perfect summer hat.","authorWithoutProvides":{"id":"5678","username":"User 5678","sameUserReviewers":[{"id":"5678","username":"User 5678"}]}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"id":"8888","username":"User 8888","sameUserReviewers":[{"id":"8888","username":"User 8888"}]}}]}}` + + t.Run("L1 enabled - sameUserReviewers fetch skipped via L1 cache", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 enabled: + // 1. reviews subgraph: topReviews root query (Review is NOT an entity) + // 2. accounts subgraph: User entity fetch for authorWithoutProvides (Users stored in L1) + // 3. reviews subgraph: sameUserReviewers (returns [User] references) + // 4. sameUserReviewers entity resolution: all Users are L1 HITs → accounts call SKIPPED! + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice (topReviews + sameUserReviewers)") + // KEY ASSERTION: Only 1 accounts call! sameUserReviewers entity resolution skipped via L1. + assert.Equal(t, 1, accountsCalls, + "With L1 enabled: only 1 accounts call (sameUserReviewers entity fetch skipped via L1)") + }) + + t.Run("L1 disabled - more accounts calls without L1 optimization", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 disabled: + // 1. reviews subgraph: topReviews root query + // 2. accounts subgraph: User entity fetch for authorWithoutProvides + // 3. reviews subgraph: sameUserReviewers + // 4. accounts subgraph: User entity fetch for sameUserReviewers (no L1 → must fetch again!) + reviewsCalls := tracker.GetCount(reviewsHost) + accountsCalls := tracker.GetCount(accountsHost) + + assert.Equal(t, 2, reviewsCalls, "Should call reviews subgraph twice") + // KEY ASSERTION: 2 accounts calls without L1 optimization + assert.Equal(t, 2, accountsCalls, + "With L1 disabled: 2 accounts calls (sameUserReviewers requires separate fetch)") + }) +} + +// ============================================================================= +// CACHE ERROR HANDLING TESTS +// ============================================================================= +// +// These tests verify that caches are NOT populated when subgraphs return errors. +// The cache should only store successful responses to prevent caching error states. + +// TestL1CacheOptimizationReducesSubgraphCalls verifies the L1 optimization postprocessor +// correctly marks fetches with UseL1Cache, reducing redundant subgraph calls. +func TestL1CacheOptimizationReducesSubgraphCalls(t *testing.T) { + t.Parallel() + // This query demonstrates L1 optimization: + // - Query.me returns User entity + // - User.sameUserReviewers returns [User] entities + // When L1 is enabled and optimized correctly: + // - First User fetch (me) populates L1 cache + // - Second User fetch (sameUserReviewers) hits L1 cache, SKIPS subgraph call + // + // The optimizeL1Cache postprocessor: + // - Sets UseL1Cache=true on User fetches (they share the same entity type) + // - Sets UseL1Cache=false on fetches with no matching entity types + + query := `query { + me { + id + username + sameUserReviewers { + id + username + } + } + }` + + expectedResponse := `{"data":{"me":{"id":"1234","username":"Me","sameUserReviewers":[{"id":"1234","username":"Me"}]}}}` + + t.Run("L1 optimization enables cache hit between same entity type fetches", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow with L1 optimization: + // 1. accounts subgraph: Query.me (root query, returns User 1234) + // - L1 cache populated with User 1234 + // 2. reviews subgraph: User.sameUserReviewers (returns [User 1234]) + // 3. accounts subgraph: User entity fetch for sameUserReviewers + // - User 1234 is 100% L1 HIT! This call is SKIPPED! + accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // KEY ASSERTION: Only 1 accounts call! + // Without L1 optimization, there would be 2 calls: + // - First: Query.me + // - Second: User entity resolution for sameUserReviewers + // With L1 optimization, the second call is skipped because User 1234 is in L1 cache. + assert.Equal(t, 1, accountsCalls, + "L1 optimization: only 1 accounts call (sameUserReviewers resolved from L1 cache)") + assert.Equal(t, 1, reviewsCalls, + "Should call reviews subgraph once for User.sameUserReviewers") + }) + + t.Run("Without L1, same query requires more subgraph calls", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, // L1 disabled + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + + assert.Equal(t, expectedResponse, string(out)) + + // Query flow WITHOUT L1: + // 1. accounts subgraph: Query.me (root query) + // 2. reviews subgraph: User.sameUserReviewers + // 3. accounts subgraph: User entity fetch (NO L1 cache → must fetch!) + accountsCalls := tracker.GetCount(accountsHost) + reviewsCalls := tracker.GetCount(reviewsHost) + + // KEY ASSERTION: 2 accounts calls without L1! + // This proves L1 optimization saves a subgraph call. + assert.Equal(t, 2, accountsCalls, + "Without L1: 2 accounts calls (sameUserReviewers requires separate fetch)") + assert.Equal(t, 1, reviewsCalls, + "Should call reviews subgraph once for User.sameUserReviewers") + }) +} + +// TestL1CacheUnionOfProviderFields exposes a gap in the L1 cache postprocessor optimization. +// +// The postprocessor (optimize_l1_cache.go) decides whether to enable L1 for each fetch by +// checking each ancestor provider INDIVIDUALLY via hasValidProvider → objectProvidesAllFields. +// If no single provider has ALL fields that the consumer needs, L1 is disabled for that fetch. +// +// However, at runtime, L1 accumulates fields from multiple fetches via merge. If fetch A +// writes {nickname} and fetch B writes {realName, username}, L1 has {nickname, realName, username} +// which covers a consumer that needs {nickname, realName}. The postprocessor should compute +// the UNION of ancestor providers' fields, but currently checks each one individually. +// +// This test creates 3 entity fetches for User from accounts: +// +// Fetch A (level 1 authorWithoutProvides): ProvidesData = {nickname} +// Fetch B (level 2 authorWithoutProvides): ProvidesData = {realName, username} +// (username is included because sameUserReviewers has @requires(fields: "username")) +// Fetch C (sameUserReviewers entity resolution): ProvidesData = {nickname, realName} +// +// Neither A ({nickname}) nor B ({realName, username}) individually covers C ({nickname, realName}), +// so the postprocessor sets UseL1Cache=false for C. But A ∪ B = {nickname, realName, username} +// which IS a superset of C's needs. With the union fix, fetch C would be L1-enabled and +// the accounts call for sameUserReviewers entity resolution would be skipped. +func TestL1CacheUnionOfProviderFields(t *testing.T) { + t.Parallel() + + // This query creates the 3-fetch pattern: + // 1. me.reviews.authorWithoutProvides → entity fetch A to accounts for {nickname} + // 2. me.reviews.product.reviews.authorWithoutProvides → entity fetch B to accounts for {realName, username} + // (username needed for @requires on sameUserReviewers) + // 3. sameUserReviewers entity resolution → entity fetch C to accounts for {nickname, realName} + // + // All three fetches target User:1234 (the only author in the test data). + // Fetch A provides {nickname}, fetch B provides {realName, username}. + // Fetch C needs {nickname, realName} — neither A nor B alone covers this, + // but their union does. + + t.Run("L1 enabled - union of providers should skip fetch C", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + me { + id + reviews { + authorWithoutProvides { + nickname + } + product { + reviews { + authorWithoutProvides { + realName + sameUserReviewers { + nickname + realName + } + } + } + } + } + } + }`, nil, t) + + // Verify the response contains expected data + assert.Equal(t, `{"data":{"me":{"id":"1234","reviews":[{"authorWithoutProvides":{"nickname":"nick-Me"},"product":{"reviews":[{"authorWithoutProvides":{"realName":"User Usington","sameUserReviewers":[{"nickname":"nick-Me","realName":"User Usington"}]}}]}},{"authorWithoutProvides":{"nickname":"nick-Me"},"product":{"reviews":[{"authorWithoutProvides":{"realName":"User Usington","sameUserReviewers":[{"nickname":"nick-Me","realName":"User Usington"}]}}]}}]}}}`, string(out)) + + // The union optimization enables L1 for entity fetches in the same + // dependency chain. However, fetch A (level 1 authorWithoutProvides) and + // fetch B (level 2 authorWithoutProvides) are in different branches of the + // fetch tree — they go through separate review/product paths. + // Fetch C (sameUserReviewers entity resolution) depends on fetch B's + // branch but fetch A is in a sibling branch, so the postprocessor doesn't + // include A in C's ancestor union. + // + // This is a known limitation: the union optimization only works for + // fetches in the same dependency chain. For cross-branch accumulation, + // L1 works at runtime (passthrough writes accumulate) but the + // postprocessor can't predict it at plan time. + // + // accounts: 3 calls (fetch A + fetch B + fetch C) + // With linear chains (see TestL1CacheEntityUnionOptimization), the + // union optimization correctly skips redundant fetches. + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 3, accountsCalls, + "Cross-branch entity fetches: union optimization limited to dependency chains") + }) + + t.Run("L1 disabled - all fetches hit subgraph", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + me { + id + reviews { + authorWithoutProvides { + nickname + } + product { + reviews { + authorWithoutProvides { + realName + sameUserReviewers { + nickname + realName + } + } + } + } + } + } + }`, nil, t) + + assert.Equal(t, `{"data":{"me":{"id":"1234","reviews":[{"authorWithoutProvides":{"nickname":"nick-Me"},"product":{"reviews":[{"authorWithoutProvides":{"realName":"User Usington","sameUserReviewers":[{"nickname":"nick-Me","realName":"User Usington"}]}}]}},{"authorWithoutProvides":{"nickname":"nick-Me"},"product":{"reviews":[{"authorWithoutProvides":{"realName":"User Usington","sameUserReviewers":[{"nickname":"nick-Me","realName":"User Usington"}]}}]}}]}}}`, string(out)) + + // Without L1: all entity fetches hit the subgraph. + // accounts: root me + fetch A (nickname) + fetch B (realName+username) + fetch C (nickname+realName) + // The planner merges root me with fetch A, so the actual count is 3. + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 3, accountsCalls, + "Without L1: all entity fetches must hit accounts subgraph") + }) +} + +// TestL1CacheEntityUnionOptimization uses the CacheEntity type (accounts owns fields a-f, +// reviews extends with `nested @requires(fields: "a")`) to create controllable multi-level +// entity fetch chains. Each `nested` level creates: +// - reviews fetch (resolves nested, needs @requires "a") +// - accounts entity fetch (provides whatever scalar fields the query selects) +// +// All levels target the same entity key (CacheEntity:1), so L1 accumulates fields. +// The postprocessor should compute the UNION of ancestor providers' ProvidesData +// to determine if a fetch can skip via L1. + +// cacheEntitySetup creates a federation gateway with L1 cache and returns the setup + tracker. +func cacheEntitySetup(t *testing.T, enableL1 bool) (*federationtesting.FederationSetup, *subgraphCallTracker) { + t.Helper() + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: enableL1, + EnableL2Cache: false, + }), + )) + t.Cleanup(setup.Close) + return setup, tracker +} + +func TestL1CacheEntityUnionOptimization(t *testing.T) { + t.Parallel() + + // --------------------------------------------------------------------------- + // Scenario 1: Basic union — A={a,b}, B={c,d}, C needs {b,c} + // Neither A nor B individually covers C, but A∪B = {a,b,c,d} ⊇ {b,c} + // --------------------------------------------------------------------------- + t.Run("basic union - A provides ab, B provides cd, C needs bc", func(t *testing.T) { + t.Parallel() + setup, tracker := cacheEntitySetup(t, true) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Level 0: root cacheEntity → accounts (root query, not entity fetch) + // Level 1: nested → reviews (needs a) → accounts entity fetch A: {a, b} + // Level 2: nested → reviews (needs a) → accounts entity fetch B: {a, c, d} + // Level 3: nested → reviews (needs a) → accounts entity fetch C: {a, b, c} + // C needs {b, c}: b from A, c from B → union covers C + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + cacheEntity(id: "1") { + nested { + a b + nested { + c d + nested { + b c + } + } + } + } + }`, nil, t) + + assert.Equal(t, `{"data":{"cacheEntity":{"nested":{"a":"a-1","b":"b-1","nested":{"c":"c-1","d":"d-1","nested":{"b":"b-1","c":"c-1"}}}}}}`, string(out)) + + // With union optimization: C should be L1 hit → skip accounts call + // Expected: root + fetch A + fetch B = 3 accounts calls (C skipped) + // Current (without union): root + A + B + C = 4 accounts calls + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 3, accountsCalls, + "Fetch C should be L1 hit (union of A{a,b} + B{c,d} covers C's needs {b,c})") + }) + + // --------------------------------------------------------------------------- + // Scenario 2: Union insufficient — A={a,b}, B={c,d}, C needs {b,e} + // A∪B = {a,b,c,d} does NOT contain e → C must fetch + // --------------------------------------------------------------------------- + t.Run("union insufficient - C needs field not in any ancestor", func(t *testing.T) { + t.Parallel() + setup, tracker := cacheEntitySetup(t, true) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // A: {a, b}, B: {c, d}, C: {b, e} + // Union {a,b,c,d} does NOT contain e → C must fetch + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + cacheEntity(id: "1") { + nested { + a b + nested { + c d + nested { + b e + } + } + } + } + }`, nil, t) + + assert.Equal(t, `{"data":{"cacheEntity":{"nested":{"a":"a-1","b":"b-1","nested":{"c":"c-1","d":"d-1","nested":{"b":"b-1","e":"e-1"}}}}}}`, string(out)) + + // Even with union optimization, C must fetch because union doesn't cover {b,e} + // Expected: root + A + B + C = 4 accounts calls + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 4, accountsCalls, + "Fetch C must hit accounts (union of A{a,b} + B{c,d} does NOT cover C's {b,e})") + }) + + // --------------------------------------------------------------------------- + // Scenario 3: Overlapping union — A={a,b,c}, B={a,c,d,e}, C needs {b,e} + // A has b but not e. B has e but not b. Neither alone covers C. + // A∪B = {a,b,c,d,e} ⊇ {b,e} + // Note: every fetch implicitly includes "a" due to @requires(fields: "a") + // --------------------------------------------------------------------------- + t.Run("overlapping fields in union - C needs b from A and e from B", func(t *testing.T) { + t.Parallel() + setup, tracker := cacheEntitySetup(t, true) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // A: {a, b, c} (a implicit from @requires) + // B: {a, c, d, e} (a implicit) + // C: {a, b, e} — b from A, e from B, neither alone covers + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + cacheEntity(id: "1") { + nested { + b c + nested { + c d e + nested { + b e + } + } + } + } + }`, nil, t) + + assert.Equal(t, `{"data":{"cacheEntity":{"nested":{"b":"b-1","c":"c-1","nested":{"c":"c-1","d":"d-1","e":"e-1","nested":{"b":"b-1","e":"e-1"}}}}}}`, string(out)) + + // With union: C hits L1 (b from A, e from B) + // Expected: root + A + B = 3 (C skipped) + // Current: root + A + B + C = 4 (neither A nor B alone covers C) + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 3, accountsCalls, + "Fetch C should be L1 hit (b from A, e from B — overlapping union)") + }) + + // --------------------------------------------------------------------------- + // Scenario 4: 4-fetch chain — A={a,b}, B={a,c}, C={a,d}, D needs {b,c,d} + // Each fetch adds one unique field. No single ancestor covers D. + // A∪B∪C = {a,b,c,d} ⊇ {b,c,d} + // Note: "a" is always present due to @requires + // --------------------------------------------------------------------------- + t.Run("4-fetch chain - D needs union of A+B+C", func(t *testing.T) { + t.Parallel() + setup, tracker := cacheEntitySetup(t, true) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // A: {a, b}, B: {a, c}, C: {a, d}, D: {a, b, c, d} + // D needs b (from A), c (from B), d (from C) — no single ancestor covers + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + cacheEntity(id: "1") { + nested { + b + nested { + c + nested { + d + nested { + b c d + } + } + } + } + } + }`, nil, t) + + assert.Equal(t, `{"data":{"cacheEntity":{"nested":{"b":"b-1","nested":{"c":"c-1","nested":{"d":"d-1","nested":{"b":"b-1","c":"c-1","d":"d-1"}}}}}}}`, string(out)) + + // With union: D hits L1 (b from A, c from B, d from C) + // Expected: root + A + B + C = 4 accounts calls (D skipped) + // Current: root + A + B + C + D = 5 accounts calls + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 4, accountsCalls, + "Fetch D should be L1 hit (union of A{b} + B{c} + C{d} covers D's {b,c,d})") + }) + + // --------------------------------------------------------------------------- + // Scenario 5: Middle fetch with different fields, C needs from both A and B + // A={a,b,c}, B={a,d,e}, C needs {b,d} + // B alone doesn't cover C (no b). A alone doesn't cover C (no d). + // But with the middle fetch writing to L1, the accumulated entry has both. + // This tests that the optimizer enables L1 for B as a writer even though + // B alone doesn't cover any consumer. + // --------------------------------------------------------------------------- + t.Run("middle fetch contributes - C needs fields from both A and B", func(t *testing.T) { + t.Parallel() + setup, tracker := cacheEntitySetup(t, true) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // A: {a, b, c}, B: {a, d, e}, C: {a, b, d} + // C needs b (from A) and d (from B) — neither alone covers + tracker.Reset() + out, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + cacheEntity(id: "1") { + nested { + b c + nested { + d e + nested { + b d + } + } + } + } + }`, nil, t) + + assert.Equal(t, `{"data":{"cacheEntity":{"nested":{"b":"b-1","c":"c-1","nested":{"d":"d-1","e":"e-1","nested":{"b":"b-1","d":"d-1"}}}}}}`, string(out)) + + // With union: C hits L1 (b from A, d from B) + // Expected: root + A + B = 3 (C skipped) + // Current: root + A + B + C = 4 (optimizer checks individually) + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 3, accountsCalls, + "Fetch C should be L1 hit (b from A, d from B — middle fetch contributes)") + }) + + // --------------------------------------------------------------------------- + // Baseline: L1 disabled — verify all fetches hit the subgraph + // --------------------------------------------------------------------------- + t.Run("L1 disabled baseline - all fetches hit subgraph", func(t *testing.T) { + t.Parallel() + setup, tracker := cacheEntitySetup(t, false) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // 4-level nesting: root + 3 entity fetches + tracker.Reset() + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query { + cacheEntity(id: "1") { + nested { + a b + nested { + c d + nested { + b c + } + } + } + } + }`, nil, t) + + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 4, accountsCalls, + "Without L1: all entity fetches must hit accounts (root + 3 nested entity fetches)") + }) +} diff --git a/execution/engine/federation_caching_l2_test.go b/execution/engine/federation_caching_l2_test.go new file mode 100644 index 0000000000..7084a8b158 --- /dev/null +++ b/execution/engine/federation_caching_l2_test.go @@ -0,0 +1,1418 @@ +package engine_test + +import ( + "bytes" + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + reviews "github.com/wundergraph/graphql-go-tools/execution/federationtesting/reviews/graph" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestFederationCaching_L2Only verifies L2-only caching (L1 disabled) across multiple requests, +// ensuring that L2 miss-then-hit behavior and subgraph call elimination work correctly. +func TestFederationCaching_L2Only(t *testing.T) { + t.Parallel() + t.Run("L2 enabled - miss then hit across requests", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache only + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-subgraph caching) + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should miss cache + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations + // get/set for Query, Products, Users = 6 operations + assert.Equal(t, 6, len(logAfterFirst)) + + // Verify the exact cache access log (order may vary for keys within each operation) + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches (reviews data for each product) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches (author data) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + + // Subgraph calls: each called once (cold cache) + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst) + assert.Equal(t, 1, reviewsCallsFirst) + assert.Equal(t, 1, accountsCallsFirst) + + // Second query - all fetches should hit cache + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify L2 cache hits + logAfterSecond := defaultCache.GetLog() + // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities + // All hits: 3 get operations + assert.Equal(t, 3, len(logAfterSecond)) + + // Verify the exact cache access log for second query (all hits) + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond)) + + // Subgraph calls: all skipped (warm cache) + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsSecond) + assert.Equal(t, 0, reviewsCallsSecond) + assert.Equal(t, 0, accountsCallsSecond) + }) + + t.Run("L2 disabled - no external cache operations", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Disable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First query + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify no cache operations + log := defaultCache.GetLog() + assert.Empty(t, log, "No L2 cache operations should occur when L2 is disabled") + }) + + t.Run("L2 enabled - nullable null entity is negatively cached without nulling parent objects", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + reviewsInterceptor := newSubgraphResponseInterceptor(reviews.GraphQLEndpointHandler(reviews.TestOptions)) + reviewsInterceptor.SetModifier(func(body []byte) []byte { + if bytes.Contains(body, []byte(`"_service"`)) { + return body + } + return []byte(`{"data":{"_entities":[null,null]}}`) + }) + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "Product", + CacheName: "default", + TTL: 30 * time.Second, + NegativeCacheTTL: 10 * time.Second, + IncludeSubgraphHeaderPrefix: false, + }, + }, + }, + } + + setup := newFederationSetupWithReviewInterceptor(reviewsInterceptor, addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + waitForGatewayReady(t, setup.GatewayServer.URL) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) + query := `query { topProducts { name reviews { body } } }` + expected := `{"data":{"topProducts":[{"name":"Trilby","reviews":null},{"name":"Fedora","reviews":null}]}}` + productKeyTop1 := `{"__typename":"Product","key":{"upc":"top-1"}}` + productKeyTop2 := `{"__typename":"Product","key":{"upc":"top-2"}}` + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "first request should call reviews subgraph") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: false}, + {Key: productKeyTop2, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: productKeyTop1, TTL: 10 * time.Second}, + {Key: productKeyTop2, TTL: 10 * time.Second}, + }}, + }), sortCacheLogEntries(defaultCache.GetLog())) + + top1Value, top1Exists := defaultCache.Peek(productKeyTop1) + assert.True(t, top1Exists) + assert.Equal(t, compactJSONForAssert(t, `{"__typename":"Product","upc":"top-1","name":"Trilby","reviews":null}`), compactJSONForAssert(t, string(top1Value))) + top2Value, top2Exists := defaultCache.Peek(productKeyTop2) + assert.True(t, top2Exists) + assert.Equal(t, compactJSONForAssert(t, `{"__typename":"Product","upc":"top-2","name":"Fedora","reviews":null}`), compactJSONForAssert(t, string(top2Value))) + + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "second request should still call products (root field not cached)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "second request should skip reviews subgraph on negative cache hit") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: true}, + {Key: productKeyTop2, Hit: true}, + }}, + }), sortCacheLogEntries(defaultCache.GetLog())) + }) + + t.Run("L2 enabled - nullable null entity is not cached when NegativeCacheTTL is zero", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + reviewsInterceptor := newSubgraphResponseInterceptor(reviews.GraphQLEndpointHandler(reviews.TestOptions)) + reviewsInterceptor.SetModifier(func(body []byte) []byte { + if bytes.Contains(body, []byte(`"_service"`)) { + return body + } + return []byte(`{"data":{"_entities":[null,null]}}`) + }) + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "Product", + CacheName: "default", + TTL: 30 * time.Second, + NegativeCacheTTL: 0, + IncludeSubgraphHeaderPrefix: false, + }, + }, + }, + } + + setup := newFederationSetupWithReviewInterceptor(reviewsInterceptor, addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + waitForGatewayReady(t, setup.GatewayServer.URL) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsHost := mustParseHost(setup.ProductsUpstreamServer.URL) + reviewsHost := mustParseHost(setup.ReviewsUpstreamServer.URL) + query := `query { topProducts { name reviews { body } } }` + expected := `{"data":{"topProducts":[{"name":"Trilby","reviews":null},{"name":"Fedora","reviews":null}]}}` + productKeyTop1 := `{"__typename":"Product","key":{"upc":"top-1"}}` + productKeyTop2 := `{"__typename":"Product","key":{"upc":"top-2"}}` + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "first request should call reviews subgraph") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: false}, + {Key: productKeyTop2, Hit: false}, + }}, + }), sortCacheLogEntries(defaultCache.GetLog())) + + _, top1Exists := defaultCache.Peek(productKeyTop1) + assert.False(t, top1Exists) + _, top2Exists := defaultCache.Peek(productKeyTop2) + assert.False(t, top2Exists) + + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "second request should still call products (root field not cached)") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "second request should call reviews again when negative caching is disabled") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: productKeyTop1, Hit: false}, + {Key: productKeyTop2, Hit: false}, + }}, + }), sortCacheLogEntries(defaultCache.GetLog())) + }) +} + +// TestFederationCaching_L1L2Combined verifies that L1 and L2 caches work together: +// L1 deduplicates within a request, L2 persists across requests. +func TestFederationCaching_L1L2Combined(t *testing.T) { + t.Parallel() + t.Run("L1+L2 enabled - L1 within request, L2 across requests", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable both L1 and L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-entity caching) + // Configure caching per-subgraph with explicit subgraph names + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - L1 helps within request, L2 populates for later + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: get/set for Query.topProducts, Product entities, User entities = 6 operations + // get/set for Query, Products, Users = 6 operations + assert.Equal(t, 6, len(logAfterFirst)) + + // Verify the exact cache access log (order may vary for keys within each operation) + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches (reviews data for each product) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches (author data) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + + // Subgraph calls: each called once (cold cache) + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst) + assert.Equal(t, 1, reviewsCallsFirst) + assert.Equal(t, 1, accountsCallsFirst) + + // Second query - new request means fresh L1, but L2 should hit + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities + // All hits: 3 get operations + assert.Equal(t, 3, len(logAfterSecond)) + + // Verify the exact cache access log for second query (all hits) + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query cache log should match expected (all hits)") + + // Verify no subgraph calls for second query (L2 cache hits) + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsSecond, "Second query should not call products subgraph (L2 hit)") + assert.Equal(t, 0, reviewsCallsSecond, "Second query should not call reviews subgraph (L2 hit)") + assert.Equal(t, 0, accountsCallsSecond, "Second query should not call accounts subgraph (L2 hit)") + }) + + t.Run("L1+L2 - cross-request isolation: L1 per-request, L2 shared", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable both L1 and L2 + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + // Enable entity caching for L2 tests (opt-in per-entity caching) + // Configure caching per-subgraph with explicit subgraph names + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // First request - populates L2 cache + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + wantFirstLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 miss, first time seeing these products + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }, + }, + // reviews subgraph _entities(Product) — store fetched product data in L2 + { + Operation: "set", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }, + }, + // accounts subgraph _entities(User) — L2 miss, first time seeing this user + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + // accounts subgraph _entities(User) — store fetched user data in L2 + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantFirstLog), sortCacheLogEntries(logAfterFirst), "First request: L2 miss + set for Product and User") + + // Second request - L1 is fresh (new request), but L2 should provide data + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantSecondLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 hit, both products cached from first request + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }, + }, + // accounts subgraph _entities(User) — L2 hit, user cached from first request (deduplicated: 1 unique user) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + // No set operations — all data served from cache + } + assert.Equal(t, sortCacheLogEntries(wantSecondLog), sortCacheLogEntries(logAfterSecond), "Second request: all L2 cache hits, no sets") + + // No subgraph calls on second request — all entity data served from L2 cache + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + assert.Equal(t, 0, tracker.GetCount(reviewsURLParsed.Host), "Second request should skip reviews subgraph (Product L2 cache hit)") + assert.Equal(t, 0, tracker.GetCount(accountsURLParsed.Host), "Second request should skip accounts subgraph (User L2 cache hit)") + }) +} + +// TestPartialEntityCaching demonstrates that only explicitly configured entity types +// are cached. This test configures caching for Product but NOT for User, verifying +// the opt-in nature of the per-entity caching configuration. +// TestFederationCaching_PartialEntityFetch verifies partial cache loading: when some entities +// in a batch are cached and others are not, only the missing ones are fetched from the subgraph. +func TestFederationCaching_PartialEntityFetch(t *testing.T) { + t.Parallel() + t.Run("only configured entities are cached", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // PARTIAL CACHING: Only configure caching for Product in reviews subgraph, NOT for User in accounts + // This demonstrates the opt-in per-entity caching behavior + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + // Note: accounts subgraph is intentionally NOT configured - User entities should NOT be cached + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - Product entities should be cached, User entities should NOT + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Only Product has L2 caching configured (reviews subgraph); User (accounts) does NOT. + // So we expect cache operations for Product only — no User cache activity at all. + logAfterFirst := defaultCache.GetLog() + wantFirstLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 miss, first time seeing these products + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }, + }, + // reviews subgraph _entities(Product) — store fetched product data in L2 + { + Operation: "set", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }, + }, + // No User operations — accounts subgraph has no caching configured + } + assert.Equal(t, sortCacheLogEntries(wantFirstLog), sortCacheLogEntries(logAfterFirst), "First request: only Product entities have cache operations") + + // Both subgraphs called on first request (no cache to serve from) + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews subgraph") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph") + + // Second query - Product should hit cache, User should still be fetched from subgraph + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantSecondLog := []CacheLogEntry{ + // reviews subgraph _entities(Product) — L2 hit, both products cached from first request + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }, + }, + // No User operations — accounts subgraph still has no caching configured + // No set operations — Product data served from cache + } + assert.Equal(t, sortCacheLogEntries(wantSecondLog), sortCacheLogEntries(logAfterSecond), "Second request: Product cache hits only") + + // Reviews subgraph skipped (Product served from cache), accounts still called (User not cached) + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (Product cache hit)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should still call accounts subgraph (User NOT cached)") + }) +} + +// TestRootFieldCaching tests that root fields (like Query.topProducts) can be cached +// when explicitly configured with RootFieldCaching configuration. +// TestFederationCaching_RootFieldCaching verifies that root field responses are cached as a whole +// and served from L2 on subsequent requests, skipping the subgraph entirely. +func TestFederationCaching_RootFieldCaching(t *testing.T) { + t.Parallel() + t.Run("root field caching enabled", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // Configure root field caching for Query.topProducts on products subgraph + // Also configure entity caching to compare behavior + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + accountsHost := accountsURLParsed.Host + + // First query - should miss cache for all: root field, entity types + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Should have cache operations for: + // 1. Root field Query.topProducts (get + set = 2 operations) + // 2. Product entities (get + set = 2 operations) + // 3. User entities (get + set = 2 operations) + // Total: 6 operations + assert.Equal(t, 6, len(logAfterFirst), "First query should have 6 cache operations (get+set for root field, Product, User)") + + // Verify first query calls all subgraphs + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph") + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph") + + // Second query - should hit cache for root field and entities + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantSecondLog := []CacheLogEntry{ + // products subgraph Query.topProducts — root field L2 hit, cached from first request + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // reviews subgraph _entities(Product) — L2 hit, both products cached from first request + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // accounts subgraph _entities(User) — L2 hit, user cached from first request (1 unique user) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + // No set operations — all data served from cache + } + assert.Equal(t, sortCacheLogEntries(wantSecondLog), sortCacheLogEntries(logAfterSecond), "Second query: all cache hits, no sets") + + // All subgraphs skipped on second query (everything served from cache) + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products subgraph (root field cache hit)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews subgraph (entity cache hit)") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts subgraph (entity cache hit)") + }) + + t.Run("root field caching NOT enabled - subgraph still called", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable L2 cache + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + // Only configure entity caching, NOT root field caching + // This demonstrates opt-in behavior: root fields are NOT cached unless configured + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + // Note: products subgraph has NO caching config for Query.topProducts + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // First query + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + productsCallsFirst := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCallsFirst, "First query should call products subgraph") + + // Second query - products subgraph should still be called because root field is NOT cached + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // KEY ASSERTION: Products subgraph IS called on second query because root field is NOT cached + productsCallsSecond := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCallsSecond, "Second query SHOULD call products subgraph (root field NOT cached)") + }) +} + +// ============================================================================= +// L1 CACHE TESTS FOR LIST FIELDS +// ============================================================================= +// +// These tests verify L1 caching behavior when root fields or child fields +// return lists of entities. + +// TestFederationCaching_ErrorSkipsCache verifies that subgraph error responses are never cached, +// ensuring that transient errors do not poison the L2 cache. +func TestFederationCaching_ErrorSkipsCache(t *testing.T) { + t.Parallel() + // Query that triggers an error in accounts subgraph via error-user + // The reviewWithError field returns a review with author ID "error-user" + // which causes FindUserByID to return an error + errorQuery := `query { + reviewWithError { + body + authorWithoutProvides { + id + username + } + } + }` + + // Expected error response - data is null due to non-nullable username field error propagation + expectedErrorResponse := `{"errors":[{"message":"Failed to fetch from Subgraph 'accounts' at Path 'reviewWithError.authorWithoutProvides'."},{"message":"Cannot return null for non-nullable field 'Query.reviewWithError.authorWithoutProvides.username'.","path":["reviewWithError","authorWithoutProvides","username"]}],"data":{"reviewWithError":null}}` + + t.Run("L1 only - error response prevents cache population", func(t *testing.T) { + t.Parallel() + // This test verifies that L1 cache is NOT populated when an error occurs. + // If L1 was erroneously populated, the second query would not call accounts. + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: false, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should get error from accounts + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, reviewsCallsFirst, "First query should call reviews subgraph once") + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Second query - L1 should NOT have cached the error, so accounts should be called again + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + // KEY ASSERTION: If L1 incorrectly cached the error, this would be 0 + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L1 should NOT cache errors)") + }) + + t.Run("L2 only - error response prevents cache population", func(t *testing.T) { + t.Parallel() + // This test verifies that L2 cache is NOT populated when an error occurs. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - should get error from accounts + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Verify exact cache log: only "get" with miss, NO "set" + // Since the fetch had an error, cache population should be skipped entirely + wantCacheLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"error-user"}}`, Hit: false}}}, + // NO "set" entry - this is the key assertion + } + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'") + + // Second query - L2 should NOT have cached the error, so accounts should be called again + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (L2 should NOT cache errors)") + + // Second query should also have same cache log pattern (get miss, no set) + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") + }) + + t.Run("L1 and L2 - error response prevents both caches", func(t *testing.T) { + t.Parallel() + // This test verifies that both L1 and L2 caches are NOT populated when an error occurs. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - should get error from accounts + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsFirst, "First query should call accounts subgraph once") + + // Verify exact cache log: only "get" with miss, NO "set" + wantCacheLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"error-user"}}`, Hit: false}}}, + } + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Cache log should only have 'get' miss, no 'set'") + + // Second query - neither L1 nor L2 should have cached the error + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Same error should be returned + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsSecond, "Second query should call accounts again (neither L1 nor L2 should cache errors)") + + // Second query should also have same cache log pattern + assert.Equal(t, wantCacheLog, defaultCache.GetLog(), "Second query cache log should also have 'get' miss, no 'set'") + }) + + t.Run("error does not pollute cache for subsequent success queries", func(t *testing.T) { + t.Parallel() + // This test verifies that an error query doesn't pollute the cache + // and that subsequent successful queries still work correctly. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure L2 caching for User entities + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + cachingOpts := resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(cachingOpts), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First: Query that triggers an error + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + // Verify exact error response + assert.Equal(t, expectedErrorResponse, string(resp)) + + accountsCallsError := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsError, "Error query should call accounts") + + // Verify error-user was NOT cached (only get, no set) + wantErrorCacheLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"error-user"}}`, Hit: false}}}, + } + assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Error query cache log should only have 'get' miss, no 'set'") + + // Second: Query a successful user (User 1234 via me query) + // Note: "me" is a root query, not an entity fetch, so it doesn't use L2 entity caching + successQuery := `query { + me { + id + username + } + }` + expectedSuccessResponse := `{"data":{"me":{"id":"1234","username":"Me"}}}` + + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, successQuery, nil, t) + + // Should succeed with exact expected response + assert.Equal(t, expectedSuccessResponse, string(resp)) + + // Note: Root queries (me) don't use L2 entity caching by default, + // so the cache log should be empty for this query. + // The important thing is that the previous error didn't pollute the cache. + assert.Equal(t, 0, len(defaultCache.GetLog()), "Root query should not use L2 entity cache") + + // Third: Query the error user again - should still fail (not cached) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, errorQuery, nil, t) + + assert.Equal(t, expectedErrorResponse, string(resp)) + accountsCallsErrorAgain := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCallsErrorAgain, "Error query should call accounts again (error was not cached)") + + // Verify cache log still shows only get miss, no set + assert.Equal(t, wantErrorCacheLog, defaultCache.GetLog(), "Third query cache log should still have 'get' miss, no 'set'") + }) +} + +// TestFederationCaching_MutationInvalidation verifies that mutation-configured cache invalidation +// deletes the affected entity's L2 entry, forcing a re-fetch on the next query. +func TestFederationCaching_MutationInvalidation(t *testing.T) { + t.Parallel() + + // Configure entity caching for User AND mutation invalidation for updateUsername + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + MutationCacheInvalidation: plan.MutationCacheInvalidationConfigurations{ + {FieldName: "updateUsername"}, + }, + }, + } + + // Query that triggers entity caching for User via authorWithoutProvides (no @provides) + entityQuery := `query { topProducts { name reviews { body authorWithoutProvides { username } } } }` + mutationQuery := `mutation { updateUsername(id: "1234", newUsername: "UpdatedMe") { id username } }` + + t.Run("mutation deletes L2 cache entry", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Query to populate L2 cache with User entity + tracker.Reset() + defaultCache.ClearLog() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph once to populate cache") + + // Request 2: Same query — should hit L2 cache, no accounts call + tracker.Reset() + defaultCache.ClearLog() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 hit)") + + // Request 3: Mutation — should delete the L2 cache entry + tracker.Reset() + defaultCache.ClearLog() + respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Equal(t, `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}`, string(respMut)) + + // Verify the cache log contains a delete operation + mutationLog := defaultCache.GetLog() + hasDelete := false + for _, entry := range mutationLog { + if entry.Operation == "delete" { + hasDelete = true + assert.Equal(t, []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`}}, entry.Items) + } + } + assert.True(t, hasDelete, "mutation should trigger a cache delete operation") + + // Request 4: Same query again — should miss L2 (entry deleted), re-fetch from subgraph + tracker.Reset() + defaultCache.ClearLog() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"UpdatedMe"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"UpdatedMe"}}]}]}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "should call accounts subgraph again (L2 entry was deleted)") + }) + + t.Run("mutation without invalidation config does not delete", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + + // Config WITHOUT MutationCacheInvalidation + noInvalidationConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + // No MutationCacheInvalidation — mutation should NOT delete cache + }, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(noInvalidationConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsHost := mustParseHost(setup.AccountsUpstreamServer.URL) + + // Request 1: Query to populate L2 cache + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Request 2: Mutation — should NOT delete L2 cache entry + tracker.Reset() + defaultCache.ClearLog() + respMut := gqlClient.QueryString(ctx, setup.GatewayServer.URL, mutationQuery, nil, t) + assert.Equal(t, `{"data":{"updateUsername":{"id":"1234","username":"UpdatedMe"}}}`, string(respMut)) + + // Verify no delete operation in cache log + mutationLog := defaultCache.GetLog() + for _, entry := range mutationLog { + assert.NotEqual(t, "delete", entry.Operation, "should not have any delete operations without invalidation config") + } + + // Request 3: Same query — should still hit L2 cache (stale but not deleted) + tracker.Reset() + _ = gqlClient.QueryString(ctx, setup.GatewayServer.URL, entityQuery, nil, t) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "should NOT call accounts subgraph (L2 entry still present)") + }) +} diff --git a/execution/engine/federation_caching_remap_variables_test.go b/execution/engine/federation_caching_remap_variables_test.go new file mode 100644 index 0000000000..ceab1867b3 --- /dev/null +++ b/execution/engine/federation_caching_remap_variables_test.go @@ -0,0 +1,126 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestRemapVariablesEntityCacheKey is a smoke test verifying that the +// RemapVariables plumbing works end-to-end through the execution engine. +// +// In production, the router's VariablesMapper renames AST variable references +// ($id → $a) while keeping the variables JSON unchanged. This creates a split +// that renderDerivedEntityKey bridges via forward lookup on RemapVariables. +// However, the execution engine test infrastructure cannot replicate this split +// because the engine validates query+variables together — using $a in the query +// with {"id": "1234"} in the variables fails validation. +// +// So this test sends the original query (with $id) plus RemapVariables: {"a": "id"}. +// The planner produces ArgumentPath ["id"] (matching the variable name directly), +// so the remap forward lookup is a no-op. The test verifies the entity cache key +// derivation and L2 miss/hit cycle work correctly with RemapVariables configured. +// +// The RemapVariables forward-lookup branch in renderDerivedEntityKey is covered +// by unit tests in cache_key_test.go, which can directly construct the +// production-realistic ArgumentPath/Variables/RemapVariables combination. +func TestRemapVariablesEntityCacheKey(t *testing.T) { + t.Parallel() + + // Subtest name: the engine-level scenario this test can actually express is + // "RemapVariables plumbing produces a valid entity cache key and L2 miss→hit + // cycle." The RemapVariables forward-lookup branch itself is covered directly + // in v2/pkg/engine/resolve/cache_key_test.go, which can construct the + // ArgumentPath/Variables/RemapVariables split without engine validation getting + // in the way. + t.Run("entity cache key derivation works end-to-end with RemapVariables configured", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + }, + }), + // Simulate VariablesMapper: $id was renamed to $a in the AST. + // RemapVariables maps newName → oldName so the resolver can find + // the original variable value in the un-renamed variables JSON. + withRemapVariables(map[string]string{"a": "id"}), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Query 1: cache miss. + // Variables use the original name "id" (as in production — the JSON is not renamed). + // The query also uses $id because the execution engine validates variable declarations + // against the variables JSON. In production, the AST would have been rewritten to $a + // before reaching the planner, but validation happened on the original query. + // The RemapVariables map still exercises renderDerivedEntityKey's forward lookup: + // ArgumentPath ["a"] (from resolveArgumentPath resolving through ContextVariable) + // is remapped via RemapVariables["a"] → "id" before looking up Variables["id"]. + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query UserById($id: ID!) { user(id: $id) { id username } }`, + queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + }), sortCacheLogEntries(logAfterFirst)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "first query should fetch from accounts") + + // Query 2: cache hit — same entity key, served from L2. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query UserById($id: ID!) { user(id: $id) { id username } }`, + queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + }), sortCacheLogEntries(logAfterSecond)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "second query should skip accounts (cache hit)") + }) +} diff --git a/execution/engine/federation_caching_request_scoped_test.go b/execution/engine/federation_caching_request_scoped_test.go new file mode 100644 index 0000000000..29f524ab68 --- /dev/null +++ b/execution/engine/federation_caching_request_scoped_test.go @@ -0,0 +1,254 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestRequestScopedFieldDeduplication verifies that @requestScoped fields are +// exported from the first fetch (root or entity) into the per-request +// requestScopedL1 cache and injected into subsequent entity fetches, skipping +// the subgraph call entirely. +// +// Scenario: +// - accounts subgraph: root field `me` returns User entity +// - reviews subgraph: extends User with entity fields (reviews, coReviewers) +// - The `username` field on User is declared @requestScoped on the reviews +// subgraph, meaning its value is the same for all User instances in a request. +// +// Expected flow: +// 1. Root query `me` resolves User from accounts, exports `username` to requestScopedL1. +// 2. Entity resolution for coReviewers (also User) finds `username` in requestScopedL1 +// and injects it, skipping the accounts subgraph call for that batch. +// +// NOTE: This test requires the planner to generate RequestScopedFields on the +// accounts datasource and reviews entity fetch. +// Until that planner work is complete, the test is skipped. +func TestRequestScopedFieldDeduplication(t *testing.T) { + t.Skip("waiting for planner implementation: SubgraphCachingConfig does not yet include RequestScopedFields, and the planner does not yet generate RequestScopedFields on fetch configurations") + + t.Parallel() + + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure the accounts subgraph with @requestScoped fields. + // The planner should read RequestScopedFields from FederationMetaData and + // generate RequestScopedFields on both the root fetch and the entity fetch + // for the reviews subgraph. + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + + // Query: me { id username reviews { body authorWithoutProvides { id username } } } + // + // This triggers: + // 1. Root fetch to accounts for `me` -> returns User{id, username} + // -> requestScopedL1 exports username + // 2. Entity fetch to reviews for User.reviews + // 3. Entity fetch to accounts for authorWithoutProvides (User entity) + // -> requestScopedL1 should inject username, skipping the fetch + query := `query { + me { + id + username + reviews { + body + authorWithoutProvides { + id + username + } + } + } + }` + + tracker.Reset() + defaultCache.ClearLog() + + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + + // Verify response is correct + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"id":"1234","username":"Me"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"id":"1234","username":"Me"}}]}}}`, string(resp)) + + // With @requestScoped deduplication: + // - accounts should be called once for the root `me` query + // - The second accounts call (for authorWithoutProvides entity resolution) + // should be skipped because `username` was injected from requestScopedL1 + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, + "accounts subgraph should be called only once; the entity fetch for "+ + "authorWithoutProvides should be skipped via requestScoped injection") + + // reviews subgraph should still be called for User.reviews + reviewsCalls := tracker.GetCount(reviewsHost) + // Fuzzy: kept as a smoke-check while this test is under t.Skip pending planner + // implementation. The exact call count is planner-dependent and will be locked + // down when the test is re-enabled. + if reviewsCalls == 0 { + t.Fatalf("reviews subgraph should be called at least once for User.reviews") + } +} + +// TestRequestScopedFieldFallbackWithoutProvider verifies that when the root +// field that provides a @requestScoped value is NOT in the query, the first +// entity batch fetch populates the requestScopedL1 cache, and the second +// entity batch fetch skips the subgraph call by reading from requestScopedL1. +// +// Scenario: +// - No root field provides the @requestScoped value (no export source). +// - First entity batch fetch resolves the field normally and exports to requestScopedL1. +// - Second entity batch fetch finds the value in requestScopedL1 and skips. +// +// NOTE: This test requires the planner to generate RequestScopedFields on the +// first entity fetch when no root field is available. +func TestRequestScopedFieldFallbackWithoutProvider(t *testing.T) { + t.Skip("waiting for planner implementation: SubgraphCachingConfig does not yet include RequestScopedFields, and the planner does not yet generate RequestScopedFields on fetch configurations") + + t.Parallel() + + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: true, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Query topReviews without querying `me` first. + // This means there is no root field to export @requestScoped values. + // + // Expected flow: + // 1. Root fetch to reviews for topReviews -> returns Review list + // 2. First entity batch to accounts for authorWithoutProvides (User entities) + // -> fetches normally + exports username to requestScopedL1 + // 3. If there are additional entity batches for other User fields, + // they should find username in requestScopedL1 and skip the fetch. + // + // For the sameUserReviewers path: + // - reviews.authorWithoutProvides resolves User{id:1234} + // - reviews.sameUserReviewers @requires(fields: "username") triggers: + // a) Entity fetch to accounts for username (first batch -> fetches + exports) + // b) Entity fetch to accounts for sameUserReviewers' User entities + // -> should find username in requestScopedL1 and skip + query := `query { + topReviews { + body + authorWithoutProvides { + id + username + sameUserReviewers { + id + username + } + } + } + }` + + tracker.Reset() + defaultCache.ClearLog() + + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + require.NotEmpty(t, resp) + + // Without @requestScoped: accounts would be called for: + // 1. authorWithoutProvides entity fetch (username for all review authors) + // 2. sameUserReviewers @requires entity fetch (username needed first) + // 3. sameUserReviewers result entity fetch + // + // With @requestScoped: after the first entity batch populates requestScopedL1, + // subsequent batches for the same @requestScoped field should skip. + // The exact reduction depends on how many entity batches the planner creates. + accountsCalls := tracker.GetCount(accountsHost) + + // We expect at least 1 call (the initial entity fetch) but fewer than + // the non-optimized case. The exact count depends on planner output. + if accountsCalls == 0 { + t.Fatalf("accounts should be called at least once for the initial entity fetch") + } + + // Log the actual call count for debugging during development. + t.Logf("accounts subgraph calls: %d (expected fewer with @requestScoped optimization)", accountsCalls) + t.Logf("all subgraph calls: %v", tracker.GetCounts()) +} diff --git a/execution/engine/federation_caching_root_args_test.go b/execution/engine/federation_caching_root_args_test.go new file mode 100644 index 0000000000..e62e89441d --- /dev/null +++ b/execution/engine/federation_caching_root_args_test.go @@ -0,0 +1,2708 @@ +package engine_test + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestRootFieldCachingWithArgs verifies L2 caching for root fields with arguments, +// including EntityKeyMappings that derive entity-level cache keys from argument values. +func TestRootFieldCachingWithArgs(t *testing.T) { + t.Parallel() + t.Run("root field with args - miss then hit", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - cache miss + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterFirst), "First query should have 2 cache operations (get miss + set)") + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First query cache log should match") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts subgraph once") + + // Second query - cache hit + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterSecond), "Second query should have 1 cache get (hit)") + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query should hit cache") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts subgraph (cache hit)") + }) + + t.Run("root field with args - different args different keys", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query with id=1234 + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First query should miss cache and set") + + // Second query with id=5678 - different cache key + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "5678"}, t) + assert.Equal(t, `{"data":{"user":{"id":"5678","username":"User 5678"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query with different id should call accounts once") + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterSecond), "Second query with different id should have get miss + set") + wantLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"5678"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"5678"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(logAfterSecond), "Different args should produce different cache keys") + + // Third query with id=1234 - should hit cache from first query + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Third query (same as first) should hit cache") + + logAfterThird := defaultCache.GetLog() + wantLogThird := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogThird), sortCacheLogEntries(logAfterThird), "Third query should hit cache from first query") + }) + + t.Run("entity key mapping - uses entity key format", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Query with entity key mapping - should use entity key format + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterFirst), "Should have get miss + set") + wantLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(logAfterFirst), "Should use entity key format, not root field format") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + // Second query - should hit cache using entity key + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterSecond), "Second query should hit cache") + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query should hit entity cache key") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + }) + + t.Run("entity key mapping - invalidation via entity key", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - cache miss, populate + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts") + + // Delete the entity key from cache + err := defaultCache.Delete(ctx, []string{`{"__typename":"User","key":{"id":"1234"}}`}) + require.NoError(t, err) + + // Third query - should be a miss after deletion + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "After deletion, should call accounts again") + + logAfterDelete := defaultCache.GetLog() + wantLogDelete := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogDelete), sortCacheLogEntries(logAfterDelete), "After deletion: get miss + set") + }) + + t.Run("entity key mapping - cross-lookup from entity fetch", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure both root field entity key mapping AND entity caching for same type + // Both use same cache key format: {"__typename":"User","key":{"id":"1234"}} + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First: Query user by ID (root field with entity key mapping) + // This caches under entity key {"__typename":"User","key":{"id":"1234"}} + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Root field query should call accounts once") + + // Verify root field used entity key format + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Root field query should use entity key format") + + // Second: Query that triggers entity fetch for same User 1234 + // Both root field and entity fetch use the same cache key format. + // The root field stored entity-level data (extracted at merge path) thanks to EntityMergePath, + // so the entity fetch finds {"id":"1234","username":"Me"} → validation passes → cache HIT. + // No re-fetch needed, no SET operation. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Entity fetch should skip accounts (cross-lookup hit: root field stored entity-level data)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Entity fetch should use same key format as root field entity key mapping") + }) + + t.Run("entity key mapping - cross-lookup from root field", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure both root field entity key mapping AND entity caching for same type + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First: Query that triggers entity fetch for User 1234 (via topProducts → reviews → authorWithoutProvides) + // Entity fetch stores entity-level data: {"id":"1234","username":"Me"} + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once for entity resolution") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First query should miss all caches and set") + + // Second: Root field query with entity key mapping for same User 1234 + // Root field generates entity key {"__typename":"User","key":{"id":"1234"}} (same as entity fetch). + // Cache has entity-level data → EntityMergePath wraps it to response-level → validation passes → HIT. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Root field query should skip accounts (cross-lookup hit from entity fetch)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Root field should hit cache from entity fetch data") + }) + + t.Run("entity key mapping + header prefix", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + mockHeadersBuilder := &mockSubgraphHeadersBuilder{ + hashes: map[string]uint64{ + "accounts": 33333, + }, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withSubgraphHeadersBuilder(mockHeadersBuilder), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + defaultCache.ClearLog() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterFirst), "Should have get miss + set") + wantLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `33333:{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `33333:{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(logAfterFirst), "Entity key should have header prefix") + }) + + t.Run("root field without args - regression", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + // First query + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query { topProducts { name } }`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "First query should call products once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Should use root field key format (no entity key mapping)") + + // Second query - hit + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query { topProducts { name } }`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products (cache hit)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query should hit cache") + }) + + t.Run("root field caching + entity caching nested", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "product", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + }, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // Query product with nested reviews + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query { product(upc: "top-1") { name reviews { body } } }`, queryVariables{"upc": "top-1"}, t) + assert.Equal(t, `{"data":{"product":{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control."}]}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "First query should call products once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First query should call reviews once") + + logAfterFirst := defaultCache.GetLog() + // Should have root field get/set + entity get/set + assert.Equal(t, 4, len(logAfterFirst), "Should have 4 cache operations (root field get/set + entity get/set)") + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"product","args":{"upc":"top-1"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"product","args":{"upc":"top-1"}}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First query should miss both root field and entity cache") + + // Second identical query - all from cache + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query { product(upc: "top-1") { name reviews { body } } }`, queryVariables{"upc": "top-1"}, t) + assert.Equal(t, `{"data":{"product":{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control."}]}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(productsHost), "Second query should skip products (root field cache hit)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second query should skip reviews (entity cache hit)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"product","args":{"upc":"top-1"}}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query should hit both root field and entity cache") + }) + + t.Run("TTL expiry", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 100 * time.Millisecond, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - cache miss + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts") + + // Second query immediately - cache hit + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Immediate second query should hit cache") + + // Wait for TTL to expire + time.Sleep(200 * time.Millisecond) + + // Third query after expiry - cache miss + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Query after TTL expiry should call accounts") + }) + + t.Run("concurrency with different IDs", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Run 10 concurrent queries with different IDs + var wg sync.WaitGroup + results := make([]string, 10) + for i := range 10 { + wg.Add(1) + go func(idx int) { + defer wg.Done() + id := strconv.Itoa(idx + 1000) + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": id}, t) + results[idx] = string(resp) + }(i) + } + wg.Wait() + + // Verify all results + for i := range 10 { + id := strconv.Itoa(i + 1000) + expected := fmt.Sprintf(`{"data":{"user":{"id":"%s","username":"User %s"}}}`, id, id) + assert.Equal(t, expected, results[i], "Concurrent query %d should return correct result", i) + } + }) + + t.Run("two args - reversed argument order hits cache", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "userByIdAndName", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query: arguments in schema-defined order (id, username) + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"userByIdAndName","args":{"id":"1234","username":"Me"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"userByIdAndName","args":{"id":"1234","username":"Me"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First query cache log should match") + + // Second query: arguments in REVERSED order (username, id) + // The cache key should be identical because the planner always adds arguments + // in the order defined by the field configuration (schema order), not query order. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query($username: String!, $id: ID!) { userByIdAndName(username: $username, id: $id) { username id } }`, queryVariables{"username": "Me", "id": "1234"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"username":"Me","id":"1234"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"userByIdAndName","args":{"id":"1234","username":"Me"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query (reversed args) should hit cache with identical key") + }) + + t.Run("root field more fields then fewer fields - cache hit (superset)", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query: fetch MORE fields (username + realName) - cache miss + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query($id: ID!) { user(id: $id) { username realName } }`, queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"username":"Me","realName":"Real Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First query cache log should match") + + // Second query: fetch FEWER fields (username only) - should be cache HIT + // The cached data has {username, realName}, the query only needs {username} → superset → hit + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query($id: ID!) { user(id: $id) { username } }`, queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query (fewer fields) should be a cache HIT because cached data is a superset") + }) + + t.Run("root field fewer fields then more fields - cache miss (subset)", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "user", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query: fetch FEWER fields (username only) - cache miss + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query($id: ID!) { user(id: $id) { username } }`, queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First query cache log should match") + + // Second query: fetch MORE fields (username + realName) - should be cache MISS + // The cached data only has {username}, the query needs {username, realName} → subset → miss + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query($id: ID!) { user(id: $id) { username realName } }`, queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"username":"Me","realName":"Real Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should call accounts (cache miss - needs more fields)") + + logAfterSecond := defaultCache.GetLog() + // The cache GET returns a hit (key exists), but validateItemHasRequiredData fails + // because the cached data is missing realName. This causes a re-fetch (tracker=1) and cache update. + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second query should find stale cache entry but re-fetch because cached data is only a subset") + + // Third query: same more-fields query - should now hit cache (re-fetch populated it) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query($id: ID!) { user(id: $id) { username realName } }`, queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"username":"Me","realName":"Real Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Third query should skip accounts (cache hit after re-fetch)") + + logAfterThird := defaultCache.GetLog() + wantLogThird := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"user","args":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogThird), sortCacheLogEntries(logAfterThird), "Third query should hit cache with full data from re-fetch") + }) + + t.Run("entity key mapping - multiple keys single mapping", func(t *testing.T) { + t.Parallel() + // User has @key(fields: "id") @key(fields: "username"), but root field user(id) + // only maps to the "id" key. Adding a second @key doesn't change behavior + // when only one key is mapped. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - miss, stores under single entity key + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterFirst), "Should have get miss + set") + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Single mapping: only id key, not combined id+username") + + // Second query - hit via entity key + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterSecond), "Second query should have single get hit") + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Should hit cache via entity key") + }) + + t.Run("entity key mapping - multiple keys multiple mappings", func(t *testing.T) { + t.Parallel() + // User has @key(fields: "id") @key(fields: "username"). + // Root field userByIdAndName(id, username) maps to BOTH keys. + // Data is stored under 2 entity keys, one per mapping. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + }, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - miss, stores under BOTH entity keys + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterFirst), "Should have get miss + set (both keys)") + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Multiple mappings: data stored under both id and username keys") + + // Second query - hit (via either key) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterSecond), "Second query should have single get hit") + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: true}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Both keys should hit cache") + }) + + t.Run("entity key mapping - multiple mappings partial args", func(t *testing.T) { + t.Parallel() + // Two entity key mappings configured (id and username), + // but only the id variable is provided. The username mapping + // cannot resolve → only a single entity cache key is generated. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + }, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query - miss on id key, then response data backfills the sibling username key too + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterFirst), "Should have get miss + set (id key plus response-derived username key)") + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "The response supplies username, so both entity keys are written") + + // Second query - hit via id key + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit)") + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterSecond), "Second query should have single get hit") + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Single id key should hit cache") + }) + + t.Run("entity key mapping - multiple mappings cross-lookup", func(t *testing.T) { + t.Parallel() + // Root field userByIdAndName stores under BOTH entity keys. + // Entity fetch for User uses @key(fields: "id") → finds data stored by root field. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + }, + }, + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First: Root field stores user under both entity keys (id and username) + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Root field query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Root field should store under both id and username entity keys") + + // Second: Entity fetch for User 1234 via topProducts → reviews → authorWithoutProvides + // Entity fetch uses @key(fields: "id") → finds data stored under id key by root field + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/multiple_upstream_without_provides.query"), nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Entity fetch should skip accounts (cross-lookup hit: root field stored under id key)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Entity fetch should cross-lookup User via id key stored by root field") + }) + + t.Run("root field not configured - still calls subgraph", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Only configure products - not accounts + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First query + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts (not cached)") + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, 0, len(logAfterFirst), "Unconfigured root field should produce no cache operations") + + // Second query - not cached, should call again + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Second query should also call accounts (not cached)") + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, 0, len(logAfterSecond), "Unconfigured root field should produce no cache operations on second query either") + }) + + t.Run("entity key mapping - two root fields asymmetric key coverage", func(t *testing.T) { + t.Parallel() + // userByIdAndName provides both args → 2 cache keys (id + username). + // user(id) provides only id → 1 cache key. + // Step 1: userByIdAndName writes under both keys. + // Step 2: user(id) reads via id key → hit from step 1. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Step 1: userByIdAndName — both mappings resolve → 2 reads (miss), 2 writes + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Both mappings resolved: data stored under id and username keys") + + // Step 2: user(id) — only id mapping resolves → 1 read (hit via id key) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip accounts (cache hit via id key)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "user(id) should hit cache via id key stored by userByIdAndName") + }) +} + +// TestRootFieldCachingWithArgs_PartialKeyWrite verifies that when only some EntityKeyMappings +// match the request arguments, only those matching keys are written to L2. +func TestRootFieldCachingWithArgs_PartialKeyWrite(t *testing.T) { + t.Parallel() + t.Run("entity key mapping - partial key write does not generate extra keys from response", func(t *testing.T) { + t.Parallel() + // Documents current behavior: when user(id) is queried with only the id + // mapping matching, the write stores under the id key only. + // The username key is NOT generated from the fetched response data. + // Verified via Peek: id key exists, username key does not. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // user(id) — id mapping resolves from args, username key is derived from the fetched response + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Fetched response should backfill the username key too") + + // Direct cache inspection: both keys present + _, idExists := defaultCache.Peek(`{"__typename":"User","key":{"id":"1234"}}`) + assert.True(t, idExists, "id key should be in cache") + _, usernameExists := defaultCache.Peek(`{"__typename":"User","key":{"username":"Me"}}`) + assert.True(t, usernameExists, "username key should be in cache once the response reveals it") + }) + + t.Run("entity key mapping - flat key cross-lookup from composite key write", func(t *testing.T) { + t.Parallel() + // userByIdAndName configured with flat @key(fields: "id") + composite key + // using id+username together as a single mapping. + // user(id) configured with flat @key(fields: "id") only. + // Step 1: userByIdAndName writes under both keys (flat id + composite id+username). + // Step 2: user(id) reads via flat id key → hit from step 1. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Step 1: userByIdAndName — both mappings resolve → 2 reads (miss), 2 writes + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Should call accounts once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"id":"1234","username":"Me"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"1234","username":"Me"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "Both flat id and composite id+username keys written") + + // Step 2: user(id) — flat id mapping only → hit via flat id key from step 1 + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Should skip accounts (flat id key hit)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Flat id key cross-lookup succeeds from composite key write") + }) +} + +// TestRootFieldCachingWithArgs_BothKeysHit verifies that when both EntityKeyMappings +// are populated, a second request hits both keys and skips the subgraph entirely. +func TestRootFieldCachingWithArgs_BothKeysHit(t *testing.T) { + t.Parallel() + + t.Run("both entity key mappings hit on second request", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First query should fetch from subgraph") + + logAfterFirst := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + }), sortCacheLogEntries(logAfterFirst)) + + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second query should skip subgraph (cache hit)") + + logAfterSecond := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: true}, + }}, + }), sortCacheLogEntries(logAfterSecond)) + }) +} + +// TestRootFieldCachingWithArgs_SeededDifferentData verifies that when L2 has conflicting +// data under different entity key mappings, the fresher entry wins during merge. +func TestRootFieldCachingWithArgs_SeededDifferentData(t *testing.T) { + t.Parallel() + + t.Run("seeded L2 with different data under each key - fresher entry wins", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + idKey := `{"__typename":"User","key":{"id":"1234"}}` + usernameKey := `{"__typename":"User","key":{"username":"Me"}}` + + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: idKey, Value: []byte(`{"id":"1234","username":"FreshName"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + err = defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: usernameKey, Value: []byte(`{"id":"1234","username":"StaleName"}`), TTL: 10 * time.Second}, + }) + require.NoError(t, err) + + setupLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: idKey, TTL: 30 * time.Second}}}, + {Operation: "set", Items: []CacheLogItem{{Key: usernameKey, TTL: 10 * time.Second}}}, + }, setupLog) + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"FreshName"}}}`, string(resp), + "desired behavior serves the freshest cached entry when both keys hit") + assert.Equal(t, 0, tracker.GetCount(accountsHost), + "Should skip subgraph fetch since the selected cached entry passes validation") + + idData, idExists := defaultCache.Peek(idKey) + assert.True(t, idExists) + assert.Equal(t, `{"id":"1234","username":"FreshName"}`, string(idData)) + usernameData, usernameExists := defaultCache.Peek(usernameKey) + assert.True(t, usernameExists) + assert.Equal(t, `{"id":"1234","username":"StaleName"}`, string(usernameData)) + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: true}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + }) +} + +// TestRootFieldCachingWithArgs_ComplementaryPartialData verifies that two partial cache entries +// under different entity key mappings are merged into a complete hit, skipping the subgraph. +func TestRootFieldCachingWithArgs_ComplementaryPartialData(t *testing.T) { + t.Parallel() + + t.Run("complementary partial data merges into a complete cache hit", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + idKey := `{"__typename":"User","key":{"id":"1234"}}` + usernameKey := `{"__typename":"User","key":{"username":"Me"}}` + + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: idKey, Value: []byte(`{"id":"1234","username":"Me"}`), TTL: 20 * time.Second}, + }) + require.NoError(t, err) + err = defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: usernameKey, Value: []byte(`{"id":"1234","nickname":"nick-Me"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + + setupLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: idKey, TTL: 20 * time.Second}}}, + {Operation: "set", Items: []CacheLogItem{{Key: usernameKey, TTL: 30 * time.Second}}}, + }, setupLog) + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username nickname } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me","nickname":"nick-Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), + "desired behavior merges complementary cache hits and skips the subgraph fetch") + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: usernameKey, Hit: true}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: idKey, TTL: 30 * time.Second}, + {Key: usernameKey, TTL: 30 * time.Second}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + + idData, idExists := defaultCache.Peek(idKey) + assert.True(t, idExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(idData)) + usernameData, usernameExists := defaultCache.Peek(usernameKey) + assert.True(t, usernameExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(usernameData)) + }) +} + +// TestRootFieldCachingWithArgs_KeyPopulationAndBackfill verifies that a full-args query +// populates all entity key mappings, and subsequent single-arg queries hit the correct key. +func TestRootFieldCachingWithArgs_KeyPopulationAndBackfill(t *testing.T) { + t.Parallel() + + t.Run("5a - full arg query populates both keys verified via Peek", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Should fetch from subgraph") + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + + idData, idExists := defaultCache.Peek(`{"__typename":"User","key":{"id":"1234"}}`) + assert.True(t, idExists, "id key should exist after full-arg query") + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(idData)) + + usernameData, usernameExists := defaultCache.Peek(`{"__typename":"User","key":{"username":"Me"}}`) + assert.True(t, usernameExists, "username key should exist after full-arg query") + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(usernameData)) + }) + + t.Run("5b - partial arg query backfills username key from response", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!) { user(id: $id) { id username } }`, + queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Should fetch from subgraph") + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + + idData, idExists := defaultCache.Peek(`{"__typename":"User","key":{"id":"1234"}}`) + assert.True(t, idExists, "id key should exist") + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(idData)) + usernameData, usernameExists := defaultCache.Peek(`{"__typename":"User","key":{"username":"Me"}}`) + assert.True(t, usernameExists, "username key should be backfilled from the fetched response") + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(usernameData)) + }) +} + +// TestRootFieldCachingWithArgs_BackfillAfterPartialHit verifies that a cache hit on one +// entity key mapping backfills the missing sibling key when the cached entity has the data. +func TestRootFieldCachingWithArgs_BackfillAfterPartialHit(t *testing.T) { + t.Parallel() + + // Scenario: the root field asks for id + username keys, only the id key is in + // L2, and that cached entity already contains username. The request should be + // served from cache, the missing username key should be backfilled, and the + // existing id key should not be rewritten. + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + idKey := `{"__typename":"User","key":{"id":"1234"}}` + usernameKey := `{"__typename":"User","key":{"username":"Me"}}` + + // Seed only the id key with an entity that already proves username. + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: idKey, Value: []byte(`{"id":"1234","username":"Me"}`), TTL: 20 * time.Second}, + }) + require.NoError(t, err) + + setupLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: idKey, TTL: 20 * time.Second}}}, + }, setupLog) + + defaultCache.ClearLog() + tracker.Reset() + // Make the root-field request that asks for both id and username mappings. + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost)) + + // Assert the exact cache story: + // 1. L2 reads both requested keys and finds only id. + // 2. L2 writes only the missing username key. + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: usernameKey, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{{Key: usernameKey, TTL: 30 * time.Second}}}, + }), sortCacheLogEntries(logAfterQuery)) + + // Assert the pre-existing id entry is unchanged and the username key now points + // at the same entity payload. + idData, idExists := defaultCache.Peek(idKey) + assert.True(t, idExists) + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(idData)) + usernameData, usernameExists := defaultCache.Peek(usernameKey) + assert.True(t, usernameExists, "cache-hit serve should backfill the missing sibling key") + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(usernameData)) +} + +// TestRootFieldCachingWithArgs_BackfillRequiresFieldProof verifies that a missing sibling key +// is NOT backfilled when the cached entity lacks the field needed for that key mapping. +func TestRootFieldCachingWithArgs_BackfillRequiresFieldProof(t *testing.T) { + t.Parallel() + + // Scenario: the root field asks for id + username keys, only the id key is in + // L2, and the cached entity does not contain username. The request can still be + // served from cache because it asks for id only, but the missing username key + // must not be backfilled from request args alone. + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + idKey := `{"__typename":"User","key":{"id":"1234"}}` + usernameKey := `{"__typename":"User","key":{"username":"Me"}}` + + // Seed only the id key and deliberately omit username from the cached entity. + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: idKey, Value: []byte(`{"id":"1234"}`), TTL: 20 * time.Second}, + }) + require.NoError(t, err) + + setupLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: idKey, TTL: 20 * time.Second}}}, + }, setupLog) + + defaultCache.ClearLog() + tracker.Reset() + // Make a request that only needs id in the response, so the cache-only path is still valid. + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost)) + + // Assert the exact cache story: + // 1. L2 reads both requested keys and finds only id. + // 2. No write happens because the cached entity never proves username. + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: usernameKey, Hit: false}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + + // Assert the id entry remains as seeded and the username key stays absent. + idData, idExists := defaultCache.Peek(idKey) + assert.True(t, idExists) + assert.Equal(t, `{"id":"1234"}`, string(idData)) + _, usernameExists := defaultCache.Peek(usernameKey) + assert.False(t, usernameExists, "missing sibling key must not be backfilled from request args alone") +} + +// TestRootFieldCachingWithArgs_DerivedKeyExpansionAfterFetch verifies that after a subgraph fetch, +// all entity key mappings are populated including derived keys not in the request arguments. +func TestRootFieldCachingWithArgs_DerivedKeyExpansionAfterFetch(t *testing.T) { + t.Parallel() + + // Scenario: the root field asks for id + username keys, but the cache config + // also has a third nickname mapping. Only id is seeded, so the fetch runs. The + // fetched entity should refresh id, backfill username, and add the extra + // nickname key derived from final entity data. + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "nickname", ArgumentPath: []string{"nickname"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + idKey := `{"__typename":"User","key":{"id":"1234"}}` + usernameKey := `{"__typename":"User","key":{"username":"Me"}}` + nicknameKey := `{"__typename":"User","key":{"nickname":"nick-Me"}}` + + // Seed only the id key so the request has one cache hit and one requested miss. + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: idKey, Value: []byte(`{"id":"1234"}`), TTL: 20 * time.Second}, + }) + require.NoError(t, err) + + setupLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: idKey, TTL: 20 * time.Second}}}, + }, setupLog) + + defaultCache.ClearLog() + tracker.Reset() + // Make the root-field request. The response returns id, username, and nickname. + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username nickname } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me","nickname":"nick-Me"}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(accountsHost)) + + // Assert the exact cache story: + // 1. L2 reads the requested id + username keys and finds only id. + // 2. The fetch writes id refresh + username backfill + nickname derived key. + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: usernameKey, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: idKey, TTL: 30 * time.Second}, + {Key: usernameKey, TTL: 30 * time.Second}, + {Key: nicknameKey, TTL: 30 * time.Second}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + + // Assert all three keys now point at the same final entity payload. + idData, idExists := defaultCache.Peek(idKey) + assert.True(t, idExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(idData)) + usernameData, usernameExists := defaultCache.Peek(usernameKey) + assert.True(t, usernameExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(usernameData)) + nicknameData, nicknameExists := defaultCache.Peek(nicknameKey) + assert.True(t, nicknameExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(nicknameData)) +} + +// TestRootFieldCachingWithArgs_FallbackAfterPartialSelection verifies that when multiple +// cached entries exist but disagree, the system falls back to a subgraph fetch. +func TestRootFieldCachingWithArgs_FallbackAfterPartialSelection(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Value: []byte(`{"id":"1234","username":"Me","nickname":"nick-Me"}`), TTL: 10 * time.Second}, + }) + require.NoError(t, err) + err = defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Value: []byte(`{"id":"1234"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + + setupLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 10 * time.Second}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}}}, + }, setupLog) + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username nickname } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me","nickname":"nick-Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost), "desired behavior resolves fresh-incomplete vs stale-complete from cache without a fetch") + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, Hit: true}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"username":"Me"}}`, TTL: 30 * time.Second}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + + idData, idExists := defaultCache.Peek(`{"__typename":"User","key":{"id":"1234"}}`) + assert.True(t, idExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(idData)) + usernameData, usernameExists := defaultCache.Peek(`{"__typename":"User","key":{"username":"Me"}}`) + assert.True(t, usernameExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(usernameData)) +} + +// TestRootFieldCachingWithArgs_MergeConflictWholeEntrySelection verifies that when the merge +// selects the whole entry (rather than individual fields), the result is consistent. +func TestRootFieldCachingWithArgs_MergeConflictWholeEntrySelection(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }}, + {EntityTypeName: "User", FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }}, + }, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + idKey := `{"__typename":"User","key":{"id":"1234"}}` + usernameKey := `{"__typename":"User","key":{"username":"Me"}}` + + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: idKey, Value: []byte(`{"id":"1234","username":"OldName"}`), TTL: 20 * time.Second}, + }) + require.NoError(t, err) + err = defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: usernameKey, Value: []byte(`{"id":"1234","username":"Me","nickname":"nick-Me"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + + setupLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: idKey, TTL: 20 * time.Second}}}, + {Operation: "set", Items: []CacheLogItem{{Key: usernameKey, TTL: 30 * time.Second}}}, + }, setupLog) + + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username nickname } }`, + queryVariables{"id": "1234", "username": "Me"}, t) + + // This fixture is intentionally black-box: the desired observable outcome is that the + // fresher overlapping username value wins and the complementary nickname is retained. + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me","nickname":"nick-Me"}}}`, string(resp)) + assert.Equal(t, 0, tracker.GetCount(accountsHost)) + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: usernameKey, Hit: true}, + }}, + }), sortCacheLogEntries(logAfterQuery)) + + idData, idExists := defaultCache.Peek(idKey) + assert.True(t, idExists) + assert.Equal(t, `{"id":"1234","username":"OldName"}`, string(idData)) + usernameData, usernameExists := defaultCache.Peek(usernameKey) + assert.True(t, usernameExists) + assert.Equal(t, `{"id":"1234","username":"Me","nickname":"nick-Me"}`, string(usernameData)) +} + +// TestRootFieldEntityCacheMerge verifies that when a query crosses two subgraphs +// (accounts via root field with entity key mapping, reviews via entity resolution), +// both subgraphs write entity cache entries on the first request, and the second +// request hits the cache for both without making any subgraph calls. +// This tests that root field entity writes merge with existing entity data rather +// than clobbering it. +func TestRootFieldEntityCacheMerge(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + // Configure accounts with root field entity key mapping AND entity caching, + // and reviews with entity caching for User type. + // Both share entity type User with cache name "default". + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + }, + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + + // First request: query that crosses both subgraphs → cache MISS for both → both write entity entries + // user(id) root field fetches from accounts, reviews field triggers entity resolution from reviews + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_with_reviews.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control."},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits."}]}}}`, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First request should call accounts subgraph once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "First request should call reviews subgraph once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should miss root field cache, set it, then entity fetch should merge") + + // Second request: same query → cache HIT for both subgraphs (entity data merged, not clobbered) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_with_reviews.query"), queryVariables{"id": "1234"}, t) + assert.Equal(t, `{"data":{"user":{"id":"1234","username":"Me","reviews":[{"body":"A highly effective form of birth control."},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits."}]}}}`, string(resp)) + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second request should skip accounts subgraph (cache hit)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "Second request should skip reviews subgraph (cache hit)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request should hit cache for both root field and entity resolution") +} + +// TestRootFieldCachingCompositeKeyInputObject verifies that root field caching works +// with composite entity keys mapped via multiple argument paths (simulating @is directive +// mapping with input object arguments). The cache key includes both "id" and "username" +// fields, so different argument combinations produce different cache entries. +func TestRootFieldCachingCompositeKeyInputObject(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: false, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + }, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // First request: cache miss → subgraph called → entity key written + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "First request should call accounts subgraph once") + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234","username":"Me"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234","username":"Me"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst), "First request should miss cache and set entity key with composite key") + + // Second request: same args → cache hit → subgraph NOT called + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Me"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Me"}}}`, string(resp)) + + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Second request should skip accounts subgraph (cache hit)") + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234","username":"Me"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond), "Second request should hit cache for composite key") + + // Third request: different args → cache miss → subgraph called + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.Query(ctx, setup.GatewayServer.URL, cachingTestQueryPath("queries/user_by_id_and_name.query"), queryVariables{"id": "1234", "username": "Other"}, t) + assert.Equal(t, `{"data":{"userByIdAndName":{"id":"1234","username":"Other"}}}`, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Third request with different args should call accounts subgraph") + + logAfterThird := defaultCache.GetLog() + wantLogThird := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234","username":"Other"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234","username":"Other"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogThird), sortCacheLogEntries(logAfterThird), "Third request should miss cache due to different username in composite key") +} diff --git a/execution/engine/federation_caching_root_entity_test.go b/execution/engine/federation_caching_root_entity_test.go new file mode 100644 index 0000000000..9a7a6e49ae --- /dev/null +++ b/execution/engine/federation_caching_root_entity_test.go @@ -0,0 +1,505 @@ +package engine_test + +import ( + "bytes" + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + reviews "github.com/wundergraph/graphql-go-tools/execution/federationtesting/reviews/graph" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestRootFieldEntityKeyMappingCacheSharing tests that a root field with EntityKeyMappings +// shares cache keys with entity fetches from another subgraph. +// +// Scenario (mirrors failing cosmo router test): +// - "products" subgraph: root field product(upc: "top-1") → {upc, name, price} +// - "reviews" subgraph: entity fetch Product._entities(upc: "top-1") → {reviews: [...]} +// - Root field uses EntityKeyMappings so L2 key = {"__typename":"Product","key":{"upc":"top-1"}} +// - Second request should hit L2 cache for both fetches (no subgraph calls) +// +// Root cause: EntityKeyMapping.ArgumentPath used the schema argument name ("upc"), +// but after variable extraction the actual variable in ctx.Variables has a normalized +// sequential name ("a"). The planner resolves this mismatch by looking up the actual +// ContextVariable path from the root field's tracked arguments. +func TestRootFieldEntityKeyMappingCacheSharing(t *testing.T) { + t.Parallel() + + t.Run("root field with EntityKeyMappings L2 hit on second request", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "product", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + }, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL) + require.NoError(t, err) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + productKey := `{"__typename":"Product","key":{"upc":"top-1"}}` + + // Request 1: cache miss → both subgraphs called + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { product(upc: "top-1") { upc name reviews { body } } }`, nil, t) + assert.Equal(t, `{"data":{"product":{"upc":"top-1","name":"Trilby","reviews":[{"body":"A highly effective form of birth control."}]}}}`, string(resp)) + + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph once") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "first request should call reviews subgraph once") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: false}}}, // Products root field: cold cache, cache miss + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, // Products root field: write products payload under shared key + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, // Reviews entity fetch: hits the shared root payload written above + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, // Reviews entity fetch: merge reviews payload into shared key + }), sortCacheLogEntries(defaultCache.GetLog())) + + // Request 2: should hit cache → neither subgraph called + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { product(upc: "top-1") { upc name reviews { body } } }`, nil, t) + assert.Equal(t, string(resp), string(resp2), "both requests should return identical responses") + + assert.Equal(t, 0, tracker.GetCount(productsHost), "second request should NOT call products subgraph (root field entity cache hit)") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "second request should NOT call reviews subgraph (entity cache hit)") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, // Products root field: cache hit, skip subgraph + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, // Reviews entity fetch: cache hit on shared key, skip subgraph + }, defaultCache.GetLog()) + }) + + t.Run("shadow mode with EntityKeyMappings always calls subgraph", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "product", + CacheName: "default", + TTL: 30 * time.Second, + ShadowMode: true, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + }, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL) + require.NoError(t, err) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + productKey := `{"__typename":"Product","key":{"upc":"top-1"}}` + + // Request 1: cache miss → subgraph called, shadow write populates cache + defaultCache.ClearLog() + tracker.Reset() + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { product(upc: "top-1") { upc name reviews { body } } }`, nil, t) + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "first request should call reviews subgraph") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: false}}}, // Products root field (shadow): cold cache shadow read, miss + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, // Products root field (shadow): shadow write of products payload + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, // Reviews entity fetch (non-shadow): hits the shared shadow-written key + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, // Reviews entity fetch (non-shadow): merge reviews payload under shared key + }), sortCacheLogEntries(defaultCache.GetLog())) + + // Request 2: shadow mode → subgraph MUST be called again (shadow read happens but is not served) + defaultCache.ClearLog() + tracker.Reset() + gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { product(upc: "top-1") { upc name reviews { body } } }`, nil, t) + assert.Equal(t, 1, tracker.GetCount(productsHost), "shadow mode should always call products subgraph") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "reviews entity cache is non-shadow, so second request should hit cache") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, // Products root field (shadow): hit, but shadow mode ignores the cached value + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, // Products root field (shadow): shadow re-write after subgraph call + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, // Reviews entity fetch (non-shadow): cache hit, skip subgraph + }), sortCacheLogEntries(defaultCache.GetLog())) + }) + + t.Run("root field with EntityKeyMappings caches nullable negative entity response without nulling root object", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + reviewsInterceptor := newSubgraphResponseInterceptor(reviews.GraphQLEndpointHandler(reviews.TestOptions)) + reviewsInterceptor.SetModifier(func(body []byte) []byte { + if bytes.Contains(body, []byte(`"_service"`)) { + return body + } + return []byte(`{"data":{"_entities":[null]}}`) + }) + + setup := newFederationSetupWithReviewInterceptor(reviewsInterceptor, addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "product", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + }, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "Product", + CacheName: "default", + TTL: 30 * time.Second, + NegativeCacheTTL: 10 * time.Second, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL) + require.NoError(t, err) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + query := `query { product(upc: "top-1") { upc name reviews { body } } }` + expected := `{"data":{"product":{"upc":"top-1","name":"Trilby","reviews":null}}}` + productKey := `{"__typename":"Product","key":{"upc":"top-1"}}` + + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "first request should call reviews subgraph") + + storedValue, exists := defaultCache.Peek(productKey) + assert.True(t, exists, "shared entity/root cache key should be populated") + assert.Equal(t, compactJSONForAssert(t, `{"__typename":"Product","upc":"top-1","name":"Trilby","reviews":null}`), compactJSONForAssert(t, string(storedValue))) + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: false}}}, // Products root field: cold cache, cache miss + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, // Products root field: write positive payload under shared key with 30s TTL + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, // Reviews entity fetch: hits the shared root payload written above + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 10 * time.Second}}}, // Reviews entity fetch: merge reviews:null negative payload with 10s NegativeCacheTTL + }), sortCacheLogEntries(defaultCache.GetLog())) + + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp2)) + assert.Equal(t, 0, tracker.GetCount(productsHost), "second request should skip products subgraph on shared-key root cache hit") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "second request should skip reviews subgraph: reviews:null lives inside the shared root payload, so this is an object-shaped cache hit, not a TypeNull negative-sentinel hit") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + }, defaultCache.GetLog()) + }) + + t.Run("root field with EntityKeyMappings reuses cached nullable negative field for narrower follow-up query", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + reviewsInterceptor := newSubgraphResponseInterceptor(reviews.GraphQLEndpointHandler(reviews.TestOptions)) + reviewsInterceptor.SetModifier(func(body []byte) []byte { + if bytes.Contains(body, []byte(`"_service"`)) { + return body + } + return []byte(`{"data":{"_entities":[null]}}`) + }) + + setup := newFederationSetupWithReviewInterceptor(reviewsInterceptor, addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "product", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + }, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "Product", + CacheName: "default", + TTL: 30 * time.Second, + NegativeCacheTTL: 10 * time.Second, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL) + require.NoError(t, err) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + seedQuery := `query { product(upc: "top-1") { upc name reviews { body } } }` + followUpQuery := `query { product(upc: "top-1") { upc reviews { body } } }` + productKey := `{"__typename":"Product","key":{"upc":"top-1"}}` + + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, seedQuery, nil, t) + assert.Equal(t, `{"data":{"product":{"upc":"top-1","name":"Trilby","reviews":null}}}`, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "seed request should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "seed request should call reviews subgraph") + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 10 * time.Second}}}, + }), sortCacheLogEntries(defaultCache.GetLog())) + storedValue, exists := defaultCache.Peek(productKey) + assert.True(t, exists, "shared entity/root cache key should be populated after the seed request") + assert.Equal(t, compactJSONForAssert(t, `{"__typename":"Product","upc":"top-1","name":"Trilby","reviews":null}`), compactJSONForAssert(t, string(storedValue))) + + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, followUpQuery, nil, t) + assert.Equal(t, `{"data":{"product":{"upc":"top-1","reviews":null}}}`, string(resp2)) + assert.Equal(t, 0, tracker.GetCount(productsHost), "follow-up query should skip products subgraph on shared-key root cache hit") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "follow-up query should skip reviews subgraph: reviews:null is already stored as a field inside the shared root payload (object-shaped hit, not a TypeNull sentinel)") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + }, defaultCache.GetLog()) + }) + + t.Run("root field with EntityKeyMappings does not cache nullable negative entity response when NegativeCacheTTL is unset", func(t *testing.T) { + t.Parallel() + + defaultCache := NewFakeLoaderCache() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + reviewsInterceptor := newSubgraphResponseInterceptor(reviews.GraphQLEndpointHandler(reviews.TestOptions)) + reviewsInterceptor.SetModifier(func(body []byte) []byte { + if bytes.Contains(body, []byte(`"_service"`)) { + return body + } + return []byte(`{"data":{"_entities":[null]}}`) + }) + + setup := newFederationSetupWithReviewInterceptor(reviewsInterceptor, addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{ + EnableL1Cache: false, + EnableL2Cache: true, + }), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "product", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Product", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + }, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "Product", + CacheName: "default", + TTL: 30 * time.Second, + }, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, err := url.Parse(setup.ProductsUpstreamServer.URL) + require.NoError(t, err) + reviewsURLParsed, err := url.Parse(setup.ReviewsUpstreamServer.URL) + require.NoError(t, err) + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + query := `query { product(upc: "top-1") { upc name reviews { body } } }` + expected := `{"data":{"product":{"upc":"top-1","name":"Trilby","reviews":null}}}` + productKey := `{"__typename":"Product","key":{"upc":"top-1"}}` + + defaultCache.ClearLog() + tracker.Reset() + resp, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp)) + assert.Equal(t, 1, tracker.GetCount(productsHost), "first request should call products subgraph") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "first request should call reviews subgraph") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + }, defaultCache.GetLog()) + + storedValue, exists := defaultCache.Peek(productKey) + assert.True(t, exists, "shared entity/root cache key should still hold the positive root payload") + assert.Equal(t, compactJSONForAssert(t, `{"__typename":"Product","upc":"top-1","name":"Trilby"}`), compactJSONForAssert(t, string(storedValue))) + + defaultCache.ClearLog() + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, query, nil, t) + assert.Equal(t, expected, string(resp2)) + assert.Equal(t, 0, tracker.GetCount(productsHost), "second request should skip products subgraph on shared-key root cache hit") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "second request should call reviews subgraph again when negative caching is disabled") + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + }, defaultCache.GetLog()) + }) +} diff --git a/execution/engine/federation_caching_root_split_test.go b/execution/engine/federation_caching_root_split_test.go new file mode 100644 index 0000000000..67ee6779f0 --- /dev/null +++ b/execution/engine/federation_caching_root_split_test.go @@ -0,0 +1,441 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestRootFieldSplitByDatasource verifies that when multiple root fields are split across +// different datasource fetches, each fetch gets its own cache entry and key. +func TestRootFieldSplitByDatasource(t *testing.T) { + t.Parallel() + + // Verifies two cached root fields on the same subgraph are isolated into + // separate L2 entries; a warm request should skip both subgraph fetches. + t.Run("two cached root fields on same subgraph use independent cache entries", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + // Configure two Query root fields on accounts with the same cache and TTL. + // They share a subgraph but must not share cache keys or write entries. + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "default", TTL: 30 * time.Second}, + {TypeName: "Query", FieldName: "cat", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + gqlClient := NewGraphqlClient(http.DefaultClient) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // COLD path: cache is empty, so both root fields miss L2 and are written + // back under independent Query-field keys. + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `{ me { id username } cat { name } }`, nil, t) + // Response proves both isolated fetches still merge into the original shape. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"}}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // One bulk Get covers both root keys; one bulk Set writes both independent keys. + assert.Equal(t, 2, len(logAfterFirst), "Should have 2 cache operations (1 bulk get, 1 bulk set)") + + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, Hit: false}, + {Key: `{"__typename":"Query","field":"me"}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Query","field":"me"}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + + // Both fields miss, so accounts is called once per isolated root fetch. + assert.Equal(t, 2, tracker.GetCount(accountsHost), "Should call accounts subgraph twice (once per root field)") + + // WARM path: both root field entries exist, so the same query should be + // served entirely from L2 with no accounts call. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `{ me { id username } cat { name } }`, nil, t) + // Same response proves cached values preserve the composed response shape. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"}}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // Both keys hit in one bulk Get; no Set is needed on a complete hit. + assert.Equal(t, 1, len(logAfterSecond), "Should have 1 bulk cache get operation (both hits)") + + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, Hit: true}, + {Key: `{"__typename":"Query","field":"me"}`, Hit: true}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond)) + + // Complete L2 hit means both accounts root fetches are skipped. + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Should not call accounts subgraph (both cache hits)") + }) + + // Verifies isolated root fields keep their own TTL values when written to + // the same named cache. + t.Run("root fields with different TTLs write separate TTLs", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + // Same setup pattern as above, but me gets 10s and cat gets 60s to prove + // TTL is attached per root-field configuration, not per cache name. + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "default", TTL: 10 * time.Second}, + {TypeName: "Query", FieldName: "cat", CacheName: "default", TTL: 60 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + gqlClient := NewGraphqlClient(http.DefaultClient) + + // COLD path: both fields miss and write entries with their configured TTLs. + defaultCache.ClearLog() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `{ me { id username } cat { name } }`, nil, t) + // Response is the control; the contract under test is the TTL in Set logs. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"}}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, Hit: false}, + {Key: `{"__typename":"Query","field":"me"}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, TTL: 60 * time.Second}, + {Key: `{"__typename":"Query","field":"me"}`, TTL: 10 * time.Second}, + }}, + } + // Exact Set TTLs prove isolated fetches preserve per-field TTL config. + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + }) + + // Verifies one cached root field does not accidentally cache its uncached + // sibling; only the cached field should hit on the warm request. + t.Run("cached root field hits while uncached sibling still fetches", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + // Only Query.me is cacheable. Query.cat remains uncached even though it + // shares the same accounts subgraph and query document. + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + gqlClient := NewGraphqlClient(http.DefaultClient) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // COLD path: me misses and writes; cat is fetched but never appears in + // the cache log because it has no root-field cache config. + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `{ me { id username } cat { name } }`, nil, t) + // Both fields are fetched from accounts and merged despite only me caching. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"}}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Only me has get/set operations; cat is intentionally absent. + assert.Equal(t, 2, len(logAfterFirst), "Should have 2 cache operations (get+set for me only)") + + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"me"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"me"}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + + // Both root fields fetch on cold path: me to populate cache, cat because + // it is uncached. + assert.Equal(t, 2, tracker.GetCount(accountsHost), "Should call accounts subgraph twice (once per isolated root field)") + + // WARM path: me is served from L2, cat still calls accounts because it + // was never cached. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `{ me { id username } cat { name } }`, nil, t) + // Same response proves cached and live root-field results compose. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"}}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // Only me is looked up and hits; cat remains absent from cache operations. + assert.Equal(t, 1, len(logAfterSecond), "Should have 1 cache get (me hit)") + + wantLogSecond := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"me"}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond)) + + // The one remaining accounts call is cat only; me is served from cache. + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Should call accounts subgraph once (cat only, me from cache)") + }) + + // Verifies root-field cache isolation still composes correctly with entity + // caching across other subgraphs in the same operation. + t.Run("cached root split composes with entity caching", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + // Configure accounts root fields plus User entity caching, products root + // caching, and reviews Product entity caching to exercise mixed cache layers. + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "default", TTL: 30 * time.Second}, + {TypeName: "Query", FieldName: "cat", CacheName: "default", TTL: 30 * time.Second}, + }, + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + gqlClient := NewGraphqlClient(http.DefaultClient) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // This query combines accounts root-field split (me/cat), products root + // caching (topProducts), and reviews/accounts entity resolution. + query := `{ + me { id username } + cat { name } + topProducts { + name + reviews { + body + authorWithoutProvides { username } + } + } + }` + + // COLD path: every configured root/entity cache is empty, so all involved + // subgraphs must be called and then populated. + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + // Response proves root-field split and entity resolution compose. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"},"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + wantLogFirst := []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, Hit: false}, + {Key: `{"__typename":"Query","field":"me"}`, Hit: false}, + {Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}, + }, + }, + { + Operation: "set", + Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Query","field":"me"}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}, + }, + }, + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }, + }, + { + Operation: "set", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }, + }, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + // Cold path misses and writes all configured root/entity cache entries. + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + + // accounts: me root, cat root, and User entity resolution all miss cold. + assert.Equal(t, 3, tracker.GetCount(accountsHost), "accounts: once for me, once for cat, once for User entity") + // products and reviews each miss once for their configured cache layer. + assert.Equal(t, 1, tracker.GetCount(productsHost), "products: once for topProducts") + assert.Equal(t, 1, tracker.GetCount(reviewsHost), "reviews: once for Product entity") + + // WARM path: all root/entity entries exist, so no subgraph should be called. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, query, nil, t) + // Same response proves all pieces can be served from their cache entries. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"},"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantLogSecond := []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, Hit: true}, + {Key: `{"__typename":"Query","field":"me"}`, Hit: true}, + {Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}, + }, + }, + { + Operation: "get", + Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }, + }, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + // Warm path hits every configured root/entity cache entry and writes nothing. + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond)) + + // Zero calls on every subgraph proves root-field and entity caches all hit. + assert.Equal(t, 0, tracker.GetCount(accountsHost), "accounts: all from cache") + assert.Equal(t, 0, tracker.GetCount(productsHost), "products: root field from cache") + assert.Equal(t, 0, tracker.GetCount(reviewsHost), "reviews: entity from cache") + }) + + // Verifies deleting one isolated root-field key does not evict or poison the + // sibling root-field entry stored in the same named cache. + t.Run("deleting one root field key leaves sibling cache entry intact", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + // Same two-root-field cache setup as the first subtest; this one manually + // deletes only Query.me after both entries have been populated. + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "default", TTL: 30 * time.Second}, + {TypeName: "Query", FieldName: "cat", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + gqlClient := NewGraphqlClient(http.DefaultClient) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // COLD path: populate both me and cat root-field entries. + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `{ me { id username } cat { name } }`, nil, t) + // Control response before manual invalidation. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"}}}`, string(resp)) + + // Invalidate only Query.me; Query.cat should remain present and hit. + err := defaultCache.Delete(ctx, []string{`{"__typename":"Query","field":"me"}`}) + require.NoError(t, err) + + // MIXED path: cat should hit from L2, me should miss and be re-written. + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `{ me { id username } cat { name } }`, nil, t) + // Response stays identical even though one field is refetched and one is cached. + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me"},"cat":{"name":"Pepper"}}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Query","field":"cat"}`, Hit: true}, + {Key: `{"__typename":"Query","field":"me"}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"me"}`, TTL: 30 * time.Second}}}, + } + // Bulk Get proves cat survived deletion while me missed; Set proves me + // is re-cached after the refetch. + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(logAfterSecond)) + + // Only the invalidated me root field needs a new accounts call. + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Should call accounts once (me re-fetch only)") + }) +} diff --git a/execution/engine/federation_caching_source_test.go b/execution/engine/federation_caching_source_test.go new file mode 100644 index 0000000000..6b75ed48f1 --- /dev/null +++ b/execution/engine/federation_caching_source_test.go @@ -0,0 +1,296 @@ +package engine_test + +import ( + "context" + "net/http" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestCacheWriteEventSource_MutationL2Write verifies that L2 writes triggered by mutations +// have Source=CacheSourceMutation in analytics, distinguishing them from query-driven writes. +func TestCacheWriteEventSource_MutationL2Write(t *testing.T) { + t.Parallel() + // Verify that L2 writes triggered by a mutation have Source=CacheSourceMutation in the analytics snapshot. + defaultCache := NewFakeLoaderCache() + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + MutationFieldCaching: plan.MutationFieldCacheConfigurations{ + {FieldName: "addReview", EnableEntityL2CachePopulation: true}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Execute mutation that triggers User entity resolution → L2 write + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `mutation AddReview($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } + }`, + queryVariables{"authorID": "1234", "upc": "top-1", "review": "Great!"}, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + // Assert entire snapshot — L2 write must have Source=CacheSourceMutation + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Writes: []resolve.CacheWriteEvent{ + { + CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + EntityType: "User", + ByteSize: 49, + DataSource: "accounts", + CacheLevel: resolve.CacheLevelL2, + TTL: 30 * time.Second, + Source: resolve.CacheSourceMutation, // Mutation-triggered L2 write carries Source=mutation + }, + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`}, // xxhash("Me") + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation triggered resolution of 1 User entity + }, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) +} + +// TestMutationCacheTTLOverride_E2E verifies end-to-end that MutationFieldCacheConfiguration.TTL +// overrides the entity's default TTL for mutation-driven L2 writes. +func TestMutationCacheTTLOverride_E2E(t *testing.T) { + t.Parallel() + // Verify that MutationFieldCacheConfiguration.TTL overrides the entity's default TTL. + defaultCache := NewFakeLoaderCache() + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 300 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + MutationFieldCaching: plan.MutationFieldCacheConfigurations{ + {FieldName: "addReview", EnableEntityL2CachePopulation: true, TTL: 60 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + defaultCache.ClearLog() + + // Execute mutation — TTL should be 60s (mutation override), not 300s (entity default) + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `mutation AddReview($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } + }`, + queryVariables{"authorID": "1234", "upc": "top-1", "review": "Great!"}, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + // Assert entire cache log — single Set with mutation TTL override (60s), no Get (mutations skip L2 reads) + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 60 * time.Second}}}, // L2 write uses mutation TTL override (60s), not entity default (300s) + }, defaultCache.GetLog()) +} + +// TestOnSubscriptionCacheCallbacks verifies that subscription cache lifecycle callbacks +// (OnSubscriptionCacheHit, OnSubscriptionCacheSet) are invoked at the correct times. +func TestOnSubscriptionCacheCallbacks(t *testing.T) { + t.Parallel() + t.Run("OnSubscriptionCacheWrite fires on subscription entity population", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + + var mu sync.Mutex + var writeEvents []resolve.CacheWriteEvent + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + withResolverOptions(func(opts *resolve.ResolverOptions) { + opts.OnSubscriptionCacheWrite = func(event resolve.CacheWriteEvent) { + mu.Lock() + writeEvents = append(writeEvents, event) + mu.Unlock() + } + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := strings.ReplaceAll(setup.GatewayServer.URL, "http://", "ws://") + + // Subscribe to product updates — subscription entity population writes Product to L2 + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } + }`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // Assert entire callback events slice — exactly 1 event with all fields matching + mu.Lock() + defer mu.Unlock() + assert.Equal(t, []resolve.CacheWriteEvent{ + { + CacheKey: `{"__typename":"Product","key":{"upc":"top-4"}}`, + EntityType: "Product", + ByteSize: 64, // Serialized Product entity size for upc=top-4 Bowler/price=1 + DataSource: "products", + CacheLevel: resolve.CacheLevelL2, + TTL: 30 * time.Second, + Source: resolve.CacheSourceSubscription, // Subscription cache write carries Source=subscription + }, + }, writeEvents) + }) + + t.Run("OnSubscriptionCacheInvalidate fires on invalidation-only subscription", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + + var mu sync.Mutex + var invalidateCalls []struct { + entityType string + keys []string + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second, EnableInvalidationOnKeyOnly: true}, + }, + }, + }), + withResolverOptions(func(opts *resolve.ResolverOptions) { + opts.OnSubscriptionCacheInvalidate = func(entityType string, keys []string) { + mu.Lock() + invalidateCalls = append(invalidateCalls, struct { + entityType string + keys []string + }{entityType, keys}) + mu.Unlock() + } + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Pre-populate L2 so there's something to invalidate + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, Value: []byte(`{"upc":"top-4","name":"Bowler","price":100,"__typename":"Product"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + }, seedLog) + + wsAddr := strings.ReplaceAll(setup.GatewayServer.URL, "http://", "ws://") + + // Subscribe using key-only query — selects only @key field (upc), so invalidation mode triggers + defaultCache.ClearLog() + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceKeyOnly($upc: String!) { + updateProductPrice(upc: $upc) { + upc + reviews { + body + authorWithoutProvides { + username + } + } + } + }`, + queryVariables{"upc": "top-4"}, 1, t) + require.Equal(t, 1, len(messages)) + + // Assert entire cache log — should contain a delete for the Product entity key + cacheLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "delete", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`}}}, // Subscription key-only event triggers L2 delete + }, cacheLog) + + // Assert entire callback data — exactly 1 invalidation call + mu.Lock() + defer mu.Unlock() + assert.Equal(t, []struct { + entityType string + keys []string + }{ + { + entityType: "Product", + keys: []string{ + `{"__typename":"Product","key":{"upc":"top-4"}}`, + }, + }, + }, invalidateCalls) + }) +} diff --git a/execution/engine/federation_caching_test.go b/execution/engine/federation_caching_test.go new file mode 100644 index 0000000000..2928349d5a --- /dev/null +++ b/execution/engine/federation_caching_test.go @@ -0,0 +1,1107 @@ +package engine_test + +import ( + "context" + "net/http" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestFederationCaching_BasicMissThenHit verifies the fundamental L2 cache flow: +// first request misses cache and populates it, second request hits cache and skips subgraph calls. +func TestFederationCaching_BasicMissThenHit(t *testing.T) { + t.Parallel() + t.Run("two subgraphs - miss then hit", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable caching for L2 tests (opt-in per-subgraph) + // Explicitly configure which subgraphs cache which root fields and entity types + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking (URL.Host includes host:port) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should miss cache and then set + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: Query.topProducts (get/set), Product entities (get/set), User entities (get/set) + // With root field caching enabled, Query.topProducts is now cached too. + // Cache operations: get+set for root field, Products, Users = 6 total + assert.Equal(t, 6, len(logAfterFirst)) + + // Verify the exact cache access log (order may vary for keys within each operation) + wantLogFirst := []CacheLogEntry{ + // Root field Query.topProducts + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches (reviews data for each product) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches (author data) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + + // Subgraph calls: each called once (cold cache) + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst) + assert.Equal(t, 1, reviewsCallsFirst) + assert.Equal(t, 1, accountsCallsFirst) + + // Second query - should hit cache and then set + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // All cache operations should be gets with hits: Query.topProducts, Product entities, User entities + // With root field caching enabled, all 3 types should hit cache + // All cache operations should be gets with hits + assert.Equal(t, 3, len(logAfterSecond)) + + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond)) + + // Subgraph calls: all skipped (warm cache) + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsSecond) + assert.Equal(t, 0, reviewsCallsSecond) + assert.Equal(t, 0, accountsCallsSecond) + }) + + t.Run("two subgraphs - partial fields then full fields", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Enable caching for L2 tests (opt-in per-subgraph) + // Configure root field caching for products and entity caching for reviews/accounts + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway(withCachingEnableART(false), withCachingLoaderCache(caches), withHTTPClient(trackingClient), withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), withSubgraphEntityCachingConfigs(subgraphCachingConfigs))) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking (URL.Host includes host:port) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - only ask for name field (products subgraph only) + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query { + topProducts { + name + } + }`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby"},{"name":"Fedora"}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // With root field caching enabled: get miss + set for Query.topProducts + // Root field caching: get miss + set = 2 operations + assert.Equal(t, 2, len(logAfterFirst)) + + // Verify the exact cache access log for first query + wantLogFirst := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogFirst), sortCacheLogEntries(logAfterFirst)) + + // Subgraph calls: only products called (name-only query) + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsFirst) + assert.Equal(t, 0, reviewsCallsFirst) + assert.Equal(t, 0, accountsCallsFirst) + + // Second query - ask for full fields including reviews (products + reviews + accounts) + defaultCache.ClearLog() + tracker.Reset() + secondQuery := `query { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } + }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, secondQuery, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // Cache operations with root field caching: + // - Root field Query.topProducts: get (miss - different query shape) + set + // - Product entities: get miss + set + // - User entities: get miss + set + // Note: The first query only requested 'name', second query requests 'name' and 'reviews'. + // These are different query operations, so different cache keys. + // Root field hit + re-set, Products miss + set, Users miss + set = 6 operations + assert.Equal(t, 6, len(logAfterSecond)) + + // Verify the exact cache access log for second query + // Note: Root field Query.topProducts is a HIT because cache key doesn't include selected fields + // The first query already cached this root field, so the second query reuses it + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT (same cache key, different selection doesn't matter) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Still need to set because cache returns partial data that needs merging + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + // Product entity fetches - MISS (first time fetching these) + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity fetches - MISS (first time fetching these) + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond)) + + // Subgraph calls: all called (new entity types needed) + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + assert.Equal(t, 1, productsCallsSecond) + assert.Equal(t, 1, reviewsCallsSecond) + assert.Equal(t, 1, accountsCallsSecond) + + // Third query - repeat the second query (full fields) + defaultCache.ClearLog() + tracker.Reset() + thirdQuery := `query { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } + }` + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, thirdQuery, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterThird := defaultCache.GetLog() + // All cache operations should be gets with hits: root field, Product entities, User entities + // Third query is same as second query, so all should hit cache + // All hits: 3 get operations + assert.Equal(t, 3, len(logAfterThird)) + + // Verify the exact cache access log for third query (all hits) + wantLogThird := []CacheLogEntry{ + // Root field Query.topProducts - HIT + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entity fetches - HITS + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogThird), sortCacheLogEntries(logAfterThird)) + + // Subgraph calls: all skipped (warm cache) + productsCallsThird := tracker.GetCount(productsHost) + reviewsCallsThird := tracker.GetCount(reviewsHost) + accountsCallsThird := tracker.GetCount(accountsHost) + assert.Equal(t, 0, productsCallsThird) + assert.Equal(t, 0, reviewsCallsThird) + assert.Equal(t, 0, accountsCallsThird) + }) + + t.Run("two subgraphs - with subgraph header prefix", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with tracking + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Create mock SubgraphHeadersBuilder that returns a fixed hash for each subgraph + // Subgraph names are used as keys for the header hash lookup: + // - "accounts" -> prefix 33333 for User entity cache keys + // - "products" -> prefix 11111 for Query cache keys + // - "reviews" -> prefix 22222 for Product entity cache keys + mockHeadersBuilder := &mockSubgraphHeadersBuilder{ + hashes: map[string]uint64{ + "accounts": 33333, + "products": 11111, + "reviews": 22222, + }, + } + + // Enable root field and entity caching with subgraph header prefix for L2 tests (opt-in per-subgraph caching) + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true}, + }, + }, + } + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withSubgraphHeadersBuilder(mockHeadersBuilder), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking (URL.Host includes host:port) + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + productsHost := productsURLParsed.Host + reviewsHost := reviewsURLParsed.Host + + // First query - should miss cache and then set with prefixed keys + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterFirst := defaultCache.GetLog() + // Cache operations: products (get/set), reviews (get/set), accounts User entity (get/set) + assert.Equal(t, 6, len(logAfterFirst)) + + wantLog := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `11111:{"__typename":"Query","field":"topProducts"}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `11111:{"__typename":"Query","field":"topProducts"}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{ + {Key: `22222:{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `22222:{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + {Operation: "set", Items: []CacheLogItem{ + {Key: `22222:{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `22222:{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + }}, + // User entity resolution from accounts (author.username requires entity fetch) + {Operation: "get", Items: []CacheLogItem{{Key: `33333:{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `33333:{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(logAfterFirst)) + + // Verify subgraph calls for first query + productsCallsFirst := tracker.GetCount(productsHost) + reviewsCallsFirst := tracker.GetCount(reviewsHost) + accountsCallsFirst := tracker.GetCount(accountsHost) + + // Subgraph calls: each called once (cold cache) + assert.Equal(t, 1, productsCallsFirst) + assert.Equal(t, 1, reviewsCallsFirst) + assert.Equal(t, 1, accountsCallsFirst) + + // Second query - should hit cache with prefixed keys + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + logAfterSecond := defaultCache.GetLog() + // All hits: 3 get operations with prefixed keys + assert.Equal(t, 3, len(logAfterSecond)) + + wantLogSecond := []CacheLogEntry{ + // Root field Query.topProducts - HIT with prefix + {Operation: "get", Items: []CacheLogItem{{Key: `11111:{"__typename":"Query","field":"topProducts"}`, Hit: true}}}, + // Product entities - HIT with prefix + {Operation: "get", Items: []CacheLogItem{ + {Key: `22222:{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `22222:{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + }}, + // User entities - HIT with prefix + {Operation: "get", Items: []CacheLogItem{{Key: `33333:{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogSecond), sortCacheLogEntries(logAfterSecond)) + + // Verify subgraph calls for second query - all should be skipped due to cache hits + productsCallsSecond := tracker.GetCount(productsHost) + reviewsCallsSecond := tracker.GetCount(reviewsHost) + accountsCallsSecond := tracker.GetCount(accountsHost) + + // Subgraph calls: all skipped (warm cache) + assert.Equal(t, 0, productsCallsSecond) + assert.Equal(t, 0, reviewsCallsSecond) + assert.Equal(t, 0, accountsCallsSecond) + }) +} + +// TestFederationCaching_MutationSkipsL2Read verifies that mutations never read from L2 cache +// (always fetch fresh data) and optionally populate L2 when EnableEntityL2CachePopulation is set. +func TestFederationCaching_MutationSkipsL2Read(t *testing.T) { + t.Parallel() + t.Run("mutation skips L2 cache read and writes updated entity", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + MutationFieldCaching: plan.MutationFieldCacheConfigurations{ + {FieldName: "addReview", EnableEntityL2CachePopulation: true}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Step 1: Query populates L2 cache. + // The query fetches me.reviews.authorWithoutProvides.username, which triggers + // User entity resolution from accounts. L2 cache is empty → miss → fetch → set. + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MeReviewsWithoutProvides { + me { + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"me":{"reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}}}`, string(resp)) + + logAfterQuery1 := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterQuery1), "Step 1: should have exactly 2 cache operations (get miss + set for User)") + wantLogQuery1 := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogQuery1), sortCacheLogEntries(logAfterQuery1), "Step 1: cache log should show get miss then set for User") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 1: should call accounts subgraph exactly once for User entity resolution") + + // Step 2: Mutation skips L2 read, still writes to L2. + // The mutation guard in tryL2CacheLoad checks l.info.OperationType != Query, + // so L2 read is bypassed. After the entity fetch completes, updateL2Cache + // writes fresh data (cacheMustBeUpdated=true). + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `mutation AddReviewWithoutProvides($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } +}`, queryVariables{ + "authorID": "1234", + "upc": "top-1", + "review": "Great!", + }, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + logAfterMutation := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterMutation), "Step 2: should have exactly 1 cache operation (set only, NO get)") + wantLogMutation := []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogMutation), sortCacheLogEntries(logAfterMutation), "Step 2: mutation should only set to L2, never get") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 2: mutation should call accounts subgraph (not served from cache)") + + // Step 3: Query reads from L2 (hit). + // Same query as step 1. User entity is in L2 from the mutation's write → HIT. + // No accounts call needed (entity resolution fully served from L2). + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MeReviewsWithoutProvides { + me { + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"me":{"reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}},{"body":"Great!","authorWithoutProvides":{"username":"Me"}}]}}}`, string(resp)) + + logAfterQuery2 := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterQuery2), "Step 3: should have exactly 1 cache operation (get hit)") + wantLogQuery2 := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogQuery2), sortCacheLogEntries(logAfterQuery2), "Step 3: query should hit L2 cache for User") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Step 3: query should NOT call accounts subgraph (L2 cache hit)") + }) + + t.Run("mutation with no prior cache writes to L2 for subsequent query", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + MutationFieldCaching: plan.MutationFieldCacheConfigurations{ + {FieldName: "addReview", EnableEntityL2CachePopulation: true}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Step 1: Mutation first (no prior cache) + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `mutation AddReviewWithoutProvides($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } +}`, queryVariables{ + "authorID": "1234", + "upc": "top-1", + "review": "Great!", + }, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + logAfterMutation := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterMutation), "Step 1: should have exactly 1 cache operation (set only)") + wantLogMutation := []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogMutation), sortCacheLogEntries(logAfterMutation), "Step 1: mutation should only set to L2") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 1: should call accounts subgraph exactly once") + + // Step 2: Query reads from L2 (hit from mutation's write) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MeReviewsWithoutProvides { + me { + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"me":{"reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}},{"body":"Great!","authorWithoutProvides":{"username":"Me"}}]}}}`, string(resp)) + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterQuery), "Step 2: should have exactly 1 cache operation (get hit)") + wantLogQuery := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogQuery), sortCacheLogEntries(logAfterQuery), "Step 2: query should hit L2 cache for User") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Step 2: query should NOT call accounts subgraph (L2 cache hit)") + }) + + t.Run("consecutive mutations never read from L2 cache", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + MutationFieldCaching: plan.MutationFieldCacheConfigurations{ + {FieldName: "addReview", EnableEntityL2CachePopulation: true}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Step 1: First mutation + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `mutation AddReviewWithoutProvides($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } +}`, queryVariables{ + "authorID": "1234", + "upc": "top-1", + "review": "Great!", + }, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + logAfterMutation1 := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterMutation1), "Step 1: should have exactly 1 cache operation (set only)") + wantLogMutation1 := []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogMutation1), sortCacheLogEntries(logAfterMutation1), "Step 1: first mutation should only set to L2") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 1: should call accounts subgraph exactly once") + + // Step 2: Second mutation (same author, different review) + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `mutation AddReviewWithoutProvides($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } +}`, queryVariables{ + "authorID": "1234", + "upc": "top-2", + "review": "Also great!", + }, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Also great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + logAfterMutation2 := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterMutation2), "Step 2: should have exactly 1 cache operation (set only, NO get even though L2 has data)") + wantLogMutation2 := []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogMutation2), sortCacheLogEntries(logAfterMutation2), "Step 2: second mutation should only set to L2, never get") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 2: should call accounts subgraph exactly once (not from cache)") + }) + + t.Run("query with different fields after mutation hits L2 cache", func(t *testing.T) { + t.Parallel() + // A mutation that triggers entity resolution for User populates L2 with the fields + // the mutation selected. A subsequent query selecting a superset of fields gets a + // PARTIAL hit on L2 (the cached key is present but missing some requested fields), + // and the loader still fetches from accounts to fill the missing fields. + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true, EnableCacheAnalytics: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "reviews", + MutationFieldCaching: plan.MutationFieldCacheConfigurations{ + {FieldName: "addReview", EnableEntityL2CachePopulation: true}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Step 1: Mutation writes User entity data to L2 (skips L2 read). + // The mutation guard in tryL2CacheLoad bypasses L2 reads for non-query operations. + // After entity resolution, updateL2Cache writes fresh User data to L2. + defaultCache.ClearLog() + tracker.Reset() + resp, headers := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `mutation AddReviewWithoutProvides($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } +}`, queryVariables{ + "authorID": "1234", + "upc": "top-1", + "review": "Great!", + }, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + logAfterMutation := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterMutation), "Step 1: should have exactly 1 cache operation (set only)") + wantLogMutation := []CacheLogEntry{ + // updateL2Cache writes fresh User data after entity resolution (mutation skipped L2 read). + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogMutation), sortCacheLogEntries(logAfterMutation), "Step 1: mutation should only set to L2") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 1: should call accounts subgraph exactly once") + + // Analytics snapshot attributes the L2 write to the accounts subgraph / User entity + // (this is the documented attribution channel; the old Caller field has been removed). + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Writes: []resolve.CacheWriteEvent{ + { + CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, + EntityType: "User", + ByteSize: 49, + DataSource: "accounts", + CacheLevel: resolve.CacheLevelL2, + TTL: 30 * time.Second, + Source: resolve.CacheSourceMutation, // Mutation-triggered L2 write after User entity resolution + }, + }, + FieldHashes: []resolve.EntityFieldHash{ + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`}, // addReview.authorWithoutProvides.username = "Me" + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 1, UniqueKeys: 1}, // Mutation resolved 1 User entity + }, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + + // Step 2: Query requests different fields (username + nickname). + // The query plan has two fetch nodes for the User cache key: one entity resolution for + // `authorWithoutProvides` and one root fetch for `me`. The entity L2 read is a PARTIAL + // hit (cached key present but missing `nickname`), and the `me` fetch to accounts + // (called once) provides the full User data which `updateL2Cache` writes back. + defaultCache.ClearLog() + tracker.Reset() + resp, headers = gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, `query MeReviewsWithoutProvidesWithNickname { + me { + reviews { + body + authorWithoutProvides { + username + nickname + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"me":{"reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me","nickname":"nick-Me"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me","nickname":"nick-Me"}},{"body":"Great!","authorWithoutProvides":{"username":"Me","nickname":"nick-Me"}}]}}}`, string(resp)) + + logAfterQuery := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterQuery), "Step 2: should have exactly 2 cache operations (get hit + set)") + wantLogQuery := []CacheLogEntry{ + // Entity resolution for authorWithoutProvides checks L2 → cache key present (FakeLoaderCache + // only tracks key presence; the analytics layer classifies this as a PartialHit because the + // cached entry is missing the `nickname` field). + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + // A separate fetch to accounts (me root query) fetches User data and writes it to L2. + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogQuery), sortCacheLogEntries(logAfterQuery), "Step 2: cache key is present (partial hit) plus writeback") + // Accounts is called once for the me root query (not cached), but NOT for entity resolution (L2 hit) + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 2: accounts called once for me root query, entity resolution served from L2 cache") + + // Analytics snapshot attributes both the L2 read (partial hit) and the L2 writeback to + // accounts / User — this is the documented attribution channel replacing the old Caller field. + // The L2 hit is a PARTIAL hit: the mutation's cache entry only contains `username`, but this + // query also selects `nickname`, so the fetch still needs to go to accounts for the missing field. + assert.Equal(t, normalizeSnapshot(resolve.CacheAnalyticsSnapshot{ + L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", Kind: resolve.CacheKeyPartialHit, DataSource: "accounts"}, // Cached entity has username but not nickname + }, + L2Writes: []resolve.CacheWriteEvent{ + {CacheKey: `{"__typename":"User","key":{"id":"1234"}}`, EntityType: "User", ByteSize: 70, DataSource: "accounts", CacheLevel: resolve.CacheLevelL2, TTL: 30 * time.Second, Source: resolve.CacheSourceQuery}, // Writeback includes both username and nickname after the accounts fetch + }, + FieldHashes: []resolve.EntityFieldHash{ + // Three nickname values (one per review's author) and three username values. + {EntityType: "User", FieldName: "nickname", FieldHash: 10005559372589796850, KeyRaw: `{"id":"1234"}`}, + {EntityType: "User", FieldName: "nickname", FieldHash: 10005559372589796850, KeyRaw: `{"id":"1234"}`}, + {EntityType: "User", FieldName: "nickname", FieldHash: 10005559372589796850, KeyRaw: `{"id":"1234"}`}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`}, + {EntityType: "User", FieldName: "username", FieldHash: 4957449860898447395, KeyRaw: `{"id":"1234"}`}, + }, + EntityTypes: []resolve.EntityTypeInfo{ + {TypeName: "User", Count: 4, UniqueKeys: 2}, // me User + 3 authors + }, + }), normalizeSnapshot(parseCacheAnalytics(t, headers))) + }) + + t.Run("mutation skips L2 write by default without EnableEntityL2CachePopulation", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{"default": defaultCache} + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{Transport: tracker} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Step 1: Query populates L2 cache (flag does not affect queries). + defaultCache.ClearLog() + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MeReviewsWithoutProvides { + me { + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"me":{"reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}}}`, string(resp)) + + logAfterQuery1 := defaultCache.GetLog() + assert.Equal(t, 2, len(logAfterQuery1), "Step 1: should have exactly 2 cache operations (get miss + set)") + wantLogQuery1 := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: false}}}, + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogQuery1), sortCacheLogEntries(logAfterQuery1), "Step 1: query should miss then set") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 1: should call accounts subgraph exactly once") + + // Step 2: Mutation produces zero cache operations (read skipped because mutation, write skipped because flag). + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `mutation AddReviewWithoutProvides($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } +}`, queryVariables{ + "authorID": "1234", + "upc": "top-1", + "review": "Great!", + }, t) + assert.Equal(t, `{"data":{"addReview":{"body":"Great!","authorWithoutProvides":{"username":"Me"}}}}`, string(resp)) + + logAfterMutation := defaultCache.GetLog() + assert.Equal(t, 0, len(logAfterMutation), "Step 2: should have zero cache operations (no read AND no write)") + assert.Equal(t, 1, tracker.GetCount(accountsHost), "Step 2: should call accounts subgraph (not cached)") + + // Step 3: Query still hits L2 from step 1's write (mutation didn't overwrite it). + defaultCache.ClearLog() + tracker.Reset() + resp = gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MeReviewsWithoutProvides { + me { + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"me":{"reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}},{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}},{"body":"Great!","authorWithoutProvides":{"username":"Me"}}]}}}`, string(resp)) + + logAfterQuery2 := defaultCache.GetLog() + assert.Equal(t, 1, len(logAfterQuery2), "Step 3: should have exactly 1 cache operation (get hit)") + wantLogQuery2 := []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, Hit: true}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLogQuery2), sortCacheLogEntries(logAfterQuery2), "Step 3: query should hit L2 from step 1's write") + assert.Equal(t, 0, tracker.GetCount(accountsHost), "Step 3: should NOT call accounts subgraph (L2 cache hit)") + }) +} + +// TestFederationCaching_PlanTimeTypeName verifies that entity cache keys use the type name +// from the query plan when __typename is missing from the subgraph response data. +// This tests the fallback path: a non-compliant subgraph omits __typename from its response, +// but the cache key should still use the correct entity type name (e.g. "Product") +// rather than a generic fallback. +func TestFederationCaching_PlanTimeTypeName(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + + // Transport that strips __typename from the products subgraph response. + // This simulates a non-compliant subgraph that omits __typename from entity data. + // The resolver should fall back to the plan-time entity type name for cache keys. + strippingTransport := &typenameStrippingTransport{ + inner: http.DefaultTransport, + } + trackingClient := &http.Client{Transport: strippingTransport} + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + // Record the products URL so the transport knows which responses to strip + productsURL, _ := url.Parse(setup.ProductsUpstreamServer.URL) + strippingTransport.targetHost = productsURL.Host + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + defaultCache.ClearLog() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body } } }`, nil, t) + + // The query should still succeed — missing __typename doesn't crash resolution. + // reviews is null because stripping __typename from the products response means + // the planner cannot build an Entity representation to fetch reviews. + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":null},{"name":"Fedora","reviews":null}]}}`, string(resp)) + + // Cache keys should use "Product" from the query plan, not "Entity". + // Only entity caching for reviews/Product is configured, so we get a single L2 get + // with both product cache keys using the plan-time type name as fallback. + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }}, + }), sortCacheLogEntries(defaultCache.GetLog())) +} diff --git a/execution/engine/federation_caching_trace_test.go b/execution/engine/federation_caching_trace_test.go new file mode 100644 index 0000000000..30aa60df6a --- /dev/null +++ b/execution/engine/federation_caching_trace_test.go @@ -0,0 +1,295 @@ +package engine_test + +import ( + "context" + "encoding/json" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// verifyAndClearRemainingTTL checks that each entity in the trace has a RemainingTTLSeconds +// within the expected range (0, maxTTL], then zeros it for deterministic struct comparison. +// +// Fuzzy bounds are intentional here: RemainingTTLSeconds is wall-clock-dependent +// (decays between L2 write and the moment the trace is serialized), so an exact +// equality assertion is not possible. The normalizer then zeros the field so the +// surrounding struct-level assertion can use assert.Equal. +func verifyAndClearRemainingTTL(t *testing.T, ct *resolve.CacheTrace, maxTTL float64, msg string) { + t.Helper() + for i := range ct.Entities { + if ct.Entities[i].Source == "l2" { + if ct.Entities[i].RemainingTTLSeconds <= 0.0 || ct.Entities[i].RemainingTTLSeconds > maxTTL { + t.Fatalf("%s: entity %d remaining TTL %v outside expected range (0,%v]", msg, i, ct.Entities[i].RemainingTTLSeconds, maxTTL) + } + ct.Entities[i].RemainingTTLSeconds = 0 // zero for deterministic comparison + } + } +} + +// extractResponseData parses a GraphQL response and returns the serialized `data` +// field as a deterministic JSON string. The surrounding `extensions.trace` contains +// non-deterministic values (timestamps, durations, byte sizes, ephemeral ports) and +// is asserted separately via collectCacheTraces / CacheTrace struct comparisons. +func extractResponseData(t *testing.T, resp []byte) string { + t.Helper() + var response map[string]json.RawMessage + require.NoError(t, json.Unmarshal(resp, &response)) + data, ok := response["data"] + require.True(t, ok, "response should contain a data field") + return string(data) +} + +func parseTraceFromResponse(t *testing.T, resp []byte) map[string]any { + t.Helper() + var response map[string]any + require.NoError(t, json.Unmarshal(resp, &response)) + extensions, ok := response["extensions"].(map[string]any) + if !ok { + return nil + } + trace, ok := extensions["trace"].(map[string]any) + if !ok { + return nil + } + return trace +} + +func collectCacheTraces(t *testing.T, trace map[string]any) []resolve.CacheTrace { + t.Helper() + var results []resolve.CacheTrace + fetches, ok := trace["fetches"].(map[string]any) + if !ok { + return nil + } + walkFetchNode(t, fetches, &results) + return results +} + +func walkFetchNode(t *testing.T, node map[string]any, results *[]resolve.CacheTrace) { + t.Helper() + if fetch, ok := node["fetch"].(map[string]any); ok { + if traceData, ok := fetch["trace"].(map[string]any); ok { + if ctRaw, ok := traceData["cache_trace"].(map[string]any); ok { + ctJSON, err := json.Marshal(ctRaw) + require.NoError(t, err) + var ct resolve.CacheTrace + require.NoError(t, json.Unmarshal(ctJSON, &ct)) + *results = append(*results, ct) + } + } + // Also check traces array (for batch/entity fetches with multiple traces) + if traces, ok := fetch["traces"].([]any); ok { + for _, traceItem := range traces { + if traceMap, ok := traceItem.(map[string]any); ok { + if ctRaw, ok := traceMap["cache_trace"].(map[string]any); ok { + ctJSON, err := json.Marshal(ctRaw) + require.NoError(t, err) + var ct resolve.CacheTrace + require.NoError(t, json.Unmarshal(ctJSON, &ct)) + *results = append(*results, ct) + } + } + } + } + } + if children, ok := node["children"].([]any); ok { + for _, child := range children { + if childMap, ok := child.(map[string]any); ok { + walkFetchNode(t, childMap, results) + } + } + } +} + +// TestFederationCaching_CacheTraceInExtensions verifies that cache trace data (hit/miss/TTL) +// is correctly embedded in response extensions when tracing is enabled. +func TestFederationCaching_CacheTraceInExtensions(t *testing.T) { + t.Parallel() + t.Run("L2 miss then hit shows cache_trace in extensions.trace", func(t *testing.T) { + t.Parallel() + tracker := newSubgraphCallTracker(http.DefaultTransport) + + setup := federationtesting.NewFederationSetup(addCachingGateway( + withCachingEnableART(true), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": NewFakeLoaderCache()}), + withHTTPClient(&http.Client{Transport: tracker}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + {SubgraphName: "products", RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "reviews", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }}, + {SubgraphName: "accounts", EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }}, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // --- Request 1: all L2 misses — cache is empty, all fetches go to subgraphs --- + tracker.Reset() + resp1, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body author: authorWithoutProvides { username } } } }`, nil, t) + assert.Equal(t, `{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","author":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","author":{"username":"Me"}}]}]}`, extractResponseData(t, resp1)) + + trace1 := parseTraceFromResponse(t, resp1) + require.NotNil(t, trace1, "Response should contain extensions.trace") + + cacheTraces1 := collectCacheTraces(t, trace1) + require.Equal(t, 3, len(cacheTraces1), "Should have 3 cache traces: products root field, reviews entities, accounts entities") + + assert.Equal(t, resolve.CacheTrace{ + DurationSinceStartNano: 1, // predictable timing + DurationSinceStartPretty: "1ns", + DurationNano: 1, + DurationPretty: "1ns", + L2Enabled: true, + CacheName: "default", + TTLSeconds: 30, + EntityCount: 1, // 1 root field key + L2Miss: 1, // 1 root field miss: Query.topProducts + L2GetDurationNano: 1, + L2GetDurationPretty: "1ns", + L2SetDurationNano: 1, // L2 Set happened after fetch + L2SetDurationPretty: "1ns", + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + }, cacheTraces1[0], "products root field: L2 miss, populated after fetch") + + assert.Equal(t, resolve.CacheTrace{ + DurationSinceStartNano: 1, + DurationSinceStartPretty: "1ns", + DurationNano: 1, + DurationPretty: "1ns", + L2Enabled: true, + CacheName: "default", + TTLSeconds: 30, + EntityCount: 2, // 2 Product entity keys + L2Miss: 2, // 2 Product entities missed + L2GetDurationNano: 1, + L2GetDurationPretty: "1ns", + L2SetDurationNano: 1, + L2SetDurationPretty: "1ns", + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + }, cacheTraces1[1], "reviews entities: 2 Product entities missed") + + assert.Equal(t, resolve.CacheTrace{ + DurationSinceStartNano: 1, + DurationSinceStartPretty: "1ns", + DurationNano: 1, + DurationPretty: "1ns", + L2Enabled: true, + CacheName: "default", + TTLSeconds: 30, + EntityCount: 2, // 2 User entity keys (same user for 2 reviews) + L2Miss: 2, // 2 User entity lookups missed (same user for 2 reviews, deduplicated in batch but 2 cache keys) + L2GetDurationNano: 1, + L2GetDurationPretty: "1ns", + L2SetDurationNano: 1, + L2SetDurationPretty: "1ns", + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"id":"1234"}}`, + }, + }, cacheTraces1[2], "accounts entities: User 1234 missed (2 lookups for 2 reviews)") + + // --- Request 2: all L2 hits — cache was populated by Request 1 --- + tracker.Reset() + resp2, _ := gqlClient.QueryStringWithHeaders(ctx, setup.GatewayServer.URL, + `query { topProducts { name reviews { body author: authorWithoutProvides { username } } } }`, nil, t) + assert.Equal(t, `{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","author":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","author":{"username":"Me"}}]}]}`, extractResponseData(t, resp2)) + + trace2 := parseTraceFromResponse(t, resp2) + require.NotNil(t, trace2, "Response should contain extensions.trace on second request") + + cacheTraces2 := collectCacheTraces(t, trace2) + require.Equal(t, 3, len(cacheTraces2), "Should have 3 cache traces on second request") + + // Verify remaining TTL is present for L2 hits, then zero for deterministic comparison + verifyAndClearRemainingTTL(t, &cacheTraces2[0], 30, "products root field") + verifyAndClearRemainingTTL(t, &cacheTraces2[1], 30, "reviews entities") + verifyAndClearRemainingTTL(t, &cacheTraces2[2], 30, "accounts entities") + + assert.Equal(t, resolve.CacheTrace{ + DurationSinceStartNano: 1, + DurationSinceStartPretty: "1ns", + DurationNano: 1, + DurationPretty: "1ns", + L2Enabled: true, + CacheName: "default", + TTLSeconds: 30, + EntityCount: 1, // 1 root field key + L2Hit: 1, // root field hit from L2 + L2GetDurationNano: 1, + L2GetDurationPretty: "1ns", + Entities: []resolve.CacheTraceEntity{ + {Key: `{"__typename":"Query","field":"topProducts"}`, Source: "l2", ByteSize: 127}, + }, + Keys: []string{`{"__typename":"Query","field":"topProducts"}`}, + }, cacheTraces2[0], "products root field: L2 hit, no Set") + + assert.Equal(t, resolve.CacheTrace{ + DurationSinceStartNano: 1, + DurationSinceStartPretty: "1ns", + DurationNano: 1, + DurationPretty: "1ns", + L2Enabled: true, + CacheName: "default", + TTLSeconds: 30, + EntityCount: 2, // 2 Product entity keys + L2Hit: 2, // both Product entities hit + L2GetDurationNano: 1, + L2GetDurationPretty: "1ns", + Entities: []resolve.CacheTraceEntity{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Source: "l2", ByteSize: 132}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Source: "l2", ByteSize: 188}, + }, + Keys: []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + }, + }, cacheTraces2[1], "reviews entities: both Products from L2") + + assert.Equal(t, resolve.CacheTrace{ + DurationSinceStartNano: 1, + DurationSinceStartPretty: "1ns", + DurationNano: 1, + DurationPretty: "1ns", + L2Enabled: true, + CacheName: "default", + TTLSeconds: 30, + EntityCount: 2, // 2 User entity keys (same user for 2 reviews) + L2Hit: 2, // both User lookups hit (same user, 2 cache key lookups) + L2GetDurationNano: 1, + L2GetDurationPretty: "1ns", + Entities: []resolve.CacheTraceEntity{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Source: "l2", ByteSize: 49}, + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Source: "l2", ByteSize: 49}, + }, + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"id":"1234"}}`, + }, + }, cacheTraces2[2], "accounts entities: User 1234 from L2 (2 lookups)") + + // On full cache hit, no subgraph calls should be made + assert.Equal(t, map[string]int{}, tracker.GetCounts(), "no subgraph calls expected on full cache hit") + }) +} diff --git a/execution/engine/federation_integration_static_test.go b/execution/engine/federation_integration_static_test.go index 65574463fd..c9ed70ced0 100644 --- a/execution/engine/federation_integration_static_test.go +++ b/execution/engine/federation_integration_static_test.go @@ -14,6 +14,22 @@ import ( "github.com/wundergraph/graphql-go-tools/v2/pkg/testing/flags" ) +// mustRecvMessage receives a single subscription message from ch with a timeout. +// Fails the test if the channel is closed unexpectedly or the timeout elapses. +func mustRecvMessage(t *testing.T, ch <-chan string, timeout time.Duration) string { + t.Helper() + select { + case m, ok := <-ch: + if !ok { + t.Fatalf("message channel closed unexpectedly") + } + return m + case <-time.After(timeout): + t.Fatalf("timed out after %s waiting for subscription message", timeout) + return "" + } +} + func TestExecutionEngine_FederationAndSubscription_IntegrationTest(t *testing.T) { t.Parallel() @@ -34,6 +50,7 @@ func TestExecutionEngine_FederationAndSubscription_IntegrationTest(t *testing.T) require.NoError(t, err) t.Run("should successfully execute a federation operation", func(t *testing.T) { + t.Parallel() gqlRequest := &graphql.Request{ OperationName: "", Variables: nil, @@ -44,8 +61,7 @@ func TestExecutionEngine_FederationAndSubscription_IntegrationTest(t *testing.T) require.NoError(t, err) require.True(t, validationResult.Valid) - execCtx, execCtxCancelFn := context.WithCancel(context.Background()) - defer execCtxCancelFn() + execCtx := t.Context() resultWriter := graphql.NewEngineResultWriter() err = engine.Execute(execCtx, gqlRequest, &resultWriter) @@ -61,7 +77,7 @@ func TestExecutionEngine_FederationAndSubscription_IntegrationTest(t *testing.T) t.Run("subscription", func(t *testing.T) { t.Parallel() ctx, cancelFn := context.WithCancel(context.Background()) - setup := federationtesting.NewFederationSetup() + setup := federationtesting.NewManualFederationSetup() t.Cleanup(func() { cancelFn() setup.Close() @@ -72,6 +88,8 @@ func TestExecutionEngine_FederationAndSubscription_IntegrationTest(t *testing.T) t.Run("should successfully execute a federation subscription", func(t *testing.T) { + t.Parallel() + query := ` subscription UpdatedPrice { updatedPrice { @@ -97,8 +115,7 @@ subscription UpdatedPrice { require.NoError(t, err) require.True(t, validationResult.Valid) - execCtx, execCtxCancelFn := context.WithCancel(context.Background()) - defer execCtxCancelFn() + execCtx := t.Context() message := make(chan string) resultWriter := graphql.NewEngineResultWriter() @@ -110,19 +127,19 @@ subscription UpdatedPrice { _ = engine.Execute(execCtx, gqlRequest, &resultWriter) }() - assert.Eventuallyf(t, func() bool { - msg := `{"data":{"updatedPrice":{"name":"Boater","price":%d,"reviews":[{"body":"This is the last straw. Hat you will wear. 11/10","author":{"id":"7777","username":"User 7777"}}]}}}` - price := 10 + trigger, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + + trigger.Emit() + trigger.Emit() + + msg := `{"data":{"updatedPrice":{"name":"Boater","price":%d,"reviews":[{"body":"This is the last straw. Hat you will wear. 11/10","author":{"id":"7777","username":"User 7777"}}]}}}` - firstMessage := <-message - expectedFirstMessage := fmt.Sprintf(msg, price) - assert.Equal(t, expectedFirstMessage, firstMessage) + firstMessage := mustRecvMessage(t, message, 5*time.Second) + assert.Equal(t, fmt.Sprintf(msg, 10), firstMessage) - secondMessage := <-message - expectedSecondMessage := fmt.Sprintf(msg, price+1) - assert.Equal(t, expectedSecondMessage, secondMessage) - return true - }, time.Second*20, 10*time.Millisecond, "did not receive expected messages") + secondMessage := mustRecvMessage(t, message, 5*time.Second) + assert.Equal(t, fmt.Sprintf(msg, 11), secondMessage) }) }) } diff --git a/execution/engine/federation_integration_test.go b/execution/engine/federation_integration_test.go index 9b749cda2b..4189d4e6ca 100644 --- a/execution/engine/federation_integration_test.go +++ b/execution/engine/federation_integration_test.go @@ -19,9 +19,33 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/federationtesting" "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" ) -func addGateway(enableART bool) func(setup *federationtesting.FederationSetup) *httptest.Server { +type gatewayOptions struct { + enableART bool + loaderCache map[string]resolve.LoaderCache +} + +func withEnableART(enableART bool) func(*gatewayOptions) { + return func(opts *gatewayOptions) { + opts.enableART = enableART + } +} + +func withLoaderCache(loaderCache map[string]resolve.LoaderCache) func(*gatewayOptions) { + return func(opts *gatewayOptions) { + opts.loaderCache = loaderCache + } +} + +type gatewayOptionsToFunc func(opts *gatewayOptions) + +func addGateway(options ...gatewayOptionsToFunc) func(setup *federationtesting.FederationSetup) *httptest.Server { + opts := &gatewayOptions{} + for _, option := range options { + option(opts) + } return func(setup *federationtesting.FederationSetup) *httptest.Server { httpClient := http.DefaultClient @@ -31,7 +55,7 @@ func addGateway(enableART bool) func(setup *federationtesting.FederationSetup) * {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, }, httpClient) - gtw := gateway.Handler(abstractlogger.NoopLogger, poller, httpClient, enableART) + gtw := gateway.Handler(abstractlogger.NoopLogger, poller, httpClient, opts.enableART, opts.loaderCache, nil) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() @@ -46,11 +70,10 @@ func testQueryPath(name string) string { } func TestFederationIntegrationTestWithArt(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + t.Parallel() - setup := federationtesting.NewFederationSetup(addGateway(true)) - defer setup.Close() + setup := federationtesting.NewFederationSetup(addGateway(withEnableART(true))) + t.Cleanup(setup.Close) gqlClient := NewGraphqlClient(http.DefaultClient) @@ -58,15 +81,60 @@ func TestFederationIntegrationTestWithArt(t *testing.T) { rex, err := regexp.Compile(`http://127.0.0.1:\d+`) require.NoError(t, err) resp = rex.ReplaceAllString(resp, "http://localhost/graphql") + + // Normalize timing values that shift under parallel execution load + rexNanos, err := regexp.Compile(`"duration_since_start_nanoseconds":\s*\d+`) + require.NoError(t, err) + resp = rexNanos.ReplaceAllString(resp, `"duration_since_start_nanoseconds":0`) + + rexPretty, err := regexp.Compile(`"duration_since_start_pretty":\s*"[^"]*"`) + require.NoError(t, err) + resp = rexPretty.ReplaceAllString(resp, `"duration_since_start_pretty":""`) + + rexStartTime, err := regexp.Compile(`"trace_start_time":\s*"[^"]*"`) + require.NoError(t, err) + resp = rexStartTime.ReplaceAllString(resp, `"trace_start_time":"0"`) + + rexEndTime, err := regexp.Compile(`"trace_start_unix":\s*"[^"]*"`) + require.NoError(t, err) + resp = rexEndTime.ReplaceAllString(resp, `"trace_start_unix":"0"`) + + // Normalize remaining timing fields that can shift under load + rexDurationNanos, err := regexp.Compile(`"duration_nanoseconds":\s*\d+`) + require.NoError(t, err) + resp = rexDurationNanos.ReplaceAllString(resp, `"duration_nanoseconds":0`) + + rexDurationPretty, err := regexp.Compile(`"duration_pretty":\s*"[^"]*"`) + require.NoError(t, err) + resp = rexDurationPretty.ReplaceAllString(resp, `"duration_pretty":""`) + + rexLoadNanos, err := regexp.Compile(`"duration_load_nanoseconds":\s*\d+`) + require.NoError(t, err) + resp = rexLoadNanos.ReplaceAllString(resp, `"duration_load_nanoseconds":0`) + + rexLoadPretty, err := regexp.Compile(`"duration_load_pretty":\s*"[^"]*"`) + require.NoError(t, err) + resp = rexLoadPretty.ReplaceAllString(resp, `"duration_load_pretty":""`) + + rexIdleNanos, err := regexp.Compile(`"idle_time_nanoseconds":\s*\d+`) + require.NoError(t, err) + resp = rexIdleNanos.ReplaceAllString(resp, `"idle_time_nanoseconds":0`) + + rexIdlePretty, err := regexp.Compile(`"idle_time_pretty":\s*"[^"]*"`) + require.NoError(t, err) + resp = rexIdlePretty.ReplaceAllString(resp, `"idle_time_pretty":""`) + return resp } t.Run("single upstream query operation with ART", func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) resp := gqlClient.Query(ctx, setup.GatewayServer.URL, testQueryPath("queries/complex_nesting.graphql"), nil, t) respString := normalizeResponse(string(resp)) - assert.Contains(t, respString, `{"data":{"me":{"id":"1234","username":"Me"`) - assert.Contains(t, respString, `"extensions":{"trace":{"version":"1","info":{"trace_start_time"`) + assert.Equal(t, `{"data":{"me":{"id":"1234","username":"Me","history":[{"wallet":{"currency":"USD"}},{"location":"Germany","product":{"upc":"top-2","name":"Fedora"}},{"wallet":{"currency":"USD"}}],"reviews":[{"__typename":"Review","attachments":[{"__typename":"Question","body":"How do I turn it on?","upc":"top-1"}]},{"__typename":"Review","attachments":[{"__typename":"Rating","upc":"top-2","body":"The best hat I have ever bought in my life."},{"__typename":"Video","upc":"top-2","size":13.37}]}]}},"extensions":{"trace":{"version":"1","info":{"trace_start_time":"0","trace_start_unix":0,"parse_stats":{"duration_nanoseconds":0,"duration_pretty":"","duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"normalize_stats":{"duration_nanoseconds":0,"duration_pretty":"","duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"validate_stats":{"duration_nanoseconds":0,"duration_pretty":"","duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"planner_stats":{"duration_nanoseconds":0,"duration_pretty":"","duration_since_start_nanoseconds":0,"duration_since_start_pretty":""}},"fetches":{"kind":"Sequence","children":[{"kind":"Single","fetch":{"kind":"Single","path":"","source_id":"0","source_name":"accounts","trace":{"raw_input_data":{},"input":{"body":{"query":"{me {id username history {__typename ... on Purchase {wallet {currency}} ... on Sale {location product {upc __typename}}} __typename}}"},"header":{},"method":"POST","url":"http://localhost/graphql"},"output":{"data":{"me":{"id":"1234","username":"Me","history":[{"__typename":"Purchase","wallet":{"currency":"USD"}},{"__typename":"Sale","location":"Germany","product":{"upc":"top-2","__typename":"Product"}},{"__typename":"Purchase","wallet":{"currency":"USD"}}],"__typename":"User"}},"extensions":{"trace":{"request":{"method":"POST","url":"http://localhost/graphql","headers":{"Accept":["application/json"],"Accept-Encoding":["gzip","deflate"],"Content-Type":["application/json"]}},"response":{"status_code":200,"status":"200 OK","headers":{"Content-Length":["277"],"Content-Type":["application/json"]},"body_size":277}}}},"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","duration_load_nanoseconds":0,"duration_load_pretty":"","single_flight_used":true,"single_flight_shared_response":false,"load_skipped":false,"load_stats":{"get_conn":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","host_port":""},"got_conn":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","reused":false,"was_idle":false,"idle_time_nanoseconds":0,"idle_time_pretty":""},"got_first_response_byte":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"dns_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","host":""},"dns_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"connect_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","network":"","addr":""},"connect_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","network":"","addr":""},"tls_handshake_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"tls_handshake_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"wrote_headers":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"wrote_request":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""}},"cache_trace":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","duration_nanoseconds":0,"duration_pretty":"","l1_enabled":false,"l2_enabled":false,"entity_count":0,"l1_hit":0,"l1_miss":0,"l2_hit":0,"l2_miss":0}}}},{"kind":"Parallel","children":[{"kind":"Single","fetch":{"kind":"BatchEntity","path":"me.history.@.product","source_id":"1","source_name":"products","trace":{"raw_input_data":{"upc":"top-2","__typename":"Product"},"input":{"body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {__typename name}}}","variables":{"representations":[{"__typename":"Product","upc":"top-2"}]}},"header":{},"method":"POST","url":"http://localhost/graphql"},"output":{"data":{"_entities":[{"__typename":"Product","name":"Fedora"}]},"extensions":{"trace":{"request":{"method":"POST","url":"http://localhost/graphql","headers":{"Accept":["application/json"],"Accept-Encoding":["gzip","deflate"],"Content-Type":["application/json"]}},"response":{"status_code":200,"status":"200 OK","headers":{"Content-Length":["65"],"Content-Type":["application/json"]},"body_size":65}}}},"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","duration_load_nanoseconds":0,"duration_load_pretty":"","single_flight_used":true,"single_flight_shared_response":false,"load_skipped":false,"load_stats":{"get_conn":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","host_port":""},"got_conn":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","reused":false,"was_idle":false,"idle_time_nanoseconds":0,"idle_time_pretty":""},"got_first_response_byte":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"dns_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","host":""},"dns_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"connect_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","network":"","addr":""},"connect_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","network":"","addr":""},"tls_handshake_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"tls_handshake_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"wrote_headers":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"wrote_request":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""}},"cache_trace":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","duration_nanoseconds":0,"duration_pretty":"","l1_enabled":false,"l2_enabled":false,"entity_count":0,"l1_hit":0,"l1_miss":0,"l2_hit":0,"l2_miss":0}}}},{"kind":"Single","fetch":{"kind":"Entity","path":"me","source_id":"2","source_name":"reviews","trace":{"raw_input_data":{"id":"1234","username":"Me","history":[{"__typename":"Purchase","wallet":{"currency":"USD"}},{"__typename":"Sale","location":"Germany","product":{"upc":"top-2","__typename":"Product"}},{"__typename":"Purchase","wallet":{"currency":"USD"}}],"__typename":"User"},"input":{"body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on User {__typename reviews {__typename attachments {__typename ... on Question {body upc} ... on Video {upc size} ... on Rating {upc body}}}}}}","variables":{"representations":[{"__typename":"User","id":"1234"}]}},"header":{},"method":"POST","url":"http://localhost/graphql"},"output":{"data":{"_entities":[{"__typename":"User","reviews":[{"__typename":"Review","attachments":[{"__typename":"Question","body":"How do I turn it on?","upc":"top-1"}]},{"__typename":"Review","attachments":[{"__typename":"Rating","upc":"top-2","body":"The best hat I have ever bought in my life."},{"__typename":"Video","upc":"top-2","size":13.37}]}]}]},"extensions":{"trace":{"request":{"method":"POST","url":"http://localhost/graphql","headers":{"Accept":["application/json"],"Accept-Encoding":["gzip","deflate"],"Content-Type":["application/json"]}},"response":{"status_code":200,"status":"200 OK","headers":{"Content-Length":["349"],"Content-Type":["application/json"]},"body_size":349}}}},"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","duration_load_nanoseconds":0,"duration_load_pretty":"","single_flight_used":true,"single_flight_shared_response":false,"load_skipped":false,"load_stats":{"get_conn":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","host_port":""},"got_conn":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","reused":false,"was_idle":false,"idle_time_nanoseconds":0,"idle_time_pretty":""},"got_first_response_byte":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"dns_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","host":""},"dns_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"connect_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","network":"","addr":""},"connect_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","network":"","addr":""},"tls_handshake_start":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"tls_handshake_done":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"wrote_headers":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""},"wrote_request":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":""}},"cache_trace":{"duration_since_start_nanoseconds":0,"duration_since_start_pretty":"","duration_nanoseconds":0,"duration_pretty":"","l1_enabled":false,"l2_enabled":false,"entity_count":0,"l1_hit":0,"l1_miss":0,"l2_hit":0,"l2_miss":0}}}}]}]}}}}`, respString) buf := &bytes.Buffer{} _ = json.Indent(buf, []byte(respString), "", " ") @@ -78,7 +146,7 @@ func TestFederationIntegrationTest(t *testing.T) { t.Parallel() // Shared setup for all read-only tests (minimizes open ports) - setup := federationtesting.NewFederationSetup(addGateway(false)) + setup := federationtesting.NewManualFederationSetup(addGateway(withEnableART(false))) t.Cleanup(setup.Close) gqlClient := NewGraphqlClient(http.DefaultClient) @@ -101,7 +169,7 @@ func TestFederationIntegrationTest(t *testing.T) { // Mutation test needs its own setup because AddReview modifies the reviews resolver state t.Run("mutation operation with variables", func(t *testing.T) { t.Parallel() - mutSetup := federationtesting.NewFederationSetup(addGateway(false)) + mutSetup := federationtesting.NewFederationSetup(addGateway(withEnableART(false))) t.Cleanup(mutSetup.Close) mutClient := NewGraphqlClient(http.DefaultClient) ctx, cancel := context.WithCancel(context.Background()) @@ -136,12 +204,29 @@ func TestFederationIntegrationTest(t *testing.T) { t.Cleanup(cancel) wsAddr := strings.ReplaceAll(setup.GatewayServer.URL, "http://", "ws://") - messages := gqlClient.Subscription(ctx, wsAddr, testQueryPath("subscriptions/subscription.query"), queryVariables{ + messages, closeSubscription := gqlClient.Subscription(ctx, wsAddr, testQueryPath("subscriptions/subscription.query"), queryVariables{ "upc": "top-1", }, t) + t.Cleanup(closeSubscription) + + trigger, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + trigger.Emit() + trigger.Emit() + + // Guard channel reads: a broken subscription should fail the test fast, not hang it. + recv := func() string { + select { + case msg := <-messages: + return string(msg) + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for subscription message") + return "" + } + } - assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-1","name":"Trilby","price":1}}}}`, string(<-messages)) - assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-1","name":"Trilby","price":2}}}}`, string(<-messages)) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-1","name":"Trilby","price":1}}}}`, recv()) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-1","name":"Trilby","price":2}}}}`, recv()) }) t.Run("Multiple queries and nested fragments", func(t *testing.T) { diff --git a/execution/engine/federation_subscription_caching_test.go b/execution/engine/federation_subscription_caching_test.go new file mode 100644 index 0000000000..025afe7f6b --- /dev/null +++ b/execution/engine/federation_subscription_caching_test.go @@ -0,0 +1,2441 @@ +package engine_test + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// toWSAddr converts an HTTP URL to a WebSocket URL. +func toWSAddr(httpURL string) string { + return strings.ReplaceAll(httpURL, "http://", "ws://") +} + +// mustRecvMessage receives a single subscription message from ch with a timeout. +// Fails the test if the channel is closed unexpectedly or the timeout elapses. +func mustRecvMessage(t *testing.T, ch <-chan []byte, timeout time.Duration) []byte { + t.Helper() + select { + case m, ok := <-ch: + if !ok { + t.Fatalf("message channel closed unexpectedly") + } + return m + case <-time.After(timeout): + t.Fatalf("timed out after %s waiting for subscription message", timeout) + return nil + } +} + +func boolToInt(v bool) int { + if v { + return 1 + } + return 0 +} + +// collectSubscriptionMessages subscribes and collects exactly count messages. +func collectSubscriptionMessages(ctx context.Context, gqlClient *GraphqlClient, setup *federationtesting.FederationSetup, wsAddr, queryPath string, + variables queryVariables, count int, t *testing.T) []string { + t.Helper() + + messages, closeSubscription := gqlClient.Subscription(ctx, wsAddr, queryPath, variables, t) + defer closeSubscription() + + trigger, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + + var result []string + for i := range count { + trigger.Emit() + + select { + case msg, ok := <-messages: + if !ok { + t.Fatalf("subscription channel closed after %d messages, expected %d", i, count) + } + result = append(result, string(msg)) + case <-time.After(5 * time.Second): + t.Fatalf("timeout waiting for subscription message %d of %d", i+1, count) + } + } + + return result +} + +// TestFederationSubscriptionCaching verifies subscription-driven entity cache population: +// subscription events write entity data to L2, which subsequent queries can hit. +// +//nolint:tparallel // Timing-sensitive subscription cache tests need a few subtests to run before parallel siblings. +func TestFederationSubscriptionCaching(t *testing.T) { + // ===================================================================== + // Category 1: Child fetch L2 read/write within subscription events + // ===================================================================== + + t.Run("child entity fetch - L2 miss then hit across events", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Configure entity caching for User entities in accounts subgraph + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + // Subscribe to product "top-4" which has 2 reviews by different authors + defaultCache.ClearLog() + tracker.Reset() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 2, t) + + // Event 1: should resolve User entities (L2 miss → fetch → L2 set) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[0]) + + // Event 2: should hit L2 cache for User entities + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":2,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[1]) + + // Verify accounts was called exactly once (event 1 fetched, event 2 hit cache) + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "accounts should be called exactly once (L2 miss on event 1, hit on event 2)") + + // Verify cache log + cacheLog := defaultCache.GetLog() + + // Event 1: get (miss for User 1234 and 7777), set (both users) + // Event 2: get (hit for User 1234 and 7777) + // Total: 3 operations + assert.Equal(t, 3, len(cacheLog), "should have exactly 3 cache operations") + + wantLog := []CacheLogEntry{ + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: false}, + }}, + {Operation: CacheOperationSet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, TTL: 30 * time.Second}, + }}, + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: true}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(cacheLog), "cache log should show miss+set on event 1, hit on event 2") + }) + + t.Run("L2 pre-populated - subscription child fetch hits L2", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Pre-populate L2 with User entities that match top-4's review authors + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Value: []byte(`{"id":"5678","username":"User 5678"}`), TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Value: []byte(`{"id":"8888","username":"User 8888"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + { + Operation: "set", + Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, TTL: 30 * time.Second}, + }, + }, + }, seedLog) + + // Subscribe - User entities should hit L2 from pre-populated cache + defaultCache.ClearLog() + tracker.Reset() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, toWSAddr(setup.GatewayServer.URL), + `subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[0]) + + // Accounts should NOT be called during subscription (L2 hit) + subAccountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 0, subAccountsCalls, "subscription should not call accounts (L2 pre-populated)") + + // Cache log should show L2 get with hits + cacheLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: true}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(cacheLog), "cache log should show L2 hits for pre-populated users") + }) + + t.Run("child entity fetch L2 TTL expiry across events", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // Short TTL for testing expiry + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 150 * time.Millisecond}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.setCurrentTime(time.Unix(0, 0)) + + // Collect 3 events: + // Event 1 (~100ms): L2 miss → accounts called → L2 set + // Event 2 (~200ms): Within TTL → L2 hit → no call + // Event 3 (~300ms): After TTL expiry → L2 miss → accounts called again + tracker.Reset() + messages, closeSubscription := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, queryVariables{"upc": "top-4"}, t) + t.Cleanup(closeSubscription) + + trigger, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + + trigger.Emit() + first := mustRecvMessage(t, messages, 5*time.Second) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, string(first)) + + trigger.Emit() + second := mustRecvMessage(t, messages, 5*time.Second) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":2,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, string(second)) + + defaultCache.setCurrentTime(time.Unix(0, 0).Add(151 * time.Millisecond)) + _, ok1 := defaultCache.Peek(`{"__typename":"User","key":{"id":"5678"}}`) + _, ok2 := defaultCache.Peek(`{"__typename":"User","key":{"id":"8888"}}`) + assert.Equal(t, false, ok1, "user 5678 L2 entry should expire after TTL") + assert.Equal(t, false, ok2, "user 8888 L2 entry should expire after TTL") + trigger.Emit() + third := mustRecvMessage(t, messages, 5*time.Second) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":3,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, string(third)) + + // Accounts should be called exactly 2 times (event 1 and event 3) + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, "accounts should be called exactly twice (miss, hit, miss after TTL expiry)") + }) + + t.Run("entity caching not configured - no cache operations", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // No entity caching configured for accounts + subgraphCachingConfigs := engine.SubgraphCachingConfigs{} + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + tracker.Reset() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 2, t) + + require.Equal(t, 2, len(messages)) + + // Accounts should be called on every event (no caching) + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 2, accountsCalls, "accounts should be called on every event (no caching configured)") + + // Cache log should be empty for entity operations + cacheLog := defaultCache.GetLog() + assert.Equal(t, 0, len(cacheLog), "no cache operations expected when caching not configured") + }) + + // ===================================================================== + // Category 2: Subscription root entity populates L2 + // ===================================================================== + + t.Run("subscription entity populates L2 - verified via cache", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + // Subscribe to product updates - selects name, price beyond @key(upc) → populate mode + defaultCache.ClearLog() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // Verify L2 was populated by subscription via cache log + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription should populate L2 with Product entity") + + // Verify the cached data directly + entries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0], "Product entity should be in L2 cache") + assert.Equal(t, `{"upc":"top-4","name":"Bowler","price":1,"__typename":"Product"}`, string(entries[0].Value)) + }) + + t.Run("subscription populates L2 - cached data has only selected fields", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + // Subscribe with subscription_product_only.query which selects {upc, name, price} + // but NOT inStock. The subscription should populate L2 with only these fields. + defaultCache.ClearLog() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // Verify L2 was populated + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription should populate L2") + + // Verify the cached entity has upc, name, price but NOT inStock + entries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0], "Product entity should be in L2 cache") + assert.Equal(t, `{"upc":"top-4","name":"Bowler","price":1,"__typename":"Product"}`, string(entries[0].Value)) + }) + + t.Run("subscription entity list populates L2 - multiple entities cached", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updatedPrices", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + // Subscribe to updatedPrices which returns a list of products (top-1, top-2, top-3) + defaultCache.ClearLog() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription AllPricesWithReviews { + updatedPrices { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + nil, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updatedPrices":[{"upc":"top-1","name":"Trilby","price":1,"reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"upc":"top-2","name":"Fedora","price":2,"reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]},{"upc":"top-3","name":"Boater","price":3,"reviews":[{"body":"This is the last straw. Hat you will wear. 11/10","authorWithoutProvides":{"username":"User 7777"}}]}]}}}`, messages[0]) + + // Verify L2 was populated with all 3 product entities + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription should populate L2 with Product entities") + + // Verify exact cached values for all 3 products + entityKeys := []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + `{"__typename":"Product","key":{"upc":"top-2"}}`, + `{"__typename":"Product","key":{"upc":"top-3"}}`, + } + entries, err := defaultCache.Get(ctx, entityKeys) + require.NoError(t, err) + require.Equal(t, 3, len(entries)) + require.NotNil(t, entries[0]) + assert.Equal(t, `{"upc":"top-1","name":"Trilby","price":1,"__typename":"Product"}`, string(entries[0].Value)) + require.NotNil(t, entries[1]) + assert.Equal(t, `{"upc":"top-2","name":"Fedora","price":2,"__typename":"Product"}`, string(entries[1].Value)) + require.NotNil(t, entries[2]) + assert.Equal(t, `{"upc":"top-3","name":"Boater","price":3,"__typename":"Product"}`, string(entries[2].Value)) + }) + + t.Run("subscription entity population not configured - no L2 writes from subscription", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + // No SubscriptionEntityPopulation configured + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + productsURLParsed, _ := url.Parse(setup.ProductsUpstreamServer.URL) + productsHost := productsURLParsed.Host + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + // Subscribe without entity population config + defaultCache.ClearLog() + tracker.Reset() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // No cache operations from subscription (entity population not configured) + subLog := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry(nil)), sortCacheLogEntries(subLog), "no cache operations when entity population not configured") + + // Query should miss L2 and call products subgraph + tracker.Reset() + + productQuery := `query { product(upc: "top-4") { upc name price } }` + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, productQuery, nil, t) + assert.Equal(t, `{"data":{"product":{"upc":"top-4","name":"Bowler","price":64}}}`, string(resp)) + + productsCallsQuery := tracker.GetCount(productsHost) + assert.Equal(t, 1, productsCallsQuery, "products should be called (no subscription entity population)") + }) + + t.Run("subscription entity + child fetch caching combined", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + // Subscribe with product entity population AND child entity caching for User + // Collect 2 events to verify both Product population and User L2 caching + tracker.Reset() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 2, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[0]) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":2,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[1]) + + // Accounts called once (event 1 L2 miss, event 2 L2 hit for User entities) + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "accounts called once (event 2 hits L2 from event 1)") + + // Verify Product entity was populated in L2 by subscription + productEntries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(productEntries)) + require.NotNil(t, productEntries[0], "Product entity should be in L2 cache") + assert.Equal(t, `{"upc":"top-4","name":"Bowler","price":2,"__typename":"Product"}`, string(productEntries[0].Value)) + + // Verify User entities were populated in L2 by child entity caching + userEntries, err := defaultCache.Get(ctx, []string{ + `{"__typename":"User","key":{"id":"5678"}}`, + `{"__typename":"User","key":{"id":"8888"}}`, + }) + require.NoError(t, err) + require.Equal(t, 2, len(userEntries)) + require.NotNil(t, userEntries[0], "User 5678 should be in L2 cache") + require.NotNil(t, userEntries[1], "User 8888 should be in L2 cache") + }) + + t.Run("subscription entity population with header prefix", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + mockHeadersBuilder := &mockSubgraphHeadersBuilder{ + hashes: map[string]uint64{ + "products": 11111, + "accounts": 33333, + "reviews": 22222, + }, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: true}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withSubgraphHeadersBuilder(mockHeadersBuilder), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // Verify the L2 set used a prefixed key + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `11111:{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription should populate L2 with prefixed key") + + // Verify the cached data directly using the prefixed key + entries, err := defaultCache.Get(ctx, []string{`11111:{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0], "Product entity should be in L2 cache with prefixed key") + assert.Equal(t, `{"upc":"top-4","name":"Bowler","price":1,"__typename":"Product"}`, string(entries[0].Value)) + }) + + // ===================================================================== + // Category 3: Subscription entity invalidation (key-only mode) + // ===================================================================== + + t.Run("key-only subscription invalidates L2 cache", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second, EnableInvalidationOnKeyOnly: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + entityKey := `{"__typename":"Product","key":{"upc":"top-4"}}` + + // Pre-populate L2 directly with entity cache key + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: entityKey, Value: []byte(`{"upc":"top-4","name":"Bowler","price":64,"__typename":"Product"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + + // Verify product is in cache + entries, err := defaultCache.Get(ctx, []string{entityKey}) + require.NoError(t, err) + require.NotNil(t, entries[0], "Product should be in L2 cache before subscription") + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, Hit: true}}}, + }, seedLog) + + // Subscribe with key-only query → invalidation mode + defaultCache.ClearLog() + + wsAddr := toWSAddr(setup.GatewayServer.URL) + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceKeyOnly($upc: String!) { + updateProductPrice(upc: $upc) { + upc + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[0]) + + // Verify cache delete + User entity resolution + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationDelete, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`}}}, + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: false}, + }}, + {Operation: CacheOperationSet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription should delete Product and resolve Users") + + // Verify Product is gone from cache + entries, err = defaultCache.Get(ctx, []string{entityKey}) + require.NoError(t, err) + assert.Nil(t, entries[0], "Product should be deleted from L2 cache after invalidation") + + // Verify User entities are cached + userEntries, err := defaultCache.Get(ctx, []string{ + `{"__typename":"User","key":{"id":"5678"}}`, + `{"__typename":"User","key":{"id":"8888"}}`, + }) + require.NoError(t, err) + require.Equal(t, 2, len(userEntries)) + require.NotNil(t, userEntries[0]) + assert.Equal(t, `{"__typename":"User","id":"5678","username":"User 5678"}`, string(userEntries[0].Value)) + require.NotNil(t, userEntries[1]) + assert.Equal(t, `{"__typename":"User","id":"8888","username":"User 8888"}`, string(userEntries[1].Value)) + }) + + t.Run("key-only subscription WITHOUT invalidation flag - no cache operation", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second, EnableInvalidationOnKeyOnly: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + entityKey := `{"__typename":"Product","key":{"upc":"top-4"}}` + + // Pre-populate L2 directly with entity cache key + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: entityKey, Value: []byte(`{"upc":"top-4","name":"Bowler","price":64,"__typename":"Product"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + }, seedLog) + + // Subscribe with key-only query but invalidation disabled + defaultCache.ClearLog() + + wsAddr := toWSAddr(setup.GatewayServer.URL) + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceKeyOnly($upc: String!) { + updateProductPrice(upc: $upc) { + upc + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[0]) + + // No delete for Product (invalidation disabled), only User entity resolution + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: false}, + }}, + {Operation: CacheOperationSet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "no delete for Product, only User entity resolution") + + // Verify Product is still in cache (not invalidated) + entries, err := defaultCache.Get(ctx, []string{entityKey}) + require.NoError(t, err) + require.NotNil(t, entries[0]) + assert.Equal(t, `{"upc":"top-4","name":"Bowler","price":64,"__typename":"Product"}`, string(entries[0].Value)) + + // Verify User entities are cached + userEntries, err := defaultCache.Get(ctx, []string{ + `{"__typename":"User","key":{"id":"5678"}}`, + `{"__typename":"User","key":{"id":"8888"}}`, + }) + require.NoError(t, err) + require.Equal(t, 2, len(userEntries)) + require.NotNil(t, userEntries[0]) + assert.Equal(t, `{"__typename":"User","id":"5678","username":"User 5678"}`, string(userEntries[0].Value)) + require.NotNil(t, userEntries[1]) + assert.Equal(t, `{"__typename":"User","id":"8888","username":"User 8888"}`, string(userEntries[1].Value)) + }) + + t.Run("invalidation on every event", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second, EnableInvalidationOnKeyOnly: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + entityKey := `{"__typename":"Product","key":{"upc":"top-4"}}` + entityValue := []byte(`{"upc":"top-4","name":"Bowler","price":64,"__typename":"Product"}`) + + // Pre-populate L2 + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: entityKey, Value: entityValue, TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + }, seedLog) + + // Subscribe with key-only query → invalidation mode, collect 2 events + defaultCache.ClearLog() + + wsAddr := toWSAddr(setup.GatewayServer.URL) + messages, closeSubscription := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePriceKeyOnly($upc: String!) { + updateProductPrice(upc: $upc) { + upc + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, queryVariables{"upc": "top-4"}, t) + t.Cleanup(closeSubscription) + + handle, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + + handle.Emit() + firstMessage := mustRecvMessage(t, messages, 5*time.Second) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, string(firstMessage)) + + handle.Emit() + secondMessage := mustRecvMessage(t, messages, 5*time.Second) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, string(secondMessage)) + + // Verify 2 delete operations (one per event) + User entity resolution + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationDelete, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`}}}, + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: false}, + }}, + {Operation: CacheOperationSet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, TTL: 30 * time.Second}, + }}, + {Operation: CacheOperationDelete, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`}}}, + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: true}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "should have 2 delete operations (one per event) + User entity resolution") + + // Verify Product is gone after both events + entries, err := defaultCache.Get(ctx, []string{entityKey}) + require.NoError(t, err) + assert.Nil(t, entries[0], "Product should be deleted from L2 after invalidation events") + + // Verify User entities are still cached (set on event 1, hit on event 2) + userEntries, err := defaultCache.Get(ctx, []string{ + `{"__typename":"User","key":{"id":"5678"}}`, + `{"__typename":"User","key":{"id":"8888"}}`, + }) + require.NoError(t, err) + require.Equal(t, 2, len(userEntries)) + require.NotNil(t, userEntries[0]) + assert.Equal(t, `{"__typename":"User","id":"5678","username":"User 5678"}`, string(userEntries[0].Value)) + require.NotNil(t, userEntries[1]) + assert.Equal(t, `{"__typename":"User","id":"8888","username":"User 8888"}`, string(userEntries[1].Value)) + }) + + // ===================================================================== + // Category 4: Root field caching NOT applied to subscriptions + // ===================================================================== + + t.Run("root field cache config does not apply to subscription root", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second}, + {TypeName: "Subscription", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[0]) + + // Verify no root field cache operations for subscription trigger. + // Even with a Subscription.updateProductPrice root-field cache configured, + // it must NOT apply — subscriptions are never cached as root fields. + cacheLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: false}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: false}, + }}, + {Operation: CacheOperationSet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, TTL: 30 * time.Second}, + }}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(cacheLog), "no root field cache, only User entity caching") + + // Verify User entities are cached with correct values + userEntries, err := defaultCache.Get(ctx, []string{ + `{"__typename":"User","key":{"id":"5678"}}`, + `{"__typename":"User","key":{"id":"8888"}}`, + }) + require.NoError(t, err) + require.Equal(t, 2, len(userEntries)) + require.NotNil(t, userEntries[0]) + assert.Equal(t, `{"__typename":"User","id":"5678","username":"User 5678"}`, string(userEntries[0].Value)) + require.NotNil(t, userEntries[1]) + assert.Equal(t, `{"__typename":"User","id":"8888","username":"User 8888"}`, string(userEntries[1].Value)) + }) + + // ===================================================================== + // Category 5: Edge cases + // ===================================================================== + + t.Run("multiple subscription events share L2 - second event skips fetch", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + tracker.Reset() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 2, t) + + require.Equal(t, 2, len(messages)) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[0]) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":2,"reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, messages[1]) + + // Accounts called exactly once (event 1), event 2 uses L2 + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 1, accountsCalls, "accounts called once (event 2 uses L2 from event 1)") + }) + + t.Run("subscription with @provides skips entity resolution", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + tracker := newSubgraphCallTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withHTTPClient(trackingClient), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + tracker.Reset() + + // Uses author (with @provides) - no entity resolution for User + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceWithProvides($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + author { + username + } + } + } +}`, + queryVariables{"upc": "top-4"}, 2, t) + + require.Equal(t, 2, len(messages)) + + // Accounts should never be called (@provides means reviews subgraph provides username) + accountsCalls := tracker.GetCount(accountsHost) + assert.Equal(t, 0, accountsCalls, "accounts never called with @provides") + + // No cache operations at all (no entity resolution with @provides) + cacheLog := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry(nil)), sortCacheLogEntries(cacheLog), "no cache operations with @provides") + }) + + // ===================================================================== + // Category 6: Alias and union edge cases + // ===================================================================== + + t.Run("subscription root field alias - entity population works", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + + // Uses alias: "priceUpdate: updateProductPrice(upc: $upc)" + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceAlias($upc: String!) { + priceUpdate: updateProductPrice(upc: $upc) { + upc + name + price + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"priceUpdate":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // Verify L2 was populated by subscription (alias doesn't break entity population) + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription with alias should populate L2 with Product entity") + + // Verify cached data + entries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0], "Product entity should be in L2 cache") + assert.Equal(t, `{"upc":"top-4","name":"Bowler","price":1,"__typename":"Product"}`, string(entries[0].Value)) + }) + + t.Run("subscription union return type - entity population works", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + // Configure for concrete type "Product", not the union "ProductUpdate" + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPriceUnion", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + + // Uses union return type: updateProductPriceUnion returns ProductUpdate union + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceUnion($upc: String!) { + updateProductPriceUnion(upc: $upc) { + ... on Product { + upc + name + price + } + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPriceUnion":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // Verify L2 was populated (planner resolves union → Product member) + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription with union return type should populate L2 with Product entity") + + // Verify cached data + entries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0], "Product entity should be in L2 cache") + assert.Equal(t, `{"__typename":"Product","upc":"top-4","name":"Bowler","price":1}`, string(entries[0].Value)) + }) + + t.Run("subscription interface return type - entity population works", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + // Configure for concrete type "Product", not the interface "ProductInterface" + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPriceInterface", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + + // Uses interface return type: updateProductPriceInterface returns ProductInterface + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdatePriceInterface($upc: String!) { + updateProductPriceInterface(upc: $upc) { + ... on Product { + upc + name + price + } + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPriceInterface":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + // Verify L2 was populated (planner resolves interface → Product implementor) + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "subscription with interface return type should populate L2 with Product entity") + + // Verify cached data + entries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0], "Product entity should be in L2 cache") + assert.Equal(t, `{"__typename":"Product","upc":"top-4","name":"Bowler","price":1}`, string(entries[0].Value)) + }) + + t.Run("subscription union return type - unconfigured type not cached", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Configure entity population for Product only, NOT DigitalProduct. + // The union ProductUpdate = Product | DigitalProduct, but the planner picks + // Product's config. At runtime, DigitalProduct is returned and its __typename + // doesn't match → filtered out → no L2 cache write. + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateDigitalProductPriceUnion", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + + // Subscribe via union field that returns DigitalProduct (not Product) + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdateDigitalProductPriceUnion($upc: String!) { + updateDigitalProductPriceUnion(upc: $upc) { + ... on DigitalProduct { + upc + name + price + } + } +}`, + queryVariables{"upc": "digital-1"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateDigitalProductPriceUnion":{"upc":"digital-1","name":"eBook: GraphQL in Action","price":1}}}}`, messages[0]) + + // No cache operations: DigitalProduct's __typename doesn't match configured "Product" + subLog := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry(nil)), sortCacheLogEntries(subLog), "no cache operations for unconfigured DigitalProduct type") + + // Verify neither Product nor DigitalProduct keys are in cache + productEntries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"digital-1"}}`}) + require.NoError(t, err) + assert.Nil(t, productEntries[0], "Product key should not be in cache") + + digitalEntries, err := defaultCache.Get(ctx, []string{`{"__typename":"DigitalProduct","key":{"upc":"digital-1"}}`}) + require.NoError(t, err) + assert.Nil(t, digitalEntries[0], "DigitalProduct key should not be in cache") + }) + + t.Run("subscription interface return type - unconfigured type not cached", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Configure entity population for Product only, NOT DigitalProduct. + // The interface ProductInterface is implemented by Product and DigitalProduct, + // but the planner picks Product's config. At runtime, DigitalProduct is returned + // and its __typename doesn't match → filtered out → no L2 cache write. + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateDigitalProductPriceInterface", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + + defaultCache.ClearLog() + + // Subscribe via interface field that returns DigitalProduct (not Product) + messages := collectSubscriptionMessages(ctx, gqlClient, setup, wsAddr, + `subscription UpdateDigitalProductPriceInterface($upc: String!) { + updateDigitalProductPriceInterface(upc: $upc) { + ... on DigitalProduct { + upc + name + price + } + } +}`, + queryVariables{"upc": "digital-1"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateDigitalProductPriceInterface":{"upc":"digital-1","name":"eBook: GraphQL in Action","price":1}}}}`, messages[0]) + + // No cache operations: DigitalProduct's __typename doesn't match configured "Product" + subLog := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries([]CacheLogEntry(nil)), sortCacheLogEntries(subLog), "no cache operations for unconfigured DigitalProduct type") + + // Verify neither Product nor DigitalProduct keys are in cache + productEntries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"digital-1"}}`}) + require.NoError(t, err) + assert.Nil(t, productEntries[0], "Product key should not be in cache") + + digitalEntries, err := defaultCache.Get(ctx, []string{`{"__typename":"DigitalProduct","key":{"upc":"digital-1"}}`}) + require.NoError(t, err) + assert.Nil(t, digitalEntries[0], "DigitalProduct key should not be in cache") + }) + + // ===================================================================== + // Category 7: Trigger-level cache deduplication + // ===================================================================== + + t.Run("entity population happens once per trigger event with multiple subscriptions", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + vars := queryVariables{"upc": "top-4"} + + // Start 2 subscriptions to the same query/variables (same trigger) + messages1, close1 := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, vars, t) + t.Cleanup(close1) + messages2, close2 := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, vars, t) + t.Cleanup(close2) + + handle, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + + // Shared-trigger subscriptions are attached asynchronously after the upstream + // handle is created. Warm up until both clients have observed at least one event. + firstSeen := [2]bool{} + warmupEmits := 0 + warmupCtx, warmupCancel := context.WithTimeout(ctx, 5*time.Second) + defer warmupCancel() + for !firstSeen[0] || !firstSeen[1] { + handle.Emit() + warmupEmits++ + + settleTimer := time.NewTimer(200 * time.Millisecond) + collectWarmup: + for { + select { + case _, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly during warm-up") + } + firstSeen[0] = true + if !settleTimer.Stop() { + select { + case <-settleTimer.C: + default: + } + } + settleTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly during warm-up") + } + firstSeen[1] = true + if !settleTimer.Stop() { + select { + case <-settleTimer.C: + default: + } + } + settleTimer.Reset(200 * time.Millisecond) + case <-settleTimer.C: + break collectWarmup + case <-warmupCtx.Done(): + t.Fatalf("timeout waiting for first messages, received %d of 2", boolToInt(firstSeen[0])+boolToInt(firstSeen[1])) + } + } + } + + // Drain any extra warm-up messages from already-attached clients so the next + // emit is the only source of messages in the measured phase. + drainTimer := time.NewTimer(200 * time.Millisecond) + drainWarmup: + for { + select { + case _, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly during drain") + } + if !drainTimer.Stop() { + select { + case <-drainTimer.C: + default: + } + } + drainTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly during drain") + } + if !drainTimer.Stop() { + select { + case <-drainTimer.C: + default: + } + } + drainTimer.Reset(200 * time.Millisecond) + case <-drainTimer.C: + break drainWarmup + } + } + + // ClearLog and collect second event to measure deduplication + defaultCache.ClearLog() + setNotification := defaultCache.WaitForOperation(CacheOperationSet, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + + handle.Emit() + + var msg1b, msg2b string + for msg1b == "" || msg2b == "" { + select { + case m, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly") + } + msg1b = string(m) + case m, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly") + } + msg2b = string(m) + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for second messages") + } + } + + assert.Equal(t, msg1b, msg2b, "both clients should receive the same event") + assert.Equal(t, fmt.Sprintf(`{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":%d}}}}`, warmupEmits+1), msg1b) + + // Close subscriptions before cache log assertions + close1() + close2() + + select { + case entry, ok := <-setNotification: + require.True(t, ok, "set notification channel should be closed after delivery") + assert.Equal(t, CacheLogEntry{ + Operation: CacheOperationSet, + Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}, + }, entry) + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Product cache population") + } + + // Verify exactly 1 set operation (deduplicated, not 2) + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "should have exactly 1 L2 set for Product (deduplicated, not 2)") + + // Verify cached Product value + entries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0]) + assert.Equal(t, fmt.Sprintf(`{"upc":"top-4","name":"Bowler","price":%d,"__typename":"Product"}`, warmupEmits+1), string(entries[0].Value)) + }) + + t.Run("entity invalidation happens once per trigger event with multiple subscriptions", func(t *testing.T) { + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second, EnableInvalidationOnKeyOnly: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + entityKey := `{"__typename":"Product","key":{"upc":"top-4"}}` + + // Pre-populate L2 + err := defaultCache.Set(ctx, []*resolve.CacheEntry{ + {Key: entityKey, Value: []byte(`{"upc":"top-4","name":"Bowler","price":64,"__typename":"Product"}`), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + }, seedLog) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + vars := queryVariables{"upc": "top-4"} + + // Start 2 subscriptions to the same key-only query (same trigger) + messages1, close1 := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePriceKeyOnly($upc: String!) { + updateProductPrice(upc: $upc) { + upc + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, vars, t) + t.Cleanup(close1) + messages2, close2 := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePriceKeyOnly($upc: String!) { + updateProductPrice(upc: $upc) { + upc + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, vars, t) + t.Cleanup(close2) + + handle, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + + // Shared-trigger subscriptions are attached asynchronously after the upstream + // handle is created. Warm up until both clients have observed at least one event. + firstSeen := [2]bool{} + warmupCtx, warmupCancel := context.WithTimeout(ctx, 5*time.Second) + defer warmupCancel() + for !firstSeen[0] || !firstSeen[1] { + handle.Emit() + + settleTimer := time.NewTimer(200 * time.Millisecond) + collectWarmup: + for { + select { + case _, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly during warm-up") + } + firstSeen[0] = true + if !settleTimer.Stop() { + select { + case <-settleTimer.C: + default: + } + } + settleTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly during warm-up") + } + firstSeen[1] = true + if !settleTimer.Stop() { + select { + case <-settleTimer.C: + default: + } + } + settleTimer.Reset(200 * time.Millisecond) + case <-settleTimer.C: + break collectWarmup + case <-warmupCtx.Done(): + t.Fatalf("timeout waiting for first messages, received %d of 2", boolToInt(firstSeen[0])+boolToInt(firstSeen[1])) + } + } + } + + // Drain any extra warm-up messages from already-attached clients so the next + // emit is the only source of messages in the measured phase. + drainTimer := time.NewTimer(200 * time.Millisecond) + drainWarmup: + for { + select { + case _, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly during drain") + } + if !drainTimer.Stop() { + select { + case <-drainTimer.C: + default: + } + } + drainTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly during drain") + } + if !drainTimer.Stop() { + select { + case <-drainTimer.C: + default: + } + } + drainTimer.Reset(200 * time.Millisecond) + case <-drainTimer.C: + break drainWarmup + } + } + + // ClearLog and collect second event to measure deduplication + defaultCache.ClearLog() + deleteNotification := defaultCache.WaitForOperation(CacheOperationDelete, []string{entityKey}) + + handle.Emit() + + var msg1b, msg2b string + for msg1b == "" || msg2b == "" { + select { + case m, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly") + } + msg1b = string(m) + case m, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly") + } + msg2b = string(m) + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for second messages") + } + } + + assert.Equal(t, msg1b, msg2b, "both clients should receive the same event") + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","reviews":[{"body":"Perfect summer hat.","authorWithoutProvides":{"username":"User 5678"}},{"body":"A bit too fancy for my taste.","authorWithoutProvides":{"username":"User 8888"}}]}}}}`, msg1b) + + // Close subscriptions before cache log assertions + close1() + close2() + + select { + case entry, ok := <-deleteNotification: + require.True(t, ok, "delete notification channel should be closed after delivery") + assert.Equal(t, CacheLogEntry{ + Operation: CacheOperationDelete, + Items: []CacheLogItem{{Key: entityKey, TTL: 0}}, + }, entry) + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Product cache invalidation") + } + + // Verify exactly 1 delete (deduplicated) + User entity resolution with L2 hits + wantLog := []CacheLogEntry{ + {Operation: CacheOperationDelete, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`}}}, + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: true}, + }}, + {Operation: CacheOperationGet, Items: []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"5678"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"id":"8888"}}`, Hit: true}, + }}, + } + subLog := defaultCache.GetLog() + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "should have exactly 1 L2 delete for Product (deduplicated, not 2)") + + // Verify entity is gone from cache + entries, err := defaultCache.Get(ctx, []string{entityKey}) + require.NoError(t, err) + assert.Nil(t, entries[0], "Product should be deleted from L2 cache after invalidation") + }) + + t.Run("three clients - cache operations still happen once", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + subgraphCachingConfigs := engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + }, + }, + } + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(caches), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(subgraphCachingConfigs), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + wsAddr := toWSAddr(setup.GatewayServer.URL) + vars := queryVariables{"upc": "top-4"} + + // Start 3 subscriptions to the same query/variables (same trigger) + messages1, close1 := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, vars, t) + t.Cleanup(close1) + messages2, close2 := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, vars, t) + t.Cleanup(close2) + messages3, close3 := gqlClient.Subscription(ctx, wsAddr, `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, vars, t) + t.Cleanup(close3) + + handle, err := setup.NextProductSubscription(ctx) + require.NoError(t, err) + + // Shared-trigger subscriptions are attached asynchronously after the upstream + // handle is created. On Windows, the third client can miss an immediate first + // emit, so warm up until all three clients have observed at least one event. + firstSeen := [3]bool{} + warmupEmits := 0 + warmupCtx, warmupCancel := context.WithTimeout(ctx, 5*time.Second) + defer warmupCancel() + for !firstSeen[0] || !firstSeen[1] || !firstSeen[2] { + handle.Emit() + warmupEmits++ + + settleTimer := time.NewTimer(200 * time.Millisecond) + collectWarmup: + for { + select { + case _, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly during warm-up") + } + firstSeen[0] = true + if !settleTimer.Stop() { + select { + case <-settleTimer.C: + default: + } + } + settleTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly during warm-up") + } + firstSeen[1] = true + if !settleTimer.Stop() { + select { + case <-settleTimer.C: + default: + } + } + settleTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages3: + if !ok { + t.Fatalf("messages3 channel closed unexpectedly during warm-up") + } + firstSeen[2] = true + if !settleTimer.Stop() { + select { + case <-settleTimer.C: + default: + } + } + settleTimer.Reset(200 * time.Millisecond) + case <-settleTimer.C: + break collectWarmup + case <-warmupCtx.Done(): + t.Fatalf("timeout waiting for first messages, received %d of 3", boolToInt(firstSeen[0])+boolToInt(firstSeen[1])+boolToInt(firstSeen[2])) + } + } + } + + // Drain any extra warm-up messages from already-attached clients so the next + // emit is the only source of messages in the measured phase. + drainTimer := time.NewTimer(200 * time.Millisecond) + drainWarmup: + for { + select { + case _, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly during drain") + } + if !drainTimer.Stop() { + select { + case <-drainTimer.C: + default: + } + } + drainTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly during drain") + } + if !drainTimer.Stop() { + select { + case <-drainTimer.C: + default: + } + } + drainTimer.Reset(200 * time.Millisecond) + case _, ok := <-messages3: + if !ok { + t.Fatalf("messages3 channel closed unexpectedly during drain") + } + if !drainTimer.Stop() { + select { + case <-drainTimer.C: + default: + } + } + drainTimer.Reset(200 * time.Millisecond) + case <-drainTimer.C: + break drainWarmup + } + } + + // ClearLog and collect second event to measure deduplication + defaultCache.ClearLog() + setNotification := defaultCache.WaitForOperation(CacheOperationSet, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + + handle.Emit() + + received := 0 + for received < 3 { + select { + case _, ok := <-messages1: + if !ok { + t.Fatalf("messages1 channel closed unexpectedly") + } + received++ + case _, ok := <-messages2: + if !ok { + t.Fatalf("messages2 channel closed unexpectedly") + } + received++ + case _, ok := <-messages3: + if !ok { + t.Fatalf("messages3 channel closed unexpectedly") + } + received++ + case <-time.After(5 * time.Second): + t.Fatalf("timeout waiting for second messages, received %d of 3", received) + } + } + + // Close subscriptions before cache log assertions + close1() + close2() + close3() + + select { + case entry, ok := <-setNotification: + require.True(t, ok, "set notification channel should be closed after delivery") + assert.Equal(t, CacheLogEntry{ + Operation: CacheOperationSet, + Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}, + }, entry) + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Product cache population") + } + + // Verify exactly 1 set operation (deduplicated, not 3) + subLog := defaultCache.GetLog() + wantLog := []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, + } + assert.Equal(t, sortCacheLogEntries(wantLog), sortCacheLogEntries(subLog), "should have exactly 1 L2 set for Product (deduplicated, not 3)") + + // Verify cached Product value + entries, err := defaultCache.Get(ctx, []string{`{"__typename":"Product","key":{"upc":"top-4"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0]) + assert.Equal(t, fmt.Sprintf(`{"upc":"top-4","name":"Bowler","price":%d,"__typename":"Product"}`, warmupEmits+1), string(entries[0].Value)) + }) + + // ===================================================================== + // Category 5: Tier 1 field-name disambiguation + // ===================================================================== + + t.Run("subscription field-name disambiguation - updateProductPrice uses 30s TTL", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + // Two configs for the same entity type, disambiguated by FieldName (Tier 1) + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + {TypeName: "Product", FieldName: "updatedPrice", CacheName: "default", TTL: 60 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + defaultCache.ClearLog() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, toWSAddr(setup.GatewayServer.URL), + `subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +}`, + queryVariables{"upc": "top-4"}, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updateProductPrice":{"upc":"top-4","name":"Bowler","price":1}}}}`, messages[0]) + + log := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-4"}}`, TTL: 30 * time.Second}}}, // Tier 1 match: updateProductPrice config selected (30s), not updatedPrice (60s) + }, log) + }) + + t.Run("subscription field-name disambiguation - updatedPrice uses 60s TTL", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + + setup := federationtesting.NewManualFederationSetup(addCachingGateway( + withCachingEnableART(false), + withCachingLoaderCache(map[string]resolve.LoaderCache{"default": defaultCache}), + withCachingOptionsFunc(resolve.CachingOptions{EnableL2Cache: true}), + withSubgraphEntityCachingConfigs(engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + SubscriptionEntityPopulation: plan.SubscriptionEntityPopulationConfigurations{ + // Same two configs — this time exercising the updatedPrice field + {TypeName: "Product", FieldName: "updateProductPrice", CacheName: "default", TTL: 30 * time.Second}, + {TypeName: "Product", FieldName: "updatedPrice", CacheName: "default", TTL: 60 * time.Second}, + }, + }, + }), + )) + t.Cleanup(setup.Close) + + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + defaultCache.ClearLog() + + messages := collectSubscriptionMessages(ctx, gqlClient, setup, toWSAddr(setup.GatewayServer.URL), + `subscription UpdatedPrice { + updatedPrice { + upc + name + price + } +}`, + nil, 1, t) + assert.Equal(t, `{"id":"1","type":"data","payload":{"data":{"updatedPrice":{"upc":"top-3","name":"Boater","price":10}}}}`, messages[0]) + + log := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: CacheOperationSet, Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, TTL: 60 * time.Second}}}, // Tier 1 match: updatedPrice config selected (60s), not updateProductPrice (30s) + }, log) + }) +} diff --git a/execution/engine/graphql_client_test.go b/execution/engine/graphql_client_test.go index 23ed0c6e37..0413ef3572 100644 --- a/execution/engine/graphql_client_test.go +++ b/execution/engine/graphql_client_test.go @@ -8,6 +8,9 @@ import ( "net" "net/http" "os" + "strings" + "sync" + "sync/atomic" "testing" "github.com/gobwas/ws" @@ -19,7 +22,7 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/subscription" ) -type queryVariables map[string]interface{} +type queryVariables map[string]any func requestBody(t *testing.T, query string, variables queryVariables) []byte { var variableJsonBytes []byte @@ -58,8 +61,9 @@ type GraphqlClient struct { httpClient *http.Client } -func (g *GraphqlClient) Query(ctx context.Context, addr, queryFilePath string, variables queryVariables, t *testing.T) []byte { - reqBody := loadQuery(t, queryFilePath, variables) +// executeQuery performs the shared POST/read/assert flow used by Query, +// QueryWithHeaders, QueryString, and QueryStringWithHeaders. +func (g *GraphqlClient) executeQuery(ctx context.Context, addr string, reqBody []byte, t *testing.T) ([]byte, http.Header) { req, err := http.NewRequest(http.MethodPost, addr, bytes.NewBuffer(reqBody)) require.NoError(t, err) req = req.WithContext(ctx) @@ -69,9 +73,29 @@ func (g *GraphqlClient) Query(ctx context.Context, addr, queryFilePath string, v responseBodyBytes, err := io.ReadAll(resp.Body) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Contains(t, resp.Header.Get("Content-Type"), "application/json") + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + return responseBodyBytes, resp.Header +} - return responseBodyBytes +func (g *GraphqlClient) Query(ctx context.Context, addr, queryFilePath string, variables queryVariables, t *testing.T) []byte { + body, _ := g.executeQuery(ctx, addr, loadQuery(t, queryFilePath, variables), t) + return body +} + +// QueryWithHeaders returns both the response body and headers for a file-based query. +func (g *GraphqlClient) QueryWithHeaders(ctx context.Context, addr, queryFilePath string, variables queryVariables, t *testing.T) ([]byte, http.Header) { + return g.executeQuery(ctx, addr, loadQuery(t, queryFilePath, variables), t) +} + +func (g *GraphqlClient) QueryString(ctx context.Context, addr, query string, variables queryVariables, t *testing.T) []byte { + body, _ := g.executeQuery(ctx, addr, requestBody(t, query, variables), t) + return body +} + +// QueryStringWithHeaders returns both the response body and headers. +// Useful for testing cache stats exposed via headers. +func (g *GraphqlClient) QueryStringWithHeaders(ctx context.Context, addr, query string, variables queryVariables, t *testing.T) ([]byte, http.Header) { + return g.executeQuery(ctx, addr, requestBody(t, query, variables), t) } func (g *GraphqlClient) QueryStatusCode(ctx context.Context, addr, queryFilePath string, variables queryVariables, expectedStatusCode int, t *testing.T) []byte { @@ -87,7 +111,7 @@ func (g *GraphqlClient) QueryStatusCode(ctx context.Context, addr, queryFilePath return responseBodyBytes } -func (g *GraphqlClient) Subscription(ctx context.Context, addr, queryFilePath string, variables queryVariables, t *testing.T) chan []byte { +func (g *GraphqlClient) Subscription(ctx context.Context, addr, queryOrFilePath string, variables queryVariables, t *testing.T) (chan []byte, func()) { messageCh := make(chan []byte) conn, _, _, err := ws.Dial(ctx, addr) @@ -105,31 +129,65 @@ func (g *GraphqlClient) Subscription(ctx context.Context, addr, queryFilePath st serverMessage := g.readMessageFromServer(t, conn) assert.Equal(t, `{"id":"","type":"connection_ack","payload":null}`, string(serverMessage)) // 3. send `start` message with subscription operation + trimmedQuery := strings.TrimSpace(queryOrFilePath) + var payload []byte + if strings.HasPrefix(trimmedQuery, "subscription") || + strings.HasPrefix(trimmedQuery, "query") || + strings.HasPrefix(trimmedQuery, "mutation") || + strings.HasPrefix(trimmedQuery, "{") { + payload = requestBody(t, queryOrFilePath, variables) + } else { + payload = loadQuery(t, queryOrFilePath, variables) + } //nolint:staticcheck startSubscriptionMessage := subscription.Message{ Id: "1", Type: subscription.MessageTypeStart, - Payload: loadQuery(t, queryFilePath, variables), + Payload: payload, } err = g.sendMessageToServer(conn, startSubscriptionMessage) require.NoError(t, err) + var closed atomic.Bool + var closeOnce sync.Once + done := make(chan struct{}) + + // closeFn signals the reader goroutine to exit. `done` unblocks a pending + // send on messageCh that conn.Close() cannot reach; `closed` tells the + // read loop the resulting read error is expected. + closeFn := func() { + closeOnce.Do(func() { + closed.Store(true) + close(done) + _ = conn.Close() + }) + } + // 4. start receiving messages from subscription go func() { - defer conn.Close() defer close(messageCh) for { msgBytes, _, err := wsutil.ReadServerData(conn) - require.NoError(t, err) - - messageCh <- msgBytes + if err != nil { + if !closed.Load() { + t.Errorf("unexpected subscription read error: %v", err) + } + return + } + select { + case messageCh <- msgBytes: + case <-done: + return + case <-ctx.Done(): + return + } } }() - return messageCh + return messageCh, closeFn } //nolint:staticcheck diff --git a/execution/engine/json_assert_test.go b/execution/engine/json_assert_test.go new file mode 100644 index 0000000000..1a676be7b3 --- /dev/null +++ b/execution/engine/json_assert_test.go @@ -0,0 +1,20 @@ +package engine_test + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func compactJSONForAssert(t testing.TB, input string) string { + t.Helper() + + var value any + err := json.Unmarshal([]byte(input), &value) + require.NoError(t, err) + + normalized, err := json.Marshal(value) + require.NoError(t, err) + return string(normalized) +} diff --git a/execution/engine/local_type_field_extractor_test.go b/execution/engine/local_type_field_extractor_test.go index 4d46d23ba7..00eea63a5b 100644 --- a/execution/engine/local_type_field_extractor_test.go +++ b/execution/engine/local_type_field_extractor_test.go @@ -21,6 +21,7 @@ func sortNodesAndFields(nodes []plan.TypeField) { } func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { + t.Parallel() run := func(t *testing.T, SDL string, expectedRoot, expectedChild []plan.TypeField) { t.Helper() @@ -38,6 +39,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { } t.Run("only root operation", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -66,6 +68,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("orphan pair", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -94,6 +97,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("orphan cycle", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -123,6 +127,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("nested child nodes", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -151,6 +156,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("child node only available via nested child", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -179,6 +185,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("interface", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -221,6 +228,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("interface with key directive", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -266,6 +274,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("extended interface", func(t *testing.T) { + t.Parallel() t.Log("Bug: The concrete types that implement an interface should also be included") run(t, ` @@ -310,6 +319,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("union", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -347,6 +357,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("union + interface", func(t *testing.T) { + t.Parallel() run(t, ` type Query { histories: [History] @@ -381,6 +392,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("extended union", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -418,6 +430,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("local union extension", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -463,6 +476,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("nested Entity definition", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { me: User @@ -488,6 +502,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("local type extension", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { reviews(IDs: [ID!]!): [Review!] @@ -530,6 +545,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("local type extension defined before local type", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { reviews(IDs: [ID!]!): [Review!] @@ -572,6 +588,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("union types", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { search(name: String!): SearchResult @@ -612,6 +629,7 @@ func TestLocalTypeFieldExtractor_GetAllNodes(t *testing.T) { }) }) t.Run("interface types", func(t *testing.T) { + t.Parallel() run(t, ` extend type Query { search(name: String!): Character @@ -652,7 +670,7 @@ func BenchmarkGetAllNodes(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { extractor := NewLocalTypeFieldExtractor(&document) extractor.GetAllNodes() } diff --git a/execution/engine/lookup_test.go b/execution/engine/lookup_test.go index 345690e29a..863c3294e2 100644 --- a/execution/engine/lookup_test.go +++ b/execution/engine/lookup_test.go @@ -9,17 +9,21 @@ import ( ) func TestCreateTypeFieldLookupKey(t *testing.T) { + t.Parallel() lookupKey := CreateTypeFieldLookupKey("Query", "hello") assert.Equal(t, TypeFieldLookupKey("Query.hello"), lookupKey) } func TestCreateTypeFieldArgumentsLookupMap(t *testing.T) { + t.Parallel() t.Run("should return nil if slice is empty", func(t *testing.T) { + t.Parallel() lookupMap := CreateTypeFieldArgumentsLookupMap([]graphql.TypeFieldArguments{}) assert.Nil(t, lookupMap) }) t.Run("should return a lookup map", func(t *testing.T) { + t.Parallel() typeFieldArgs := []graphql.TypeFieldArguments{ { TypeName: "Query", diff --git a/execution/engine/partial_cache_test.go b/execution/engine/partial_cache_test.go new file mode 100644 index 0000000000..0f830d5216 --- /dev/null +++ b/execution/engine/partial_cache_test.go @@ -0,0 +1,360 @@ +package engine_test + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync" + "testing" + "time" + + "github.com/jensneuse/abstractlogger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/engine" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting" + "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// subgraphRequestTracker tracks requests to subgraphs and captures their bodies +type subgraphRequestTracker struct { + mu sync.RWMutex + requests map[string][]string // host -> list of request bodies + original http.RoundTripper +} + +func newSubgraphRequestTracker(original http.RoundTripper) *subgraphRequestTracker { + return &subgraphRequestTracker{ + requests: make(map[string][]string), + original: original, + } +} + +func (t *subgraphRequestTracker) RoundTrip(req *http.Request) (*http.Response, error) { + // Capture request body + var bodyBytes []byte + if req.Body != nil { + var err error + bodyBytes, err = io.ReadAll(req.Body) + _ = req.Body.Close() + if err != nil { + return nil, fmt.Errorf("reading request body: %w", err) + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + } + + t.mu.Lock() + host := req.URL.Host + t.requests[host] = append(t.requests[host], string(bodyBytes)) + t.mu.Unlock() + + return t.original.RoundTrip(req) +} + +func (t *subgraphRequestTracker) GetRequests(host string) []string { + t.mu.RLock() + defer t.mu.RUnlock() + result := make([]string, len(t.requests[host])) + copy(result, t.requests[host]) + return result +} + +func (t *subgraphRequestTracker) GetRequestCount(host string) int { + t.mu.RLock() + defer t.mu.RUnlock() + return len(t.requests[host]) +} + +func (t *subgraphRequestTracker) Reset() { + t.mu.Lock() + defer t.mu.Unlock() + t.requests = make(map[string][]string) +} + +// TestPartialCacheLoading tests the EnablePartialCacheLoad feature for entity caching. +// When enabled, only cache-missed entities are fetched from subgraphs. +// When disabled (default), all entities are fetched if any are missing. +// TestFederationCaching_PartialLoading verifies partial cache loading end-to-end: when some +// entities in a batch are cached, only the uncached ones are fetched from the subgraph. +func TestFederationCaching_PartialLoading(t *testing.T) { + t.Parallel() + t.Run("L2 partial cache loading enabled - only missing entities fetched", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with request body tracking + tracker := newSubgraphRequestTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + setup := federationtesting.NewFederationSetup(func(setup *federationtesting.FederationSetup) *httptest.Server { + poller := gateway.NewDatasource([]gateway.ServiceConfig{ + {Name: "accounts", URL: setup.AccountsUpstreamServer.URL}, + {Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")}, + {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, + }, trackingClient) + gtw := gateway.HandlerWithCaching(abstractlogger.NoopLogger, poller, trackingClient, false, caches, nil, resolve.CachingOptions{EnableL2Cache: true}, engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + // KEY: EnablePartialCacheLoad is TRUE + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false, EnablePartialCacheLoad: true}, + }, + }, + }, false) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + poller.Run(ctx) + return httptest.NewServer(gtw) + }) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + accountsURLParsed, _ := url.Parse(setup.AccountsUpstreamServer.URL) + accountsHost := accountsURLParsed.Host + + // Pre-populate cache with User entity for id "1234" + // The query will need this user (same user for both reviews via authorWithoutProvides) + userData := `{"__typename":"User","id":"1234","username":"Me"}` + err := defaultCache.Set(context.Background(), []*resolve.CacheEntry{ + {Key: `{"__typename":"User","key":{"id":"1234"}}`, Value: []byte(userData), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"1234"}}`, TTL: 30 * time.Second}}}, + }, seedLog) + + // First query - User is already cached, so accounts subgraph should NOT be called + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify accounts subgraph was NOT called (all Users were cached) + accountsRequests := tracker.GetRequests(accountsHost) + assert.Equal(t, 0, len(accountsRequests), "accounts subgraph should not be called when all User entities are cached") + }) + + t.Run("L2 partial cache loading enabled - partial cache hit fetches only missing", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with request body tracking + tracker := newSubgraphRequestTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + setup := federationtesting.NewFederationSetup(func(setup *federationtesting.FederationSetup) *httptest.Server { + poller := gateway.NewDatasource([]gateway.ServiceConfig{ + {Name: "accounts", URL: setup.AccountsUpstreamServer.URL}, + {Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")}, + {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, + }, trackingClient) + gtw := gateway.HandlerWithCaching(abstractlogger.NoopLogger, poller, trackingClient, false, caches, nil, resolve.CachingOptions{EnableL2Cache: true}, engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + // KEY: EnablePartialCacheLoad is TRUE + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false, EnablePartialCacheLoad: true}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }, false) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + poller.Run(ctx) + return httptest.NewServer(gtw) + }) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + + // Pre-populate cache with ONLY ONE of the two Product entities (top-1) + // top-2 is NOT cached + // IMPORTANT: Must use 'authorWithoutProvides' as that's what the query fetches (not 'author' which has @provides) + product1Data := `{"__typename":"Product","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}` + err := defaultCache.Set(context.Background(), []*resolve.CacheEntry{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Value: []byte(product1Data), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}}}, + }, seedLog) + + // Query - should only fetch top-2 from reviews subgraph (top-1 is cached) + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + + // Response should still be complete + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify reviews subgraph was called with ONLY the missing entity (top-2) + reviewsRequests := tracker.GetRequests(reviewsHost) + require.Equal(t, 1, len(reviewsRequests), "reviews subgraph should be called exactly once") + + // The request should only contain top-2, NOT top-1 (partial cache load = only fetch missing) + // Using exact assertion to verify the request body structure + assert.Equal(t, `{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {__typename reviews {body authorWithoutProvides {__typename id}}}}}","variables":{"representations":[{"__typename":"Product","upc":"top-2"}]}}`, reviewsRequests[0], "reviews request should fetch ONLY top-2 (top-1 is cached)") + }) + + t.Run("L2 partial cache loading disabled - all entities fetched even with partial cache hit", func(t *testing.T) { + t.Parallel() + defaultCache := NewFakeLoaderCache() + caches := map[string]resolve.LoaderCache{ + "default": defaultCache, + } + + // Create HTTP client with request body tracking + tracker := newSubgraphRequestTracker(http.DefaultTransport) + trackingClient := &http.Client{ + Transport: tracker, + } + + setup := federationtesting.NewFederationSetup(func(setup *federationtesting.FederationSetup) *httptest.Server { + poller := gateway.NewDatasource([]gateway.ServiceConfig{ + {Name: "accounts", URL: setup.AccountsUpstreamServer.URL}, + {Name: "products", URL: setup.ProductsUpstreamServer.URL, WS: strings.ReplaceAll(setup.ProductsUpstreamServer.URL, "http:", "ws:")}, + {Name: "reviews", URL: setup.ReviewsUpstreamServer.URL}, + }, trackingClient) + gtw := gateway.HandlerWithCaching(abstractlogger.NoopLogger, poller, trackingClient, false, caches, nil, resolve.CachingOptions{EnableL2Cache: true}, engine.SubgraphCachingConfigs{ + { + SubgraphName: "products", + RootFieldCaching: plan.RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "topProducts", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + { + SubgraphName: "reviews", + EntityCaching: plan.EntityCacheConfigurations{ + // KEY: EnablePartialCacheLoad is FALSE (default) + {TypeName: "Product", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false, EnablePartialCacheLoad: false}, + }, + }, + { + SubgraphName: "accounts", + EntityCaching: plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second, IncludeSubgraphHeaderPrefix: false}, + }, + }, + }, false) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + poller.Run(ctx) + return httptest.NewServer(gtw) + }) + t.Cleanup(setup.Close) + gqlClient := NewGraphqlClient(http.DefaultClient) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Extract hostnames for tracking + reviewsURLParsed, _ := url.Parse(setup.ReviewsUpstreamServer.URL) + reviewsHost := reviewsURLParsed.Host + + // Pre-populate cache with ONLY ONE of the two Product entities (top-1) + // top-2 is NOT cached + // IMPORTANT: Must use 'authorWithoutProvides' as that's what the query fetches (not 'author' which has @provides) + product1Data := `{"__typename":"Product","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"__typename":"User","id":"1234"}}]}` + err := defaultCache.Set(context.Background(), []*resolve.CacheEntry{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Value: []byte(product1Data), TTL: 30 * time.Second}, + }) + require.NoError(t, err) + seedLog := defaultCache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}}}, + }, seedLog) + + // Query - with partial loading DISABLED, should fetch ALL entities (top-1 AND top-2) + tracker.Reset() + resp := gqlClient.QueryString(ctx, setup.GatewayServer.URL, `query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +}`, nil, t) + + // Response should still be complete + assert.Equal(t, `{"data":{"topProducts":[{"name":"Trilby","reviews":[{"body":"A highly effective form of birth control.","authorWithoutProvides":{"username":"Me"}}]},{"name":"Fedora","reviews":[{"body":"Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.","authorWithoutProvides":{"username":"Me"}}]}]}}`, string(resp)) + + // Verify reviews subgraph was called with BOTH entities (all-or-nothing behavior) + reviewsRequests := tracker.GetRequests(reviewsHost) + require.Equal(t, 1, len(reviewsRequests), "reviews subgraph should be called exactly once") + + // The request should contain BOTH top-1 AND top-2 (all-or-nothing mode, partial cache disabled) + // Using exact assertion to verify the request body structure + assert.Equal(t, `{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {__typename reviews {body authorWithoutProvides {__typename id}}}}}","variables":{"representations":[{"__typename":"Product","upc":"top-1"},{"__typename":"Product","upc":"top-2"}]}}`, reviewsRequests[0], "reviews request should fetch BOTH entities (partial cache disabled)") + }) +} diff --git a/execution/engine/request_scoped_widening_e2e_test.go b/execution/engine/request_scoped_widening_e2e_test.go new file mode 100644 index 0000000000..12c777a335 --- /dev/null +++ b/execution/engine/request_scoped_widening_e2e_test.go @@ -0,0 +1,660 @@ +package engine + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "sync" + "testing" + + "github.com/jensneuse/abstractlogger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/execution/graphql" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/graphql_datasource" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +type requestScopedE2EServer struct { + server *httptest.Server + + mu sync.Mutex + requests []requestScopedE2ERequest + unexpectedRequests []requestScopedE2ERequest +} + +type requestScopedE2ERequest struct { + Query string + Variables string +} + +func newRequestScopedE2EServer(t *testing.T, responder func(request requestScopedE2ERequest) (response string, ok bool)) *requestScopedE2EServer { + t.Helper() + + s := &requestScopedE2EServer{} + s.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Helper() + + body, err := io.ReadAll(r.Body) + if !assert.NoError(t, err) { + http.Error(w, `{"errors":[{"message":"invalid request body"}]}`, http.StatusBadRequest) + return + } + + var payload struct { + Query string `json:"query"` + Variables json.RawMessage `json:"variables"` + } + if !assert.NoError(t, json.Unmarshal(body, &payload)) { + http.Error(w, `{"errors":[{"message":"invalid graphql payload"}]}`, http.StatusBadRequest) + return + } + + request := requestScopedE2ERequest{ + Query: payload.Query, + Variables: normalizeRequestScopedVariables(t, payload.Variables), + } + + s.mu.Lock() + s.requests = append(s.requests, request) + s.mu.Unlock() + + response, ok := responder(request) + if !ok { + s.mu.Lock() + s.unexpectedRequests = append(s.unexpectedRequests, request) + s.mu.Unlock() + response = `{"errors":[{"message":"unexpected upstream query"}]}` + } + + w.Header().Set("Content-Type", "application/json") + _, err = w.Write([]byte(response)) + assert.NoError(t, err) + })) + + t.Cleanup(s.server.Close) + return s +} + +// normalizeRequestScopedVariables runs on the httptest handler goroutine, so it +// must not use require/FailNow-family assertions. It inlines the compact-JSON +// logic with non-fatal assert.NoError; on marshal failure it falls through with +// the raw bytes so any test assertion can still diff against a recognizable +// value. +func normalizeRequestScopedVariables(t *testing.T, raw json.RawMessage) string { + t.Helper() + + if len(raw) == 0 || string(raw) == "null" { + return "" + } + + var value any + if !assert.NoError(t, json.Unmarshal(raw, &value)) { + return string(raw) + } + normalized, err := json.Marshal(value) + if !assert.NoError(t, err) { + return string(raw) + } + return string(normalized) +} + +func (s *requestScopedE2EServer) URL() string { + return s.server.URL +} + +func (s *requestScopedE2EServer) Requests() []requestScopedE2ERequest { + s.mu.Lock() + defer s.mu.Unlock() + + out := make([]requestScopedE2ERequest, len(s.requests)) + copy(out, s.requests) + return out +} + +func (s *requestScopedE2EServer) AssertExactRequests(t *testing.T, expected ...requestScopedE2ERequest) { + t.Helper() + + s.mu.Lock() + defer s.mu.Unlock() + + assert.Equal(t, expected, s.requests) + assert.Equal(t, []requestScopedE2ERequest(nil), s.unexpectedRequests) +} + +type requestScopedE2EDataSourceSpec struct { + name string + url string + sdl string + + rootNodes []plan.TypeField + childNodes []plan.TypeField + federationMetaData plan.FederationMetaData +} + +func newRequestScopedExecutionEngine( + t *testing.T, + specs ...requestScopedE2EDataSourceSpec, +) *ExecutionEngine { + t.Helper() + + ctx := context.Background() + + subgraphs := make([]SubgraphConfiguration, 0, len(specs)) + for _, spec := range specs { + subgraphs = append(subgraphs, SubgraphConfiguration{ + Name: spec.name, + URL: spec.url, + SDL: spec.sdl, + }) + } + + factory := NewFederationEngineConfigFactory(ctx, subgraphs) + engineConfig, err := factory.BuildEngineConfiguration() + require.NoError(t, err) + + httpClient := http.DefaultClient + subscriptionClient := graphql_datasource.NewGraphQLSubscriptionClient(httpClient, httpClient, ctx) + graphQLFactory, err := graphql_datasource.NewFactory(ctx, httpClient, subscriptionClient) + require.NoError(t, err) + + dataSources := make([]plan.DataSource, 0, len(specs)) + for _, spec := range specs { + schemaConfig, err := graphql_datasource.NewSchemaConfiguration(spec.sdl, &graphql_datasource.FederationConfiguration{ + Enabled: true, + ServiceSDL: spec.sdl, + }) + require.NoError(t, err) + + customConfig, err := graphql_datasource.NewConfiguration(graphql_datasource.ConfigurationInput{ + Fetch: &graphql_datasource.FetchConfiguration{ + URL: spec.url, + Method: http.MethodPost, + }, + SchemaConfiguration: schemaConfig, + }) + require.NoError(t, err) + + dataSource, err := plan.NewDataSourceConfiguration[graphql_datasource.Configuration]( + spec.name, + graphQLFactory, + &plan.DataSourceMetadata{ + RootNodes: spec.rootNodes, + ChildNodes: spec.childNodes, + FederationMetaData: spec.federationMetaData, + }, + customConfig, + ) + require.NoError(t, err) + + dataSources = append(dataSources, dataSource) + } + + engineConfig.SetDataSources(dataSources) + + executionEngine, err := NewExecutionEngine(ctx, abstractlogger.NoopLogger, engineConfig, resolve.ResolverOptions{ + MaxConcurrency: 1024, + }) + require.NoError(t, err) + + return executionEngine +} + +func executeRequestScopedQuery(t *testing.T, executionEngine *ExecutionEngine, query string) string { + t.Helper() + + request := &graphql.Request{Query: query} + writer := graphql.NewEngineResultWriter() + + err := executionEngine.Execute( + context.Background(), + request, + &writer, + WithCachingOptions(resolve.CachingOptions{EnableL1Cache: true}), + ) + require.NoError(t, err) + + return writer.String() +} + +func viewerRequestScopedSpec( + viewerURL, viewerSDL string, + viewerFields []string, + childNodes []plan.TypeField, + requires plan.FederationFieldConfigurations, +) requestScopedE2EDataSourceSpec { + return requestScopedE2EDataSourceSpec{ + name: "viewer", + url: viewerURL, + sdl: viewerSDL, + rootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"currentViewer"}}, + {TypeName: "Article", FieldNames: []string{"id", "currentViewer"}}, + {TypeName: "Viewer", FieldNames: viewerFields}, + }, + childNodes: childNodes, + federationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Article", SelectionSet: "id"}, + }, + Requires: requires, + RequestScopedFields: []plan.RequestScopedField{ + {TypeName: "Query", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + {TypeName: "Article", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + }, + }, + } +} + +func viewerRequestScopedRequiresBaseSpec(viewerURL string) requestScopedE2EDataSourceSpec { + return requestScopedE2EDataSourceSpec{ + name: "viewer", + url: viewerURL, + sdl: `directive @requestScoped(key: String!) on FIELD_DEFINITION +type Query { currentViewer: Viewer @requestScoped(key: "viewer") } +type Viewer @key(fields: "id") { id: ID! name: String! } +type Article @key(fields: "id") { id: ID! currentViewer: Viewer @requestScoped(key: "viewer") }`, + rootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"currentViewer"}}, + {TypeName: "Article", FieldNames: []string{"id", "currentViewer"}}, + {TypeName: "Viewer", FieldNames: []string{"id", "name"}}, + }, + childNodes: []plan.TypeField{ + {TypeName: "Viewer", FieldNames: []string{"id", "name"}}, + }, + federationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Viewer", SelectionSet: "id"}, + {TypeName: "Article", SelectionSet: "id"}, + }, + RequestScopedFields: []plan.RequestScopedField{ + {TypeName: "Query", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + {TypeName: "Article", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + }, + }, + } +} + +func handlesRequestScopedSpec(handlesURL string) requestScopedE2EDataSourceSpec { + return requestScopedE2EDataSourceSpec{ + name: "handles", + url: handlesURL, + sdl: `directive @external on FIELD_DEFINITION +directive @requires(fields: String!) on FIELD_DEFINITION +type Viewer @key(fields: "id") { id: ID! @external name: String! @external handle: String! @requires(fields: "name") }`, + rootNodes: []plan.TypeField{ + {TypeName: "Viewer", FieldNames: []string{"id", "handle"}, ExternalFieldNames: []string{"name"}}, + }, + childNodes: []plan.TypeField{ + {TypeName: "Viewer", FieldNames: []string{"id", "handle"}, ExternalFieldNames: []string{"name"}}, + }, + federationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Viewer", SelectionSet: "id"}, + }, + Requires: plan.FederationFieldConfigurations{ + {TypeName: "Viewer", FieldName: "handle", SelectionSet: "name"}, + }, + }, + } +} + +func articlesRequestScopedSpec(articlesURL, articlesSDL string, queryFields []string) requestScopedE2EDataSourceSpec { + return requestScopedE2EDataSourceSpec{ + name: "articles", + url: articlesURL, + sdl: articlesSDL, + rootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: queryFields}, + {TypeName: "Article", FieldNames: []string{"id", "title"}}, + }, + federationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Article", SelectionSet: "id"}, + }, + }, + } +} + +// TestRequestScopedWideningExecution verifies the end-to-end fetch behavior for +// requestScoped widening. +// +// Each subtest asserts two things: +// 1. The client-visible response still matches the original query shape. +// 2. The upstream traffic shows only the widened fetches we expect. +// +// The request recorder is intentionally strict: if the planner or resolver +// regresses and sends an extra entity hop, the test records it as an unexpected +// request and fails. +func TestRequestScopedWideningExecution(t *testing.T) { + t.Parallel() + + t.Run("root fetch widens and skips the entity fetch", func(t *testing.T) { + t.Parallel() + + // Scenario: + // - The root currentViewer selection is narrower than the article.currentViewer selection. + // - requestScoped widening should widen the root fetch to the wider shape. + // + // Expected flow: + // 1. Root fetch to viewer requests {id name email}. + // 2. Root fetch to articles requests the article shell. + // 3. No viewer entity fetch happens for article.currentViewer because the widened + // root value is injected from requestScoped L1. + viewer := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{Query: `{currentViewer {id name email}}`}) { + return `{"data":{"currentViewer":{"id":"v1","name":"Alice","email":"alice@example.com"}}}`, true + } + return "", false + }) + + articles := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{Query: `{article {id title __typename}}`}) { + return `{"data":{"article":{"id":"a1","title":"T1","__typename":"Article"}}}`, true + } + return "", false + }) + + executionEngine := newRequestScopedExecutionEngine( + t, + viewerRequestScopedSpec( + viewer.URL(), + `directive @requestScoped(key: String!) on FIELD_DEFINITION +type Query { currentViewer: Viewer @requestScoped(key: "viewer") } +type Viewer { id: ID! name: String! email: String! } +type Article @key(fields: "id") { id: ID! currentViewer: Viewer @requestScoped(key: "viewer") }`, + []string{"id", "name", "email"}, + nil, + nil, + ), + articlesRequestScopedSpec( + articles.URL(), + `type Query { article: Article } +type Article @key(fields: "id") { id: ID! title: String! }`, + []string{"article"}, + ), + ) + + response := executeRequestScopedQuery(t, executionEngine, `query { + currentViewer { + id + name + } + article { + id + title + currentViewer { + id + name + email + } + } + }`) + + // The client response must keep the original narrow root shape and the wider + // article.currentViewer shape even though the upstream root fetch was widened. + assert.Equal(t, + compactJSONForAssert(t, `{"data":{"currentViewer":{"id":"v1","name":"Alice"},"article":{"id":"a1","title":"T1","currentViewer":{"id":"v1","name":"Alice","email":"alice@example.com"}}}}`), + compactJSONForAssert(t, response), + ) + + // Only the widened root fetch and the article shell fetch are allowed. + viewer.AssertExactRequests(t, requestScopedE2ERequest{Query: `{currentViewer {id name email}}`}) + articles.AssertExactRequests(t, requestScopedE2ERequest{Query: `{article {id title __typename}}`}) + }) + + t.Run("requires chain widens the base viewer fetch, skips the requestScoped entity hop, and still feeds the handle subgraph", func(t *testing.T) { + t.Parallel() + + // Scenario: + // - The base viewer subgraph exposes name through currentViewer. + // - The article-side currentViewer participant is requestScoped with the same key. + // - A third handles subgraph owns handle and declares @requires(fields: "name"). + // + // Expected flow: + // 1. The root viewer fetch is widened to include the hidden dependency fields + // needed later: aliased name, __typename, and id. + // 2. The requestScoped entity hop back into the viewer subgraph is skipped. + // 3. The handles entity fetch still runs, receiving representations that include + // the hidden name dependency from the widened root fetch. + viewer := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{Query: `{currentViewer {viewerName: name __typename id}}`}) { + return `{"data":{"currentViewer":{"viewerName":"Alice","__typename":"Viewer","id":"v1"}}}`, true + } + return "", false + }) + + articles := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{Query: `{article {id title __typename}}`}) { + return `{"data":{"article":{"id":"a1","title":"T1","__typename":"Article"}}}`, true + } + return "", false + }) + + handlesExpectedVariables := compactJSONForAssert(t, `{"representations":[{"__typename":"Viewer","id":"v1","name":"Alice"}]}`) + handles := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{ + Query: `query($representations: [_Any!]!){_entities(representations: $representations){... on Viewer {__typename handle}}}`, + Variables: handlesExpectedVariables, + }) { + return `{"data":{"_entities":[{"__typename":"Viewer","handle":"alice-handle"}]}}`, true + } + return "", false + }) + + executionEngine := newRequestScopedExecutionEngine( + t, + viewerRequestScopedRequiresBaseSpec(viewer.URL()), + articlesRequestScopedSpec( + articles.URL(), + `type Query { article: Article } +type Article @key(fields: "id") { id: ID! title: String! }`, + []string{"article"}, + ), + handlesRequestScopedSpec(handles.URL()), + ) + + response := executeRequestScopedQuery(t, executionEngine, `query { + currentViewer { + viewerName: name + } + article { + id + title + currentViewer { + handle + } + } + }`) + + // The response keeps the user-visible alias at the root and only exposes handle + // on the nested branch even though name/id/__typename were fetched behind the scenes. + assert.Equal(t, + compactJSONForAssert(t, `{"data":{"currentViewer":{"viewerName":"Alice"},"article":{"id":"a1","title":"T1","currentViewer":{"handle":"alice-handle"}}}}`), + compactJSONForAssert(t, response), + ) + + // The viewer subgraph must only receive the widened root fetch. The skipped + // requestScoped entity hop would show up here as an unexpected extra request. + viewer.AssertExactRequests(t, requestScopedE2ERequest{Query: `{currentViewer {viewerName: name __typename id}}`}) + articles.AssertExactRequests(t, requestScopedE2ERequest{Query: `{article {id title __typename}}`}) + + // The downstream handles fetch still happens, and its representations must carry + // the hidden name dependency supplied by the widened root fetch. + handles.AssertExactRequests(t, requestScopedE2ERequest{ + Query: `query($representations: [_Any!]!){_entities(representations: $representations){... on Viewer {__typename handle}}}`, + Variables: compactJSONForAssert(t, `{"representations":[{"__typename":"Viewer","id":"v1","name":"Alice"}]}`), + }) + }) + + t.Run("argument conflicts widen through synthetic aliases and still render user-shaped data", func(t *testing.T) { + t.Parallel() + + // Scenario: + // - Two requestScoped participants select the same field with different arguments. + // - The widened upstream fetch must keep both variants distinct with synthetic aliases. + // + // Expected flow: + // 1. Root fetch to viewer requests both posts(first: 1) and posts(first: 2). + // 2. The synthetic aliases keep the two cache entries separate inside requestScoped L1. + // 3. The nested article.currentViewer branch is injected from the widened root value. + viewerExpectedVariables := compactJSONForAssert(t, `{"a":1,"b":2}`) + viewer := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{ + Query: `query($a: Int!, $b: Int!){currentViewer {id __request_scoped__posts_0: posts(first: $a){id} __request_scoped__posts_1: posts(first: $b){id title}}}`, + Variables: viewerExpectedVariables, + }) { + return `{"data":{"currentViewer":{"id":"v1","__request_scoped__posts_0":[{"id":"p1"}],"__request_scoped__posts_1":[{"id":"p2","title":"Second"}]}}}`, true + } + return "", false + }) + + articles := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{Query: `{article {id title __typename}}`}) { + return `{"data":{"article":{"id":"a1","title":"T1","__typename":"Article"}}}`, true + } + return "", false + }) + + executionEngine := newRequestScopedExecutionEngine( + t, + viewerRequestScopedSpec( + viewer.URL(), + `directive @requestScoped(key: String!) on FIELD_DEFINITION +type Query { currentViewer: Viewer @requestScoped(key: "viewer") } +type Viewer { id: ID! posts(first: Int!): [Post!]! } +type Post { id: ID! title: String! } +type Article @key(fields: "id") { id: ID! currentViewer: Viewer @requestScoped(key: "viewer") }`, + []string{"id", "posts"}, + []plan.TypeField{{TypeName: "Post", FieldNames: []string{"id", "title"}}}, + nil, + ), + articlesRequestScopedSpec( + articles.URL(), + `type Query { article: Article } +type Article @key(fields: "id") { id: ID! title: String! }`, + []string{"article"}, + ), + ) + + response := executeRequestScopedQuery(t, executionEngine, `query { + currentViewer { + id + posts(first: 1) { + id + } + } + article { + id + title + currentViewer { + id + posts(first: 2) { + id + title + } + } + } + }`) + + // The client still sees the original argument-specific branches rather than the + // synthetic aliases used internally for widening and cache storage. + assert.Equal(t, + compactJSONForAssert(t, `{"data":{"currentViewer":{"id":"v1","posts":[{"id":"p1"}]},"article":{"id":"a1","title":"T1","currentViewer":{"id":"v1","posts":[{"id":"p2","title":"Second"}]}}}}`), + compactJSONForAssert(t, response), + ) + + // The only viewer request allowed is the widened root fetch that carries both + // argument variants. Any later entity hop would fail the exact request assertion. + viewer.AssertExactRequests(t, requestScopedE2ERequest{ + Query: `query($a: Int!, $b: Int!){currentViewer {id __request_scoped__posts_0: posts(first: $a){id} __request_scoped__posts_1: posts(first: $b){id title}}}`, + Variables: compactJSONForAssert(t, `{"a":1,"b":2}`), + }) + articles.AssertExactRequests(t, requestScopedE2ERequest{Query: `{article {id title __typename}}`}) + }) + + t.Run("three conflicting participants widen to one root fetch while each response branch keeps its own shape", func(t *testing.T) { + t.Parallel() + + // Scenario: + // - Three requestScoped participants all want to bind different schema fields into + // the same response position `name`. + // - The widened root fetch must carry all three variants without collapsing them. + // + // Expected flow: + // 1. Root fetch to viewer requests name, email, and handle under distinct synthetic aliases. + // 2. Both article branches fetch only their article shells. + // 3. The nested currentViewer branches are injected from the common widened root value. + viewer := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{Query: `{currentViewer {id __request_scoped__name_2: name __request_scoped__name_0: email __request_scoped__name_1: handle}}`}) { + return `{"data":{"currentViewer":{"id":"v1","__request_scoped__name_2":"Alice","__request_scoped__name_0":"alice@example.com","__request_scoped__name_1":"alice-handle"}}}`, true + } + return "", false + }) + + articles := newRequestScopedE2EServer(t, func(request requestScopedE2ERequest) (string, bool) { + if request == (requestScopedE2ERequest{Query: `{article {id title __typename} featuredArticle {id title __typename}}`}) { + return `{"data":{"article":{"id":"a1","title":"T1","__typename":"Article"},"featuredArticle":{"id":"a2","title":"T2","__typename":"Article"}}}`, true + } + return "", false + }) + + executionEngine := newRequestScopedExecutionEngine( + t, + viewerRequestScopedSpec( + viewer.URL(), + `directive @requestScoped(key: String!) on FIELD_DEFINITION +type Query { currentViewer: Viewer @requestScoped(key: "viewer") } +type Viewer { id: ID! name: String! email: String! handle: String! } +type Article @key(fields: "id") { id: ID! currentViewer: Viewer @requestScoped(key: "viewer") }`, + []string{"id", "name", "email", "handle"}, + nil, + nil, + ), + articlesRequestScopedSpec( + articles.URL(), + `type Query { article: Article featuredArticle: Article } +type Article @key(fields: "id") { id: ID! title: String! }`, + []string{"article", "featuredArticle"}, + ), + ) + + response := executeRequestScopedQuery(t, executionEngine, `query { + currentViewer { + id + name + } + article { + id + title + currentViewer { + id + name: email + } + } + featuredArticle { + id + title + currentViewer { + id + name: handle + } + } + }`) + + // Even though the upstream fetch uses three distinct aliases, each response branch + // must still render the exact user-visible shape from the original query. + assert.Equal(t, + compactJSONForAssert(t, `{"data":{"currentViewer":{"id":"v1","name":"Alice"},"article":{"id":"a1","title":"T1","currentViewer":{"id":"v1","name":"alice@example.com"}},"featuredArticle":{"id":"a2","title":"T2","currentViewer":{"id":"v1","name":"alice-handle"}}}}`), + compactJSONForAssert(t, response), + ) + + // The root viewer fetch is the only legal viewer request for this scenario. + viewer.AssertExactRequests(t, requestScopedE2ERequest{Query: `{currentViewer {id __request_scoped__name_2: name __request_scoped__name_0: email __request_scoped__name_1: handle}}`}) + articles.AssertExactRequests(t, requestScopedE2ERequest{Query: `{article {id title __typename} featuredArticle {id title __typename}}`}) + }) +} diff --git a/execution/engine/testdata/complex_nesting_query_with_art.json b/execution/engine/testdata/complex_nesting_query_with_art.json index ec85c1e5c1..8efbf54a63 100644 --- a/execution/engine/testdata/complex_nesting_query_with_art.json +++ b/execution/engine/testdata/complex_nesting_query_with_art.json @@ -55,7 +55,7 @@ "trace": { "version": "1", "info": { - "trace_start_time": "", + "trace_start_time": "0", "trace_start_unix": 0, "parse_stats": { "duration_nanoseconds": 0, @@ -76,10 +76,10 @@ "duration_since_start_pretty": "" }, "planner_stats": { - "duration_nanoseconds": 5, - "duration_pretty": "5ns", - "duration_since_start_nanoseconds": 20, - "duration_since_start_pretty": "20ns" + "duration_nanoseconds": 0, + "duration_pretty": "", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" } }, "fetches": { @@ -91,7 +91,7 @@ "kind": "Single", "path": "", "source_id": "0", - "source_name": "0", + "source_name": "accounts", "trace": { "raw_input_data": {}, "input": { @@ -166,30 +166,30 @@ } } }, - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", - "duration_load_nanoseconds": 1, - "duration_load_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", + "duration_load_nanoseconds": 0, + "duration_load_pretty": "", "single_flight_used": true, "single_flight_shared_response": false, "load_skipped": false, "load_stats": { "get_conn": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", "host_port": "" }, "got_conn": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", "reused": false, "was_idle": false, "idle_time_nanoseconds": 0, "idle_time_pretty": "" }, "got_first_response_byte": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" }, "dns_start": { "duration_since_start_nanoseconds": 0, @@ -221,13 +221,26 @@ "duration_since_start_pretty": "" }, "wrote_headers": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" }, "wrote_request": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" } + }, + "cache_trace": { + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", + "duration_nanoseconds": 0, + "duration_pretty": "", + "l1_enabled": false, + "l2_enabled": false, + "entity_count": 0, + "l1_hit": 0, + "l1_miss": 0, + "l2_hit": 0, + "l2_miss": 0 } } } @@ -241,7 +254,7 @@ "kind": "BatchEntity", "path": "me.history.@.product", "source_id": "1", - "source_name": "1", + "source_name": "products", "trace": { "raw_input_data": { "upc": "top-2", @@ -306,30 +319,30 @@ } } }, - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", - "duration_load_nanoseconds": 1, - "duration_load_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", + "duration_load_nanoseconds": 0, + "duration_load_pretty": "", "single_flight_used": true, "single_flight_shared_response": false, "load_skipped": false, "load_stats": { "get_conn": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", "host_port": "" }, "got_conn": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", "reused": false, "was_idle": false, "idle_time_nanoseconds": 0, "idle_time_pretty": "" }, "got_first_response_byte": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" }, "dns_start": { "duration_since_start_nanoseconds": 0, @@ -361,13 +374,26 @@ "duration_since_start_pretty": "" }, "wrote_headers": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" }, "wrote_request": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" } + }, + "cache_trace": { + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", + "duration_nanoseconds": 0, + "duration_pretty": "", + "l1_enabled": false, + "l2_enabled": false, + "entity_count": 0, + "l1_hit": 0, + "l1_miss": 0, + "l2_hit": 0, + "l2_miss": 0 } } } @@ -378,7 +404,7 @@ "kind": "Entity", "path": "me", "source_id": "2", - "source_name": "2", + "source_name": "reviews", "trace": { "raw_input_data": { "id": "1234", @@ -492,30 +518,30 @@ } } }, - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", - "duration_load_nanoseconds": 1, - "duration_load_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", + "duration_load_nanoseconds": 0, + "duration_load_pretty": "", "single_flight_used": true, "single_flight_shared_response": false, "load_skipped": false, "load_stats": { "get_conn": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", "host_port": "" }, "got_conn": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns", + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", "reused": false, "was_idle": false, "idle_time_nanoseconds": 0, "idle_time_pretty": "" }, "got_first_response_byte": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" }, "dns_start": { "duration_since_start_nanoseconds": 0, @@ -547,13 +573,26 @@ "duration_since_start_pretty": "" }, "wrote_headers": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" }, "wrote_request": { - "duration_since_start_nanoseconds": 1, - "duration_since_start_pretty": "1ns" + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "" } + }, + "cache_trace": { + "duration_since_start_nanoseconds": 0, + "duration_since_start_pretty": "", + "duration_nanoseconds": 0, + "duration_pretty": "", + "l1_enabled": false, + "l2_enabled": false, + "entity_count": 0, + "l1_hit": 0, + "l1_miss": 0, + "l2_hit": 0, + "l2_miss": 0 } } } diff --git a/execution/federationtesting/accounts/gqlgen.yml b/execution/federationtesting/accounts/gqlgen.yml index 25ebc4e614..854200b255 100644 --- a/execution/federationtesting/accounts/gqlgen.yml +++ b/execution/federationtesting/accounts/gqlgen.yml @@ -2,6 +2,8 @@ schema: - graph/*.graphqls +skip_mod_tidy: true + # Where should the generated server code go? exec: filename: graph/generated/generated.go @@ -53,3 +55,23 @@ models: - github.com/99designs/gqlgen/graphql.Int - github.com/99designs/gqlgen/graphql.Int64 - github.com/99designs/gqlgen/graphql.Int32 + User: + fields: + greeting: + resolver: true + customGreeting: + resolver: true + CacheEntity: + fields: + a: + resolver: false + b: + resolver: false + c: + resolver: false + d: + resolver: false + e: + resolver: false + f: + resolver: false diff --git a/execution/federationtesting/accounts/graph/entity.resolvers.go b/execution/federationtesting/accounts/graph/entity.resolvers.go index 3feaaa3f66..237d97fb60 100644 --- a/execution/federationtesting/accounts/graph/entity.resolvers.go +++ b/execution/federationtesting/accounts/graph/entity.resolvers.go @@ -6,21 +6,53 @@ package graph import ( "context" + "fmt" "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph/generated" "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph/model" ) +// FindAdminByID is the resolver for the findAdminByID field. +func (r *entityResolver) FindAdminByID(ctx context.Context, id string) (*model.Admin, error) { + name := "Admin " + id + if id == "admin-1" { + name = "SuperAdmin" + } + return &model.Admin{ + ID: id, + Username: name, + Role: "administrator", + }, nil +} + +// FindCacheEntityByID is the resolver for the findCacheEntityByID field. +// Always returns the same deterministic data for any ID. +func (r *entityResolver) FindCacheEntityByID(ctx context.Context, id string) (*model.CacheEntity, error) { + return &model.CacheEntity{ + ID: id, + A: "a-" + id, + B: "b-" + id, + C: "c-" + id, + D: "d-" + id, + E: "e-" + id, + F: "f-" + id, + }, nil +} + // FindUserByID is the resolver for the findUserByID field. func (r *entityResolver) FindUserByID(ctx context.Context, id string) (*model.User, error) { - name := "User " + id - if id == "1234" { - name = "Me" + // Error triggering for cache error handling tests + if id == "error-user" { + return nil, fmt.Errorf("user not found: %s", id) } + name := r.GetUsername(id) + return &model.User{ ID: id, Username: name, + Nickname: "nick-" + name, + RealName: "Real " + name, History: histories, }, nil } diff --git a/execution/federationtesting/accounts/graph/generated/federation.go b/execution/federationtesting/accounts/graph/generated/federation.go index 0dd6da64bf..de56fcf66e 100644 --- a/execution/federationtesting/accounts/graph/generated/federation.go +++ b/execution/federationtesting/accounts/graph/generated/federation.go @@ -153,6 +153,44 @@ func (ec *executionContext) resolveEntity( }() switch typeName { + case "Admin": + resolverName, err := entityResolverNameForAdmin(ctx, rep) + if err != nil { + return nil, fmt.Errorf(`finding resolver for Entity "Admin": %w`, err) + } + switch resolverName { + + case "findAdminByID": + id0, err := ec.unmarshalNID2string(ctx, rep["id"]) + if err != nil { + return nil, fmt.Errorf(`unmarshalling param 0 for findAdminByID(): %w`, err) + } + entity, err := ec.resolvers.Entity().FindAdminByID(ctx, id0) + if err != nil { + return nil, fmt.Errorf(`resolving Entity "Admin": %w`, err) + } + + return entity, nil + } + case "CacheEntity": + resolverName, err := entityResolverNameForCacheEntity(ctx, rep) + if err != nil { + return nil, fmt.Errorf(`finding resolver for Entity "CacheEntity": %w`, err) + } + switch resolverName { + + case "findCacheEntityByID": + id0, err := ec.unmarshalNID2string(ctx, rep["id"]) + if err != nil { + return nil, fmt.Errorf(`unmarshalling param 0 for findCacheEntityByID(): %w`, err) + } + entity, err := ec.resolvers.Entity().FindCacheEntityByID(ctx, id0) + if err != nil { + return nil, fmt.Errorf(`resolving Entity "CacheEntity": %w`, err) + } + + return entity, nil + } case "User": resolverName, err := entityResolverNameForUser(ctx, rep) if err != nil { @@ -198,6 +236,76 @@ func (ec *executionContext) resolveManyEntities( } } +func entityResolverNameForAdmin(ctx context.Context, rep EntityRepresentation) (string, error) { + // we collect errors because a later entity resolver may work fine + // when an entity has multiple keys + entityResolverErrs := []error{} + for { + var ( + m EntityRepresentation + val any + ok bool + ) + _ = val + // if all of the KeyFields values for this resolver are null, + // we shouldn't use use it + allNull := true + m = rep + val, ok = m["id"] + if !ok { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to missing Key Field \"id\" for Admin", ErrTypeNotFound)) + break + } + if allNull { + allNull = val == nil + } + if allNull { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to all null value KeyFields for Admin", ErrTypeNotFound)) + break + } + return "findAdminByID", nil + } + return "", fmt.Errorf("%w for Admin due to %v", ErrTypeNotFound, + errors.Join(entityResolverErrs...).Error()) +} + +func entityResolverNameForCacheEntity(ctx context.Context, rep EntityRepresentation) (string, error) { + // we collect errors because a later entity resolver may work fine + // when an entity has multiple keys + entityResolverErrs := []error{} + for { + var ( + m EntityRepresentation + val any + ok bool + ) + _ = val + // if all of the KeyFields values for this resolver are null, + // we shouldn't use use it + allNull := true + m = rep + val, ok = m["id"] + if !ok { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to missing Key Field \"id\" for CacheEntity", ErrTypeNotFound)) + break + } + if allNull { + allNull = val == nil + } + if allNull { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to all null value KeyFields for CacheEntity", ErrTypeNotFound)) + break + } + return "findCacheEntityByID", nil + } + return "", fmt.Errorf("%w for CacheEntity due to %v", ErrTypeNotFound, + errors.Join(entityResolverErrs...).Error()) +} + func entityResolverNameForUser(ctx context.Context, rep EntityRepresentation) (string, error) { // we collect errors because a later entity resolver may work fine // when an entity has multiple keys diff --git a/execution/federationtesting/accounts/graph/generated/generated.go b/execution/federationtesting/accounts/graph/generated/generated.go index 22fd02edc8..5b7251ef67 100644 --- a/execution/federationtesting/accounts/graph/generated/generated.go +++ b/execution/federationtesting/accounts/graph/generated/generated.go @@ -40,7 +40,9 @@ type Config struct { type ResolverRoot interface { Entity() EntityResolver + Mutation() MutationResolver Query() QueryResolver + User() UserResolver } type DirectiveRoot struct { @@ -51,6 +53,12 @@ type ComplexityRoot struct { Name func(childComplexity int) int } + Admin struct { + ID func(childComplexity int) int + Role func(childComplexity int) int + Username func(childComplexity int) int + } + B struct { Name func(childComplexity int) int } @@ -65,6 +73,16 @@ type ComplexityRoot struct { Middle func(childComplexity int) int } + CacheEntity struct { + A func(childComplexity int) int + B func(childComplexity int) int + C func(childComplexity int) int + D func(childComplexity int) int + E func(childComplexity int) int + F func(childComplexity int) int + ID func(childComplexity int) int + } + Cat struct { Name func(childComplexity int) int } @@ -82,7 +100,13 @@ type ComplexityRoot struct { } Entity struct { - FindUserByID func(childComplexity int, id string) int + FindAdminByID func(childComplexity int, id string) int + FindCacheEntityByID func(childComplexity int, id string) int + FindUserByID func(childComplexity int, id string) int + } + + Mutation struct { + UpdateUsername func(childComplexity int, id string, newUsername string) int } Product struct { @@ -97,15 +121,20 @@ type ComplexityRoot struct { Query struct { AbstractList func(childComplexity int) int + CacheEntity func(childComplexity int, id string) int Cat func(childComplexity int) int Cds func(childComplexity int) int Histories func(childComplexity int) int Identifiable func(childComplexity int) int InterfaceUnion func(childComplexity int, which model.Which) int Me func(childComplexity int) int + MeInterface func(childComplexity int) int + MeUnion func(childComplexity int) int OtherInterfaces func(childComplexity int) int SomeNestedInterfaces func(childComplexity int) int TitleName func(childComplexity int) int + User func(childComplexity int, id string) int + UserByIDAndName func(childComplexity int, id string, username string) int __resolve__service func(childComplexity int) int __resolve_entities func(childComplexity int, representations []map[string]any) int } @@ -157,10 +186,13 @@ type ComplexityRoot struct { } User struct { - History func(childComplexity int) int - ID func(childComplexity int) int - RealName func(childComplexity int) int - Username func(childComplexity int) int + CustomGreeting func(childComplexity int, input model.GreetingInput) int + Greeting func(childComplexity int, style string) int + History func(childComplexity int) int + ID func(childComplexity int) int + Nickname func(childComplexity int) int + RealName func(childComplexity int) int + Username func(childComplexity int) int } WalletType1 struct { @@ -181,13 +213,23 @@ type ComplexityRoot struct { } type EntityResolver interface { + FindAdminByID(ctx context.Context, id string) (*model.Admin, error) + FindCacheEntityByID(ctx context.Context, id string) (*model.CacheEntity, error) FindUserByID(ctx context.Context, id string) (*model.User, error) } +type MutationResolver interface { + UpdateUsername(ctx context.Context, id string, newUsername string) (*model.User, error) +} type QueryResolver interface { Me(ctx context.Context) (*model.User, error) + User(ctx context.Context, id string) (*model.User, error) + UserByIDAndName(ctx context.Context, id string, username string) (*model.User, error) + MeInterface(ctx context.Context) (model.Identifiable, error) + MeUnion(ctx context.Context) (model.MeUnion, error) Identifiable(ctx context.Context) (model.Identifiable, error) Histories(ctx context.Context) ([]model.History, error) Cat(ctx context.Context) (*model.Cat, error) + CacheEntity(ctx context.Context, id string) (*model.CacheEntity, error) InterfaceUnion(ctx context.Context, which model.Which) (model.Ab, error) AbstractList(ctx context.Context) ([]model.AbstractListItem, error) TitleName(ctx context.Context) (*model.TitleName, error) @@ -195,6 +237,10 @@ type QueryResolver interface { OtherInterfaces(ctx context.Context) ([]model.SomeInterface, error) SomeNestedInterfaces(ctx context.Context) ([]model.SomeNestedInterface, error) } +type UserResolver interface { + Greeting(ctx context.Context, obj *model.User, style string) (string, error) + CustomGreeting(ctx context.Context, obj *model.User, input model.GreetingInput) (string, error) +} type executableSchema struct { schema *ast.Schema @@ -222,6 +268,27 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.A.Name(childComplexity), true + case "Admin.id": + if e.complexity.Admin.ID == nil { + break + } + + return e.complexity.Admin.ID(childComplexity), true + + case "Admin.role": + if e.complexity.Admin.Role == nil { + break + } + + return e.complexity.Admin.Role(childComplexity), true + + case "Admin.username": + if e.complexity.Admin.Username == nil { + break + } + + return e.complexity.Admin.Username(childComplexity), true + case "B.name": if e.complexity.B.Name == nil { break @@ -257,6 +324,55 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.CDerObj.Middle(childComplexity), true + case "CacheEntity.a": + if e.complexity.CacheEntity.A == nil { + break + } + + return e.complexity.CacheEntity.A(childComplexity), true + + case "CacheEntity.b": + if e.complexity.CacheEntity.B == nil { + break + } + + return e.complexity.CacheEntity.B(childComplexity), true + + case "CacheEntity.c": + if e.complexity.CacheEntity.C == nil { + break + } + + return e.complexity.CacheEntity.C(childComplexity), true + + case "CacheEntity.d": + if e.complexity.CacheEntity.D == nil { + break + } + + return e.complexity.CacheEntity.D(childComplexity), true + + case "CacheEntity.e": + if e.complexity.CacheEntity.E == nil { + break + } + + return e.complexity.CacheEntity.E(childComplexity), true + + case "CacheEntity.f": + if e.complexity.CacheEntity.F == nil { + break + } + + return e.complexity.CacheEntity.F(childComplexity), true + + case "CacheEntity.id": + if e.complexity.CacheEntity.ID == nil { + break + } + + return e.complexity.CacheEntity.ID(childComplexity), true + case "Cat.name": if e.complexity.Cat.Name == nil { break @@ -285,6 +401,30 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.D.Name(childComplexity), true + case "Entity.findAdminByID": + if e.complexity.Entity.FindAdminByID == nil { + break + } + + args, err := ec.field_Entity_findAdminByID_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Entity.FindAdminByID(childComplexity, args["id"].(string)), true + + case "Entity.findCacheEntityByID": + if e.complexity.Entity.FindCacheEntityByID == nil { + break + } + + args, err := ec.field_Entity_findCacheEntityByID_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Entity.FindCacheEntityByID(childComplexity, args["id"].(string)), true + case "Entity.findUserByID": if e.complexity.Entity.FindUserByID == nil { break @@ -297,6 +437,18 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Entity.FindUserByID(childComplexity, args["id"].(string)), true + case "Mutation.updateUsername": + if e.complexity.Mutation.UpdateUsername == nil { + break + } + + args, err := ec.field_Mutation_updateUsername_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Mutation.UpdateUsername(childComplexity, args["id"].(string), args["newUsername"].(string)), true + case "Product.upc": if e.complexity.Product.Upc == nil { break @@ -332,6 +484,18 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Query.AbstractList(childComplexity), true + case "Query.cacheEntity": + if e.complexity.Query.CacheEntity == nil { + break + } + + args, err := ec.field_Query_cacheEntity_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.CacheEntity(childComplexity, args["id"].(string)), true + case "Query.cat": if e.complexity.Query.Cat == nil { break @@ -379,6 +543,20 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Query.Me(childComplexity), true + case "Query.meInterface": + if e.complexity.Query.MeInterface == nil { + break + } + + return e.complexity.Query.MeInterface(childComplexity), true + + case "Query.meUnion": + if e.complexity.Query.MeUnion == nil { + break + } + + return e.complexity.Query.MeUnion(childComplexity), true + case "Query.otherInterfaces": if e.complexity.Query.OtherInterfaces == nil { break @@ -400,6 +578,30 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Query.TitleName(childComplexity), true + case "Query.user": + if e.complexity.Query.User == nil { + break + } + + args, err := ec.field_Query_user_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.User(childComplexity, args["id"].(string)), true + + case "Query.userByIdAndName": + if e.complexity.Query.UserByIDAndName == nil { + break + } + + args, err := ec.field_Query_userByIdAndName_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.UserByIDAndName(childComplexity, args["id"].(string), args["username"].(string)), true + case "Query._service": if e.complexity.Query.__resolve__service == nil { break @@ -573,6 +775,30 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.TitleName.Title(childComplexity), true + case "User.customGreeting": + if e.complexity.User.CustomGreeting == nil { + break + } + + args, err := ec.field_User_customGreeting_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.User.CustomGreeting(childComplexity, args["input"].(model.GreetingInput)), true + + case "User.greeting": + if e.complexity.User.Greeting == nil { + break + } + + args, err := ec.field_User_greeting_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.User.Greeting(childComplexity, args["style"].(string)), true + case "User.history": if e.complexity.User.History == nil { break @@ -587,6 +813,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.User.ID(childComplexity), true + case "User.nickname": + if e.complexity.User.Nickname == nil { + break + } + + return e.complexity.User.Nickname(childComplexity), true + case "User.realName": if e.complexity.User.RealName == nil { break @@ -657,7 +890,10 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { opCtx := graphql.GetOperationContext(ctx) ec := executionContext{opCtx, e, 0, 0, make(chan graphql.DeferredResult)} - inputUnmarshalMap := graphql.BuildUnmarshalerMap() + inputUnmarshalMap := graphql.BuildUnmarshalerMap( + ec.unmarshalInputGreetingFormatting, + ec.unmarshalInputGreetingInput, + ) first := true switch opCtx.Operation.Operation { @@ -691,6 +927,21 @@ func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler { return &response } + case ast.Mutation: + return func(ctx context.Context) *graphql.Response { + if !first { + return nil + } + first = false + ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap) + data := ec._Mutation(ctx, opCtx.Operation.SelectionSet) + var buf bytes.Buffer + data.MarshalGQL(&buf) + + return &graphql.Response{ + Data: buf.Bytes(), + } + } default: return graphql.OneShot(graphql.ErrorResponse(ctx, "unsupported GraphQL operation")) @@ -741,10 +992,17 @@ func (ec *executionContext) introspectType(name string) (*introspection.Type, er var sources = []*ast.Source{ {Name: "../schema.graphqls", Input: `type Query { me: User + user(id: ID!): User + userByIdAndName(id: ID!, username: String!): User + meInterface: Identifiable + meUnion: MeUnion identifiable: Identifiable histories: [History] cat: Cat + # L1 cache union optimization testing + cacheEntity(id: ID!): CacheEntity! + # merge data test cases interfaceUnion(which: Which! = A): AB abstractList: [AbstractListItem] @@ -754,6 +1012,10 @@ var sources = []*ast.Source{ someNestedInterfaces: [SomeNestedInterface] } +type Mutation { + updateUsername(id: ID!, newUsername: String!): User! +} + type Cat { name: String! } @@ -762,11 +1024,30 @@ interface Identifiable { id: ID! } +enum GreetingStyle { + FORMAL + CASUAL + SHORT +} + +input GreetingFormatting { + uppercase: Boolean + prefix: String +} + +input GreetingInput { + style: GreetingStyle! + formatting: GreetingFormatting +} + type User implements Identifiable @key(fields: "id") { id: ID! username: String! + nickname: String! history: [History!]! realName: String! + greeting(style: String!): String! + customGreeting(input: GreetingInput!): String! } type Product @key(fields: "upc") { @@ -922,7 +1203,32 @@ type CDerObj { first: String! middle: String! last: String! -}`, BuiltIn: false}, +} + +# CacheEntity is a self-referential entity designed for L1 cache testing. +# It has many scalar fields (a-f) so tests can select different field subsets +# at each tree level, creating entity fetches with different ProvidesData. +# The ` + "`" + `nested` + "`" + ` field (defined in reviews subgraph) returns the same entity, +# enabling arbitrary-depth sequential entity fetch chains for the same key. +type CacheEntity @key(fields: "id") { + id: ID! + a: String! + b: String! + c: String! + d: String! + e: String! + f: String! +} + +# Admin is another entity that implements Identifiable for testing interface/union caching +type Admin implements Identifiable @key(fields: "id") { + id: ID! + username: String! + role: String! +} + +# Union containing entity types for testing union field caching +union MeUnion = User | Admin`, BuiltIn: false}, {Name: "../../federation/directives.graphql", Input: ` directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE directive @requires(fields: _FieldSet!) on FIELD_DEFINITION @@ -934,10 +1240,12 @@ type CDerObj { `, BuiltIn: true}, {Name: "../../federation/entity.graphql", Input: ` # a union of all types that use the @key directive -union _Entity = Product | User +union _Entity = Admin | CacheEntity | Product | User # fake type to build resolver interfaces for users to implement type Entity { + findAdminByID(id: ID!,): Admin! + findCacheEntityByID(id: ID!,): CacheEntity! findUserByID(id: ID!,): User! } @@ -957,17 +1265,17 @@ var parsedSchema = gqlparser.MustLoadSchema(sources...) // region ***************************** args.gotpl ***************************** -func (ec *executionContext) field_Entity_findUserByID_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Entity_findAdminByID_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field_Entity_findUserByID_argsID(ctx, rawArgs) + arg0, err := ec.field_Entity_findAdminByID_argsID(ctx, rawArgs) if err != nil { return nil, err } args["id"] = arg0 return args, nil } -func (ec *executionContext) field_Entity_findUserByID_argsID( +func (ec *executionContext) field_Entity_findAdminByID_argsID( ctx context.Context, rawArgs map[string]any, ) (string, error) { @@ -985,141 +1293,411 @@ func (ec *executionContext) field_Entity_findUserByID_argsID( return zeroVal, nil } -func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Entity_findCacheEntityByID_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field_Query___type_argsName(ctx, rawArgs) + arg0, err := ec.field_Entity_findCacheEntityByID_argsID(ctx, rawArgs) if err != nil { return nil, err } - args["name"] = arg0 + args["id"] = arg0 return args, nil } -func (ec *executionContext) field_Query___type_argsName( +func (ec *executionContext) field_Entity_findCacheEntityByID_argsID( ctx context.Context, rawArgs map[string]any, ) (string, error) { - if _, ok := rawArgs["name"]; !ok { + if _, ok := rawArgs["id"]; !ok { var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) - if tmp, ok := rawArgs["name"]; ok { - return ec.unmarshalNString2string(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) } var zeroVal string return zeroVal, nil } -func (ec *executionContext) field_Query__entities_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Entity_findUserByID_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field_Query__entities_argsRepresentations(ctx, rawArgs) + arg0, err := ec.field_Entity_findUserByID_argsID(ctx, rawArgs) if err != nil { return nil, err } - args["representations"] = arg0 + args["id"] = arg0 return args, nil } -func (ec *executionContext) field_Query__entities_argsRepresentations( +func (ec *executionContext) field_Entity_findUserByID_argsID( ctx context.Context, rawArgs map[string]any, -) ([]map[string]any, error) { - if _, ok := rawArgs["representations"]; !ok { - var zeroVal []map[string]any +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("representations")) - if tmp, ok := rawArgs["representations"]; ok { - return ec.unmarshalN_Any2ᚕmapᚄ(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) } - var zeroVal []map[string]any + var zeroVal string return zeroVal, nil } -func (ec *executionContext) field_Query_interfaceUnion_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Mutation_updateUsername_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field_Query_interfaceUnion_argsWhich(ctx, rawArgs) + arg0, err := ec.field_Mutation_updateUsername_argsID(ctx, rawArgs) if err != nil { return nil, err } - args["which"] = arg0 + args["id"] = arg0 + arg1, err := ec.field_Mutation_updateUsername_argsNewUsername(ctx, rawArgs) + if err != nil { + return nil, err + } + args["newUsername"] = arg1 return args, nil } -func (ec *executionContext) field_Query_interfaceUnion_argsWhich( +func (ec *executionContext) field_Mutation_updateUsername_argsID( ctx context.Context, rawArgs map[string]any, -) (model.Which, error) { - if _, ok := rawArgs["which"]; !ok { - var zeroVal model.Which +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("which")) - if tmp, ok := rawArgs["which"]; ok { - return ec.unmarshalNWhich2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐWhich(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) } - var zeroVal model.Which + var zeroVal string return zeroVal, nil } -func (ec *executionContext) field___Directive_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { - var err error - args := map[string]any{} - arg0, err := ec.field___Directive_args_argsIncludeDeprecated(ctx, rawArgs) - if err != nil { - return nil, err - } - args["includeDeprecated"] = arg0 - return args, nil -} -func (ec *executionContext) field___Directive_args_argsIncludeDeprecated( +func (ec *executionContext) field_Mutation_updateUsername_argsNewUsername( ctx context.Context, rawArgs map[string]any, -) (*bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal *bool +) (string, error) { + if _, ok := rawArgs["newUsername"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("newUsername")) + if tmp, ok := rawArgs["newUsername"]; ok { + return ec.unmarshalNString2string(ctx, tmp) } - var zeroVal *bool + var zeroVal string return zeroVal, nil } -func (ec *executionContext) field___Field_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field___Field_args_argsIncludeDeprecated(ctx, rawArgs) + arg0, err := ec.field_Query___type_argsName(ctx, rawArgs) if err != nil { return nil, err } - args["includeDeprecated"] = arg0 + args["name"] = arg0 return args, nil } -func (ec *executionContext) field___Field_args_argsIncludeDeprecated( +func (ec *executionContext) field_Query___type_argsName( ctx context.Context, rawArgs map[string]any, -) (*bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal *bool +) (string, error) { + if _, ok := rawArgs["name"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) - } + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("name")) + if tmp, ok := rawArgs["name"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query__entities_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query__entities_argsRepresentations(ctx, rawArgs) + if err != nil { + return nil, err + } + args["representations"] = arg0 + return args, nil +} +func (ec *executionContext) field_Query__entities_argsRepresentations( + ctx context.Context, + rawArgs map[string]any, +) ([]map[string]any, error) { + if _, ok := rawArgs["representations"]; !ok { + var zeroVal []map[string]any + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("representations")) + if tmp, ok := rawArgs["representations"]; ok { + return ec.unmarshalN_Any2ᚕmapᚄ(ctx, tmp) + } + + var zeroVal []map[string]any + return zeroVal, nil +} + +func (ec *executionContext) field_Query_cacheEntity_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_cacheEntity_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + return args, nil +} +func (ec *executionContext) field_Query_cacheEntity_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_interfaceUnion_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_interfaceUnion_argsWhich(ctx, rawArgs) + if err != nil { + return nil, err + } + args["which"] = arg0 + return args, nil +} +func (ec *executionContext) field_Query_interfaceUnion_argsWhich( + ctx context.Context, + rawArgs map[string]any, +) (model.Which, error) { + if _, ok := rawArgs["which"]; !ok { + var zeroVal model.Which + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("which")) + if tmp, ok := rawArgs["which"]; ok { + return ec.unmarshalNWhich2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐWhich(ctx, tmp) + } + + var zeroVal model.Which + return zeroVal, nil +} + +func (ec *executionContext) field_Query_userByIdAndName_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_userByIdAndName_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + arg1, err := ec.field_Query_userByIdAndName_argsUsername(ctx, rawArgs) + if err != nil { + return nil, err + } + args["username"] = arg1 + return args, nil +} +func (ec *executionContext) field_Query_userByIdAndName_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_userByIdAndName_argsUsername( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["username"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("username")) + if tmp, ok := rawArgs["username"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_user_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_user_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + return args, nil +} +func (ec *executionContext) field_Query_user_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_User_customGreeting_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_User_customGreeting_argsInput(ctx, rawArgs) + if err != nil { + return nil, err + } + args["input"] = arg0 + return args, nil +} +func (ec *executionContext) field_User_customGreeting_argsInput( + ctx context.Context, + rawArgs map[string]any, +) (model.GreetingInput, error) { + if _, ok := rawArgs["input"]; !ok { + var zeroVal model.GreetingInput + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("input")) + if tmp, ok := rawArgs["input"]; ok { + return ec.unmarshalNGreetingInput2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐGreetingInput(ctx, tmp) + } + + var zeroVal model.GreetingInput + return zeroVal, nil +} + +func (ec *executionContext) field_User_greeting_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_User_greeting_argsStyle(ctx, rawArgs) + if err != nil { + return nil, err + } + args["style"] = arg0 + return args, nil +} +func (ec *executionContext) field_User_greeting_argsStyle( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["style"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("style")) + if tmp, ok := rawArgs["style"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field___Directive_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field___Directive_args_argsIncludeDeprecated(ctx, rawArgs) + if err != nil { + return nil, err + } + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Directive_args_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (*bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal *bool + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + } + + var zeroVal *bool + return zeroVal, nil +} + +func (ec *executionContext) field___Field_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field___Field_args_argsIncludeDeprecated(ctx, rawArgs) + if err != nil { + return nil, err + } + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Field_args_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (*bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal *bool + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + } var zeroVal *bool return zeroVal, nil @@ -1130,67 +1708,776 @@ func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, ra args := map[string]any{} arg0, err := ec.field___Type_enumValues_argsIncludeDeprecated(ctx, rawArgs) if err != nil { - return nil, err + return nil, err + } + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Type_enumValues_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal bool + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2bool(ctx, tmp) + } + + var zeroVal bool + return zeroVal, nil +} + +func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field___Type_fields_argsIncludeDeprecated(ctx, rawArgs) + if err != nil { + return nil, err + } + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Type_fields_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal bool + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2bool(ctx, tmp) + } + + var zeroVal bool + return zeroVal, nil +} + +// endregion ***************************** args.gotpl ***************************** + +// region ************************** directives.gotpl ************************** + +// endregion ************************** directives.gotpl ************************** + +// region **************************** field.gotpl ***************************** + +func (ec *executionContext) _A_name(ctx context.Context, field graphql.CollectedField, obj *model.A) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_A_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_A_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "A", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Admin_id(ctx context.Context, field graphql.CollectedField, obj *model.Admin) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Admin_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Admin_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Admin", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Admin_username(ctx context.Context, field graphql.CollectedField, obj *model.Admin) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Admin_username(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Username, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Admin_username(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Admin", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Admin_role(ctx context.Context, field graphql.CollectedField, obj *model.Admin) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Admin_role(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Role, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Admin_role(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Admin", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _B_name(ctx context.Context, field graphql.CollectedField, obj *model.B) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_B_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_B_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "B", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _C_name(ctx context.Context, field graphql.CollectedField, obj *model.C) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_C_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*model.CDerObj) + fc.Result = res + return ec.marshalOCDerObj2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐCDerObj(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_C_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "C", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "first": + return ec.fieldContext_CDerObj_first(ctx, field) + case "middle": + return ec.fieldContext_CDerObj_middle(ctx, field) + case "last": + return ec.fieldContext_CDerObj_last(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type CDerObj", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _CDerObj_first(ctx context.Context, field graphql.CollectedField, obj *model.CDerObj) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CDerObj_first(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.First, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CDerObj_first(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CDerObj", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CDerObj_middle(ctx context.Context, field graphql.CollectedField, obj *model.CDerObj) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CDerObj_middle(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Middle, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CDerObj_middle(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CDerObj", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CDerObj_last(ctx context.Context, field graphql.CollectedField, obj *model.CDerObj) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CDerObj_last(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Last, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CDerObj_last(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CDerObj", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_id(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_a(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_a(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.A, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_a(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_b(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_b(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.B, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_b(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_c(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_c(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.C, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_c(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_d(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_d(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.D, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_d(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_e(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_e(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.E, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null } - args["includeDeprecated"] = arg0 - return args, nil -} -func (ec *executionContext) field___Type_enumValues_argsIncludeDeprecated( - ctx context.Context, - rawArgs map[string]any, -) (bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal bool - return zeroVal, nil + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2bool(ctx, tmp) +func (ec *executionContext) fieldContext_CacheEntity_e(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, } - - var zeroVal bool - return zeroVal, nil + return fc, nil } -func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { - var err error - args := map[string]any{} - arg0, err := ec.field___Type_fields_argsIncludeDeprecated(ctx, rawArgs) +func (ec *executionContext) _CacheEntity_f(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_f(ctx, field) if err != nil { - return nil, err + return graphql.Null } - args["includeDeprecated"] = arg0 - return args, nil -} -func (ec *executionContext) field___Type_fields_argsIncludeDeprecated( - ctx context.Context, - rawArgs map[string]any, -) (bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal bool - return zeroVal, nil + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.F, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null } - - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2bool(ctx, tmp) + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null } - - var zeroVal bool - return zeroVal, nil + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) } -// endregion ***************************** args.gotpl ***************************** - -// region ************************** directives.gotpl ************************** - -// endregion ************************** directives.gotpl ************************** - -// region **************************** field.gotpl ***************************** +func (ec *executionContext) fieldContext_CacheEntity_f(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} -func (ec *executionContext) _A_name(ctx context.Context, field graphql.CollectedField, obj *model.A) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_A_name(ctx, field) +func (ec *executionContext) _Cat_name(ctx context.Context, field graphql.CollectedField, obj *model.Cat) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Cat_name(ctx, field) if err != nil { return graphql.Null } @@ -1220,9 +2507,9 @@ func (ec *executionContext) _A_name(ctx context.Context, field graphql.Collected return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_A_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Cat_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "A", + Object: "Cat", Field: field, IsMethod: false, IsResolver: false, @@ -1233,8 +2520,8 @@ func (ec *executionContext) fieldContext_A_name(_ context.Context, field graphql return fc, nil } -func (ec *executionContext) _B_name(ctx context.Context, field graphql.CollectedField, obj *model.B) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_B_name(ctx, field) +func (ec *executionContext) _ConcreteListItem1_obj(ctx context.Context, field graphql.CollectedField, obj *model.ConcreteListItem1) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ConcreteListItem1_obj(ctx, field) if err != nil { return graphql.Null } @@ -1247,7 +2534,7 @@ func (ec *executionContext) _B_name(ctx context.Context, field graphql.Collected }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Name, nil + return obj.Obj, nil }) if err != nil { ec.Error(ctx, err) @@ -1259,26 +2546,70 @@ func (ec *executionContext) _B_name(ctx context.Context, field graphql.Collected } return graphql.Null } - res := resTmp.(string) + res := resTmp.(model.OtherInterface) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNOtherInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐOtherInterface(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_B_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_ConcreteListItem1_obj(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "B", + Object: "ConcreteListItem1", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") + return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") }, } return fc, nil } -func (ec *executionContext) _C_name(ctx context.Context, field graphql.CollectedField, obj *model.C) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_C_name(ctx, field) +func (ec *executionContext) _ConcreteListItem2_obj(ctx context.Context, field graphql.CollectedField, obj *model.ConcreteListItem2) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_ConcreteListItem2_obj(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Obj, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(model.OtherInterface) + fc.Result = res + return ec.marshalNOtherInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐOtherInterface(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_ConcreteListItem2_obj(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "ConcreteListItem2", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") + }, + } + return fc, nil +} + +func (ec *executionContext) _D_name(ctx context.Context, field graphql.CollectedField, obj *model.D) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_D_name(ctx, field) if err != nil { return graphql.Null } @@ -1305,9 +2636,9 @@ func (ec *executionContext) _C_name(ctx context.Context, field graphql.Collected return ec.marshalOCDerObj2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐCDerObj(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_C_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_D_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "C", + Object: "D", Field: field, IsMethod: false, IsResolver: false, @@ -1326,8 +2657,8 @@ func (ec *executionContext) fieldContext_C_name(_ context.Context, field graphql return fc, nil } -func (ec *executionContext) _CDerObj_first(ctx context.Context, field graphql.CollectedField, obj *model.CDerObj) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_CDerObj_first(ctx, field) +func (ec *executionContext) _Entity_findAdminByID(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Entity_findAdminByID(ctx, field) if err != nil { return graphql.Null } @@ -1340,7 +2671,7 @@ func (ec *executionContext) _CDerObj_first(ctx context.Context, field graphql.Co }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.First, nil + return ec.resolvers.Entity().FindAdminByID(rctx, fc.Args["id"].(string)) }) if err != nil { ec.Error(ctx, err) @@ -1352,26 +2683,116 @@ func (ec *executionContext) _CDerObj_first(ctx context.Context, field graphql.Co } return graphql.Null } - res := resTmp.(string) + res := resTmp.(*model.Admin) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNAdmin2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐAdmin(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_CDerObj_first(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Entity_findAdminByID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "CDerObj", + Object: "Entity", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") + switch field.Name { + case "id": + return ec.fieldContext_Admin_id(ctx, field) + case "username": + return ec.fieldContext_Admin_username(ctx, field) + case "role": + return ec.fieldContext_Admin_role(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Admin", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Entity_findAdminByID_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Entity_findCacheEntityByID(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Entity_findCacheEntityByID(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Entity().FindCacheEntityByID(rctx, fc.Args["id"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.CacheEntity) + fc.Result = res + return ec.marshalNCacheEntity2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐCacheEntity(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Entity_findCacheEntityByID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Entity", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_CacheEntity_id(ctx, field) + case "a": + return ec.fieldContext_CacheEntity_a(ctx, field) + case "b": + return ec.fieldContext_CacheEntity_b(ctx, field) + case "c": + return ec.fieldContext_CacheEntity_c(ctx, field) + case "d": + return ec.fieldContext_CacheEntity_d(ctx, field) + case "e": + return ec.fieldContext_CacheEntity_e(ctx, field) + case "f": + return ec.fieldContext_CacheEntity_f(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type CacheEntity", field.Name) }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Entity_findCacheEntityByID_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } -func (ec *executionContext) _CDerObj_middle(ctx context.Context, field graphql.CollectedField, obj *model.CDerObj) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_CDerObj_middle(ctx, field) +func (ec *executionContext) _Entity_findUserByID(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Entity_findUserByID(ctx, field) if err != nil { return graphql.Null } @@ -1384,7 +2805,7 @@ func (ec *executionContext) _CDerObj_middle(ctx context.Context, field graphql.C }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Middle, nil + return ec.resolvers.Entity().FindUserByID(rctx, fc.Args["id"].(string)) }) if err != nil { ec.Error(ctx, err) @@ -1396,26 +2817,53 @@ func (ec *executionContext) _CDerObj_middle(ctx context.Context, field graphql.C } return graphql.Null } - res := resTmp.(string) + res := resTmp.(*model.User) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_CDerObj_middle(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Entity_findUserByID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "CDerObj", + Object: "Entity", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") + switch field.Name { + case "id": + return ec.fieldContext_User_id(ctx, field) + case "username": + return ec.fieldContext_User_username(ctx, field) + case "nickname": + return ec.fieldContext_User_nickname(ctx, field) + case "history": + return ec.fieldContext_User_history(ctx, field) + case "realName": + return ec.fieldContext_User_realName(ctx, field) + case "greeting": + return ec.fieldContext_User_greeting(ctx, field) + case "customGreeting": + return ec.fieldContext_User_customGreeting(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Entity_findUserByID_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } -func (ec *executionContext) _CDerObj_last(ctx context.Context, field graphql.CollectedField, obj *model.CDerObj) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_CDerObj_last(ctx, field) +func (ec *executionContext) _Mutation_updateUsername(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_updateUsername(ctx, field) if err != nil { return graphql.Null } @@ -1428,7 +2876,7 @@ func (ec *executionContext) _CDerObj_last(ctx context.Context, field graphql.Col }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Last, nil + return ec.resolvers.Mutation().UpdateUsername(rctx, fc.Args["id"].(string), fc.Args["newUsername"].(string)) }) if err != nil { ec.Error(ctx, err) @@ -1440,26 +2888,53 @@ func (ec *executionContext) _CDerObj_last(ctx context.Context, field graphql.Col } return graphql.Null } - res := resTmp.(string) + res := resTmp.(*model.User) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_CDerObj_last(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Mutation_updateUsername(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "CDerObj", + Object: "Mutation", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") + switch field.Name { + case "id": + return ec.fieldContext_User_id(ctx, field) + case "username": + return ec.fieldContext_User_username(ctx, field) + case "nickname": + return ec.fieldContext_User_nickname(ctx, field) + case "history": + return ec.fieldContext_User_history(ctx, field) + case "realName": + return ec.fieldContext_User_realName(ctx, field) + case "greeting": + return ec.fieldContext_User_greeting(ctx, field) + case "customGreeting": + return ec.fieldContext_User_customGreeting(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_updateUsername_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } -func (ec *executionContext) _Cat_name(ctx context.Context, field graphql.CollectedField, obj *model.Cat) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Cat_name(ctx, field) +func (ec *executionContext) _Product_upc(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Product_upc(ctx, field) if err != nil { return graphql.Null } @@ -1472,7 +2947,7 @@ func (ec *executionContext) _Cat_name(ctx context.Context, field graphql.Collect }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Name, nil + return obj.Upc, nil }) if err != nil { ec.Error(ctx, err) @@ -1489,9 +2964,9 @@ func (ec *executionContext) _Cat_name(ctx context.Context, field graphql.Collect return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Cat_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Product_upc(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Cat", + Object: "Product", Field: field, IsMethod: false, IsResolver: false, @@ -1502,8 +2977,8 @@ func (ec *executionContext) fieldContext_Cat_name(_ context.Context, field graph return fc, nil } -func (ec *executionContext) _ConcreteListItem1_obj(ctx context.Context, field graphql.CollectedField, obj *model.ConcreteListItem1) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_ConcreteListItem1_obj(ctx, field) +func (ec *executionContext) _Purchase_product(ctx context.Context, field graphql.CollectedField, obj *model.Purchase) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Purchase_product(ctx, field) if err != nil { return graphql.Null } @@ -1516,7 +2991,7 @@ func (ec *executionContext) _ConcreteListItem1_obj(ctx context.Context, field gr }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Obj, nil + return obj.Product, nil }) if err != nil { ec.Error(ctx, err) @@ -1528,26 +3003,30 @@ func (ec *executionContext) _ConcreteListItem1_obj(ctx context.Context, field gr } return graphql.Null } - res := resTmp.(model.OtherInterface) + res := resTmp.(*model.Product) fc.Result = res - return ec.marshalNOtherInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐOtherInterface(ctx, field.Selections, res) + return ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_ConcreteListItem1_obj(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Purchase_product(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "ConcreteListItem1", + Object: "Purchase", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) }, } return fc, nil } -func (ec *executionContext) _ConcreteListItem2_obj(ctx context.Context, field graphql.CollectedField, obj *model.ConcreteListItem2) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_ConcreteListItem2_obj(ctx, field) +func (ec *executionContext) _Purchase_wallet(ctx context.Context, field graphql.CollectedField, obj *model.Purchase) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Purchase_wallet(ctx, field) if err != nil { return graphql.Null } @@ -1560,26 +3039,23 @@ func (ec *executionContext) _ConcreteListItem2_obj(ctx context.Context, field gr }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Obj, nil + return obj.Wallet, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } - res := resTmp.(model.OtherInterface) + res := resTmp.(model.Wallet) fc.Result = res - return ec.marshalNOtherInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐOtherInterface(ctx, field.Selections, res) + return ec.marshalOWallet2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐWallet(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_ConcreteListItem2_obj(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Purchase_wallet(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "ConcreteListItem2", + Object: "Purchase", Field: field, IsMethod: false, IsResolver: false, @@ -1590,8 +3066,8 @@ func (ec *executionContext) fieldContext_ConcreteListItem2_obj(_ context.Context return fc, nil } -func (ec *executionContext) _D_name(ctx context.Context, field graphql.CollectedField, obj *model.D) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_D_name(ctx, field) +func (ec *executionContext) _Purchase_quantity(ctx context.Context, field graphql.CollectedField, obj *model.Purchase) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Purchase_quantity(ctx, field) if err != nil { return graphql.Null } @@ -1604,43 +3080,38 @@ func (ec *executionContext) _D_name(ctx context.Context, field graphql.Collected }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Name, nil + return obj.Quantity, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } return graphql.Null } - res := resTmp.(*model.CDerObj) + res := resTmp.(int) fc.Result = res - return ec.marshalOCDerObj2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐCDerObj(ctx, field.Selections, res) + return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_D_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Purchase_quantity(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "D", + Object: "Purchase", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "first": - return ec.fieldContext_CDerObj_first(ctx, field) - case "middle": - return ec.fieldContext_CDerObj_middle(ctx, field) - case "last": - return ec.fieldContext_CDerObj_last(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type CDerObj", field.Name) + return nil, errors.New("field of type Int does not have child fields") }, } return fc, nil } -func (ec *executionContext) _Entity_findUserByID(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Entity_findUserByID(ctx, field) +func (ec *executionContext) _Query_me(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_me(ctx, field) if err != nil { return graphql.Null } @@ -1653,26 +3124,23 @@ func (ec *executionContext) _Entity_findUserByID(ctx context.Context, field grap }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Entity().FindUserByID(rctx, fc.Args["id"].(string)) + return ec.resolvers.Query().Me(rctx) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } res := resTmp.(*model.User) fc.Result = res - return ec.marshalNUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) + return ec.marshalOUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Entity_findUserByID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_me(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Entity", + Object: "Query", Field: field, IsMethod: true, IsResolver: true, @@ -1682,30 +3150,25 @@ func (ec *executionContext) fieldContext_Entity_findUserByID(ctx context.Context return ec.fieldContext_User_id(ctx, field) case "username": return ec.fieldContext_User_username(ctx, field) + case "nickname": + return ec.fieldContext_User_nickname(ctx, field) case "history": return ec.fieldContext_User_history(ctx, field) case "realName": return ec.fieldContext_User_realName(ctx, field) + case "greeting": + return ec.fieldContext_User_greeting(ctx, field) + case "customGreeting": + return ec.fieldContext_User_customGreeting(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, } - defer func() { - if r := recover(); r != nil { - err = ec.Recover(ctx, r) - ec.Error(ctx, err) - } - }() - ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Entity_findUserByID_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { - ec.Error(ctx, err) - return fc, err - } return fc, nil } -func (ec *executionContext) _Product_upc(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Product_upc(ctx, field) +func (ec *executionContext) _Query_user(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_user(ctx, field) if err != nil { return graphql.Null } @@ -1718,86 +3181,62 @@ func (ec *executionContext) _Product_upc(ctx context.Context, field graphql.Coll }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Upc, nil + return ec.resolvers.Query().User(rctx, fc.Args["id"].(string)) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } - res := resTmp.(string) + res := resTmp.(*model.User) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalOUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Product_upc(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_user(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Product", + Object: "Query", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") + switch field.Name { + case "id": + return ec.fieldContext_User_id(ctx, field) + case "username": + return ec.fieldContext_User_username(ctx, field) + case "nickname": + return ec.fieldContext_User_nickname(ctx, field) + case "history": + return ec.fieldContext_User_history(ctx, field) + case "realName": + return ec.fieldContext_User_realName(ctx, field) + case "greeting": + return ec.fieldContext_User_greeting(ctx, field) + case "customGreeting": + return ec.fieldContext_User_customGreeting(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, } - return fc, nil -} - -func (ec *executionContext) _Purchase_product(ctx context.Context, field graphql.CollectedField, obj *model.Purchase) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Purchase_product(ctx, field) - if err != nil { - return graphql.Null - } - ctx = graphql.WithFieldContext(ctx, fc) defer func() { if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return obj.Product, nil - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null - } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") + err = ec.Recover(ctx, r) + ec.Error(ctx, err) } - return graphql.Null - } - res := resTmp.(*model.Product) - fc.Result = res - return ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) -} - -func (ec *executionContext) fieldContext_Purchase_product(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "Purchase", - Field: field, - IsMethod: false, - IsResolver: false, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "upc": - return ec.fieldContext_Product_upc(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) - }, + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_user_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err } return fc, nil } -func (ec *executionContext) _Purchase_wallet(ctx context.Context, field graphql.CollectedField, obj *model.Purchase) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Purchase_wallet(ctx, field) +func (ec *executionContext) _Query_userByIdAndName(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_userByIdAndName(ctx, field) if err != nil { return graphql.Null } @@ -1810,7 +3249,7 @@ func (ec *executionContext) _Purchase_wallet(ctx context.Context, field graphql. }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Wallet, nil + return ec.resolvers.Query().UserByIDAndName(rctx, fc.Args["id"].(string), fc.Args["username"].(string)) }) if err != nil { ec.Error(ctx, err) @@ -1819,26 +3258,53 @@ func (ec *executionContext) _Purchase_wallet(ctx context.Context, field graphql. if resTmp == nil { return graphql.Null } - res := resTmp.(model.Wallet) + res := resTmp.(*model.User) fc.Result = res - return ec.marshalOWallet2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐWallet(ctx, field.Selections, res) + return ec.marshalOUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Purchase_wallet(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_userByIdAndName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Purchase", + Object: "Query", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") + switch field.Name { + case "id": + return ec.fieldContext_User_id(ctx, field) + case "username": + return ec.fieldContext_User_username(ctx, field) + case "nickname": + return ec.fieldContext_User_nickname(ctx, field) + case "history": + return ec.fieldContext_User_history(ctx, field) + case "realName": + return ec.fieldContext_User_realName(ctx, field) + case "greeting": + return ec.fieldContext_User_greeting(ctx, field) + case "customGreeting": + return ec.fieldContext_User_customGreeting(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_userByIdAndName_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } -func (ec *executionContext) _Purchase_quantity(ctx context.Context, field graphql.CollectedField, obj *model.Purchase) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Purchase_quantity(ctx, field) +func (ec *executionContext) _Query_meInterface(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_meInterface(ctx, field) if err != nil { return graphql.Null } @@ -1851,38 +3317,35 @@ func (ec *executionContext) _Purchase_quantity(ctx context.Context, field graphq }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Quantity, nil + return ec.resolvers.Query().MeInterface(rctx) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } - res := resTmp.(int) + res := resTmp.(model.Identifiable) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalOIdentifiable2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐIdentifiable(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Purchase_quantity(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_meInterface(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Purchase", + Object: "Query", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type Int does not have child fields") + return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") }, } return fc, nil } -func (ec *executionContext) _Query_me(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query_me(ctx, field) +func (ec *executionContext) _Query_meUnion(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_meUnion(ctx, field) if err != nil { return graphql.Null } @@ -1895,7 +3358,7 @@ func (ec *executionContext) _Query_me(ctx context.Context, field graphql.Collect }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().Me(rctx) + return ec.resolvers.Query().MeUnion(rctx) }) if err != nil { ec.Error(ctx, err) @@ -1904,29 +3367,19 @@ func (ec *executionContext) _Query_me(ctx context.Context, field graphql.Collect if resTmp == nil { return graphql.Null } - res := resTmp.(*model.User) + res := resTmp.(model.MeUnion) fc.Result = res - return ec.marshalOUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) + return ec.marshalOMeUnion2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐMeUnion(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_me(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_meUnion(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, IsMethod: true, IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "id": - return ec.fieldContext_User_id(ctx, field) - case "username": - return ec.fieldContext_User_username(ctx, field) - case "history": - return ec.fieldContext_User_history(ctx, field) - case "realName": - return ec.fieldContext_User_realName(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type User", field.Name) + return nil, errors.New("field of type MeUnion does not have child fields") }, } return fc, nil @@ -2059,6 +3512,77 @@ func (ec *executionContext) fieldContext_Query_cat(_ context.Context, field grap return fc, nil } +func (ec *executionContext) _Query_cacheEntity(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_cacheEntity(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().CacheEntity(rctx, fc.Args["id"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.CacheEntity) + fc.Result = res + return ec.marshalNCacheEntity2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐCacheEntity(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_cacheEntity(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_CacheEntity_id(ctx, field) + case "a": + return ec.fieldContext_CacheEntity_a(ctx, field) + case "b": + return ec.fieldContext_CacheEntity_b(ctx, field) + case "c": + return ec.fieldContext_CacheEntity_c(ctx, field) + case "d": + return ec.fieldContext_CacheEntity_d(ctx, field) + case "e": + return ec.fieldContext_CacheEntity_e(ctx, field) + case "f": + return ec.fieldContext_CacheEntity_f(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type CacheEntity", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_cacheEntity_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + func (ec *executionContext) _Query_interfaceUnion(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Query_interfaceUnion(ctx, field) if err != nil { @@ -3332,8 +4856,140 @@ func (ec *executionContext) fieldContext_SomeType3_someObject(_ context.Context, return fc, nil } -func (ec *executionContext) _TitleName_a(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_TitleName_a(ctx, field) +func (ec *executionContext) _TitleName_a(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_TitleName_a(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.A, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_TitleName_a(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "TitleName", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _TitleName_b(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_TitleName_b(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.B, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_TitleName_b(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "TitleName", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _TitleName_c(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_TitleName_c(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.C, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_TitleName_c(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "TitleName", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _TitleName_title(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_TitleName_title(ctx, field) if err != nil { return graphql.Null } @@ -3346,7 +5002,7 @@ func (ec *executionContext) _TitleName_a(ctx context.Context, field graphql.Coll }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.A, nil + return obj.Title, nil }) if err != nil { ec.Error(ctx, err) @@ -3363,7 +5019,7 @@ func (ec *executionContext) _TitleName_a(ctx context.Context, field graphql.Coll return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TitleName_a(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_TitleName_title(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "TitleName", Field: field, @@ -3376,8 +5032,8 @@ func (ec *executionContext) fieldContext_TitleName_a(_ context.Context, field gr return fc, nil } -func (ec *executionContext) _TitleName_b(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_TitleName_b(ctx, field) +func (ec *executionContext) _TitleName_name(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_TitleName_name(ctx, field) if err != nil { return graphql.Null } @@ -3390,7 +5046,7 @@ func (ec *executionContext) _TitleName_b(ctx context.Context, field graphql.Coll }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.B, nil + return obj.Name, nil }) if err != nil { ec.Error(ctx, err) @@ -3407,7 +5063,7 @@ func (ec *executionContext) _TitleName_b(ctx context.Context, field graphql.Coll return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TitleName_b(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_TitleName_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "TitleName", Field: field, @@ -3420,8 +5076,8 @@ func (ec *executionContext) fieldContext_TitleName_b(_ context.Context, field gr return fc, nil } -func (ec *executionContext) _TitleName_c(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_TitleName_c(ctx, field) +func (ec *executionContext) _User_id(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_id(ctx, field) if err != nil { return graphql.Null } @@ -3434,7 +5090,7 @@ func (ec *executionContext) _TitleName_c(ctx context.Context, field graphql.Coll }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.C, nil + return obj.ID, nil }) if err != nil { ec.Error(ctx, err) @@ -3448,24 +5104,24 @@ func (ec *executionContext) _TitleName_c(ctx context.Context, field graphql.Coll } res := resTmp.(string) fc.Result = res - return ec.marshalNString2string(ctx, field.Selections, res) + return ec.marshalNID2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TitleName_c(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "TitleName", + Object: "User", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type String does not have child fields") + return nil, errors.New("field of type ID does not have child fields") }, } return fc, nil } -func (ec *executionContext) _TitleName_title(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_TitleName_title(ctx, field) +func (ec *executionContext) _User_username(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_username(ctx, field) if err != nil { return graphql.Null } @@ -3478,7 +5134,7 @@ func (ec *executionContext) _TitleName_title(ctx context.Context, field graphql. }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Title, nil + return obj.Username, nil }) if err != nil { ec.Error(ctx, err) @@ -3495,9 +5151,9 @@ func (ec *executionContext) _TitleName_title(ctx context.Context, field graphql. return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TitleName_title(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_username(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "TitleName", + Object: "User", Field: field, IsMethod: false, IsResolver: false, @@ -3508,8 +5164,8 @@ func (ec *executionContext) fieldContext_TitleName_title(_ context.Context, fiel return fc, nil } -func (ec *executionContext) _TitleName_name(ctx context.Context, field graphql.CollectedField, obj *model.TitleName) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_TitleName_name(ctx, field) +func (ec *executionContext) _User_nickname(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_nickname(ctx, field) if err != nil { return graphql.Null } @@ -3522,7 +5178,7 @@ func (ec *executionContext) _TitleName_name(ctx context.Context, field graphql.C }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Name, nil + return obj.Nickname, nil }) if err != nil { ec.Error(ctx, err) @@ -3539,9 +5195,9 @@ func (ec *executionContext) _TitleName_name(ctx context.Context, field graphql.C return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_TitleName_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_nickname(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "TitleName", + Object: "User", Field: field, IsMethod: false, IsResolver: false, @@ -3552,8 +5208,8 @@ func (ec *executionContext) fieldContext_TitleName_name(_ context.Context, field return fc, nil } -func (ec *executionContext) _User_id(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_User_id(ctx, field) +func (ec *executionContext) _User_history(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_history(ctx, field) if err != nil { return graphql.Null } @@ -3566,7 +5222,7 @@ func (ec *executionContext) _User_id(ctx context.Context, field graphql.Collecte }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.ID, nil + return obj.History, nil }) if err != nil { ec.Error(ctx, err) @@ -3578,26 +5234,26 @@ func (ec *executionContext) _User_id(ctx context.Context, field graphql.Collecte } return graphql.Null } - res := resTmp.(string) + res := resTmp.([]model.History) fc.Result = res - return ec.marshalNID2string(ctx, field.Selections, res) + return ec.marshalNHistory2ᚕgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐHistoryᚄ(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_User_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_history(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "User", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type ID does not have child fields") + return nil, errors.New("field of type History does not have child fields") }, } return fc, nil } -func (ec *executionContext) _User_username(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_User_username(ctx, field) +func (ec *executionContext) _User_realName(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_realName(ctx, field) if err != nil { return graphql.Null } @@ -3610,7 +5266,7 @@ func (ec *executionContext) _User_username(ctx context.Context, field graphql.Co }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.Username, nil + return obj.RealName, nil }) if err != nil { ec.Error(ctx, err) @@ -3627,7 +5283,7 @@ func (ec *executionContext) _User_username(ctx context.Context, field graphql.Co return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_User_username(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_realName(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "User", Field: field, @@ -3640,8 +5296,8 @@ func (ec *executionContext) fieldContext_User_username(_ context.Context, field return fc, nil } -func (ec *executionContext) _User_history(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_User_history(ctx, field) +func (ec *executionContext) _User_greeting(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_greeting(ctx, field) if err != nil { return graphql.Null } @@ -3654,7 +5310,7 @@ func (ec *executionContext) _User_history(ctx context.Context, field graphql.Col }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.History, nil + return ec.resolvers.User().Greeting(rctx, obj, fc.Args["style"].(string)) }) if err != nil { ec.Error(ctx, err) @@ -3666,26 +5322,37 @@ func (ec *executionContext) _User_history(ctx context.Context, field graphql.Col } return graphql.Null } - res := resTmp.([]model.History) + res := resTmp.(string) fc.Result = res - return ec.marshalNHistory2ᚕgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐHistoryᚄ(ctx, field.Selections, res) + return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_User_history(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_greeting(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "User", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type History does not have child fields") + return nil, errors.New("field of type String does not have child fields") }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_User_greeting_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } -func (ec *executionContext) _User_realName(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_User_realName(ctx, field) +func (ec *executionContext) _User_customGreeting(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_customGreeting(ctx, field) if err != nil { return graphql.Null } @@ -3698,7 +5365,7 @@ func (ec *executionContext) _User_realName(ctx context.Context, field graphql.Co }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.RealName, nil + return ec.resolvers.User().CustomGreeting(rctx, obj, fc.Args["input"].(model.GreetingInput)) }) if err != nil { ec.Error(ctx, err) @@ -3715,16 +5382,27 @@ func (ec *executionContext) _User_realName(ctx context.Context, field graphql.Co return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_User_realName(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_User_customGreeting(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "User", Field: field, - IsMethod: false, - IsResolver: false, + IsMethod: true, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { return nil, errors.New("field of type String does not have child fields") }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_User_customGreeting_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } @@ -5984,6 +7662,74 @@ func (ec *executionContext) fieldContext___Type_isOneOf(_ context.Context, field // region **************************** input.gotpl ***************************** +func (ec *executionContext) unmarshalInputGreetingFormatting(ctx context.Context, obj any) (model.GreetingFormatting, error) { + var it model.GreetingFormatting + asMap := map[string]any{} + for k, v := range obj.(map[string]any) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"uppercase", "prefix"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "uppercase": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("uppercase")) + data, err := ec.unmarshalOBoolean2ᚖbool(ctx, v) + if err != nil { + return it, err + } + it.Uppercase = data + case "prefix": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("prefix")) + data, err := ec.unmarshalOString2ᚖstring(ctx, v) + if err != nil { + return it, err + } + it.Prefix = data + } + } + + return it, nil +} + +func (ec *executionContext) unmarshalInputGreetingInput(ctx context.Context, obj any) (model.GreetingInput, error) { + var it model.GreetingInput + asMap := map[string]any{} + for k, v := range obj.(map[string]any) { + asMap[k] = v + } + + fieldsInOrder := [...]string{"style", "formatting"} + for _, k := range fieldsInOrder { + v, ok := asMap[k] + if !ok { + continue + } + switch k { + case "style": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("style")) + data, err := ec.unmarshalNGreetingStyle2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐGreetingStyle(ctx, v) + if err != nil { + return it, err + } + it.Style = data + case "formatting": + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("formatting")) + data, err := ec.unmarshalOGreetingFormatting2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐGreetingFormatting(ctx, v) + if err != nil { + return it, err + } + it.Formatting = data + } + } + + return it, nil +} + // endregion **************************** input.gotpl ***************************** // region ************************** interface.gotpl *************************** @@ -6114,6 +7860,13 @@ func (ec *executionContext) _Identifiable(ctx context.Context, sel ast.Selection return graphql.Null } return ec._User(ctx, sel, obj) + case model.Admin: + return ec._Admin(ctx, sel, &obj) + case *model.Admin: + if obj == nil { + return graphql.Null + } + return ec._Admin(ctx, sel, obj) default: panic(fmt.Errorf("unexpected type %T", obj)) } @@ -6135,6 +7888,29 @@ func (ec *executionContext) _Info(ctx context.Context, sel ast.SelectionSet, obj } } +func (ec *executionContext) _MeUnion(ctx context.Context, sel ast.SelectionSet, obj model.MeUnion) graphql.Marshaler { + switch obj := (obj).(type) { + case nil: + return graphql.Null + case model.User: + return ec._User(ctx, sel, &obj) + case *model.User: + if obj == nil { + return graphql.Null + } + return ec._User(ctx, sel, obj) + case model.Admin: + return ec._Admin(ctx, sel, &obj) + case *model.Admin: + if obj == nil { + return graphql.Null + } + return ec._Admin(ctx, sel, obj) + default: + panic(fmt.Errorf("unexpected type %T", obj)) + } +} + func (ec *executionContext) _Name(ctx context.Context, sel ast.SelectionSet, obj model.Name) graphql.Marshaler { switch obj := (obj).(type) { case nil: @@ -6316,6 +8092,13 @@ func (ec *executionContext) __Entity(ctx context.Context, sel ast.SelectionSet, return graphql.Null } return ec._User(ctx, sel, obj) + case model.Admin: + return ec._Admin(ctx, sel, &obj) + case *model.Admin: + if obj == nil { + return graphql.Null + } + return ec._Admin(ctx, sel, obj) case model.Product: return ec._Product(ctx, sel, &obj) case *model.Product: @@ -6323,6 +8106,13 @@ func (ec *executionContext) __Entity(ctx context.Context, sel ast.SelectionSet, return graphql.Null } return ec._Product(ctx, sel, obj) + case model.CacheEntity: + return ec._CacheEntity(ctx, sel, &obj) + case *model.CacheEntity: + if obj == nil { + return graphql.Null + } + return ec._CacheEntity(ctx, sel, obj) default: panic(fmt.Errorf("unexpected type %T", obj)) } @@ -6371,6 +8161,55 @@ func (ec *executionContext) _A(ctx context.Context, sel ast.SelectionSet, obj *m return out } +var adminImplementors = []string{"Admin", "Identifiable", "MeUnion", "_Entity"} + +func (ec *executionContext) _Admin(ctx context.Context, sel ast.SelectionSet, obj *model.Admin) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, adminImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Admin") + case "id": + out.Values[i] = ec._Admin_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "username": + out.Values[i] = ec._Admin_username(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "role": + out.Values[i] = ec._Admin_role(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var bImplementors = []string{"B", "AB", "Namer"} func (ec *executionContext) _B(ctx context.Context, sel ast.SelectionSet, obj *model.B) graphql.Marshaler { @@ -6495,6 +8334,75 @@ func (ec *executionContext) _CDerObj(ctx context.Context, sel ast.SelectionSet, return out } +var cacheEntityImplementors = []string{"CacheEntity", "_Entity"} + +func (ec *executionContext) _CacheEntity(ctx context.Context, sel ast.SelectionSet, obj *model.CacheEntity) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, cacheEntityImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("CacheEntity") + case "id": + out.Values[i] = ec._CacheEntity_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "a": + out.Values[i] = ec._CacheEntity_a(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "b": + out.Values[i] = ec._CacheEntity_b(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "c": + out.Values[i] = ec._CacheEntity_c(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "d": + out.Values[i] = ec._CacheEntity_d(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "e": + out.Values[i] = ec._CacheEntity_e(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "f": + out.Values[i] = ec._CacheEntity_f(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var catImplementors = []string{"Cat"} func (ec *executionContext) _Cat(ctx context.Context, sel ast.SelectionSet, obj *model.Cat) graphql.Marshaler { @@ -6667,6 +8575,50 @@ func (ec *executionContext) _Entity(ctx context.Context, sel ast.SelectionSet) g switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Entity") + case "findAdminByID": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Entity_findAdminByID(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "findCacheEntityByID": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Entity_findCacheEntityByID(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "findUserByID": field := field @@ -6712,6 +8664,55 @@ func (ec *executionContext) _Entity(ctx context.Context, sel ast.SelectionSet) g return out } +var mutationImplementors = []string{"Mutation"} + +func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, mutationImplementors) + ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{ + Object: "Mutation", + }) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{ + Object: field.Name, + Field: field, + }) + + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("Mutation") + case "updateUsername": + out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) { + return ec._Mutation_updateUsername(ctx, field) + }) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var productImplementors = []string{"Product", "_Entity"} func (ec *executionContext) _Product(ctx context.Context, sel ast.SelectionSet, obj *model.Product) graphql.Marshaler { @@ -6834,6 +8835,82 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "user": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_user(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "userByIdAndName": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_userByIdAndName(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "meInterface": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_meInterface(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "meUnion": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_meUnion(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "identifiable": field := field @@ -6891,6 +8968,28 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "cacheEntity": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_cacheEntity(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "interfaceUnion": field := field @@ -7457,7 +9556,7 @@ func (ec *executionContext) _TitleName(ctx context.Context, sel ast.SelectionSet return out } -var userImplementors = []string{"User", "Identifiable", "_Entity"} +var userImplementors = []string{"User", "Identifiable", "MeUnion", "_Entity"} func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj *model.User) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, userImplementors) @@ -7471,23 +9570,100 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj case "id": out.Values[i] = ec._User_id(ctx, field, obj) if out.Values[i] == graphql.Null { - out.Invalids++ + atomic.AddUint32(&out.Invalids, 1) } case "username": out.Values[i] = ec._User_username(ctx, field, obj) if out.Values[i] == graphql.Null { - out.Invalids++ + atomic.AddUint32(&out.Invalids, 1) + } + case "nickname": + out.Values[i] = ec._User_nickname(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) } case "history": out.Values[i] = ec._User_history(ctx, field, obj) if out.Values[i] == graphql.Null { - out.Invalids++ + atomic.AddUint32(&out.Invalids, 1) } case "realName": out.Values[i] = ec._User_realName(ctx, field, obj) if out.Values[i] == graphql.Null { - out.Invalids++ + atomic.AddUint32(&out.Invalids, 1) + } + case "greeting": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_greeting(ctx, field, obj) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "customGreeting": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_customGreeting(ctx, field, obj) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) default: panic("unknown field " + strconv.Quote(field.Name)) } @@ -7980,6 +10156,20 @@ func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, o // region ***************************** type.gotpl ***************************** +func (ec *executionContext) marshalNAdmin2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐAdmin(ctx context.Context, sel ast.SelectionSet, v model.Admin) graphql.Marshaler { + return ec._Admin(ctx, sel, &v) +} + +func (ec *executionContext) marshalNAdmin2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐAdmin(ctx context.Context, sel ast.SelectionSet, v *model.Admin) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._Admin(ctx, sel, v) +} + func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v any) (bool, error) { res, err := graphql.UnmarshalBoolean(v) return res, graphql.ErrorOnPath(ctx, err) @@ -7996,6 +10186,20 @@ func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.Se return res } +func (ec *executionContext) marshalNCacheEntity2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐCacheEntity(ctx context.Context, sel ast.SelectionSet, v model.CacheEntity) graphql.Marshaler { + return ec._CacheEntity(ctx, sel, &v) +} + +func (ec *executionContext) marshalNCacheEntity2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐCacheEntity(ctx context.Context, sel ast.SelectionSet, v *model.CacheEntity) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._CacheEntity(ctx, sel, v) +} + func (ec *executionContext) unmarshalNFloat2float64(ctx context.Context, v any) (float64, error) { res, err := graphql.UnmarshalFloatContext(ctx, v) return res, graphql.ErrorOnPath(ctx, err) @@ -8012,6 +10216,21 @@ func (ec *executionContext) marshalNFloat2float64(ctx context.Context, sel ast.S return graphql.WrapContextMarshaler(ctx, res) } +func (ec *executionContext) unmarshalNGreetingInput2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐGreetingInput(ctx context.Context, v any) (model.GreetingInput, error) { + res, err := ec.unmarshalInputGreetingInput(ctx, v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) unmarshalNGreetingStyle2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐGreetingStyle(ctx context.Context, v any) (model.GreetingStyle, error) { + var res model.GreetingStyle + err := res.UnmarshalGQL(v) + return res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalNGreetingStyle2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐGreetingStyle(ctx context.Context, sel ast.SelectionSet, v model.GreetingStyle) graphql.Marshaler { + return v +} + func (ec *executionContext) marshalNHistory2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐHistory(ctx context.Context, sel ast.SelectionSet, v model.History) graphql.Marshaler { if v == nil { if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { @@ -8708,6 +10927,14 @@ func (ec *executionContext) marshalOCat2ᚖgithubᚗcomᚋwundergraphᚋgraphql return ec._Cat(ctx, sel, v) } +func (ec *executionContext) unmarshalOGreetingFormatting2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐGreetingFormatting(ctx context.Context, v any) (*model.GreetingFormatting, error) { + if v == nil { + return nil, nil + } + res, err := ec.unmarshalInputGreetingFormatting(ctx, v) + return &res, graphql.ErrorOnPath(ctx, err) +} + func (ec *executionContext) marshalOHistory2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐHistory(ctx context.Context, sel ast.SelectionSet, v model.History) graphql.Marshaler { if v == nil { return graphql.Null @@ -8763,6 +10990,13 @@ func (ec *executionContext) marshalOIdentifiable2githubᚗcomᚋwundergraphᚋgr return ec._Identifiable(ctx, sel, v) } +func (ec *executionContext) marshalOMeUnion2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐMeUnion(ctx context.Context, sel ast.SelectionSet, v model.MeUnion) graphql.Marshaler { + if v == nil { + return graphql.Null + } + return ec._MeUnion(ctx, sel, v) +} + func (ec *executionContext) marshalOSomeInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋaccountsᚋgraphᚋmodelᚐSomeInterface(ctx context.Context, sel ast.SelectionSet, v model.SomeInterface) graphql.Marshaler { if v == nil { return graphql.Null diff --git a/execution/federationtesting/accounts/graph/handler.go b/execution/federationtesting/accounts/graph/handler.go index b48a93acac..f415da9ce9 100644 --- a/execution/federationtesting/accounts/graph/handler.go +++ b/execution/federationtesting/accounts/graph/handler.go @@ -20,7 +20,7 @@ var TestOptions = EndpointOptions{ } func GraphQLEndpointHandler(opts EndpointOptions) http.Handler { - srv := handler.New(generated.NewExecutableSchema(generated.Config{Resolvers: &Resolver{}})) + srv := handler.New(generated.NewExecutableSchema(generated.Config{Resolvers: NewResolver()})) srv.AddTransport(transport.POST{}) srv.Use(extension.Introspection{}) if opts.EnableDebug { diff --git a/execution/federationtesting/accounts/graph/model/models_gen.go b/execution/federationtesting/accounts/graph/model/models_gen.go index 43e7569711..61cd0c8528 100644 --- a/execution/federationtesting/accounts/graph/model/models_gen.go +++ b/execution/federationtesting/accounts/graph/model/models_gen.go @@ -41,6 +41,10 @@ type Info interface { GetQuantity() int } +type MeUnion interface { + IsMeUnion() +} + type Name interface { IsName() GetName() string @@ -92,6 +96,19 @@ func (A) IsAb() {} func (A) IsNamer() {} func (this A) GetName() string { return this.Name } +type Admin struct { + ID string `json:"id"` + Username string `json:"username"` + Role string `json:"role"` +} + +func (Admin) IsIdentifiable() {} +func (this Admin) GetID() string { return this.ID } + +func (Admin) IsMeUnion() {} + +func (Admin) IsEntity() {} + type B struct { Name string `json:"name"` } @@ -116,6 +133,18 @@ type CDerObj struct { Last string `json:"last"` } +type CacheEntity struct { + ID string `json:"id"` + A string `json:"a"` + B string `json:"b"` + C string `json:"c"` + D string `json:"d"` + E string `json:"e"` + F string `json:"f"` +} + +func (CacheEntity) IsEntity() {} + type Cat struct { Name string `json:"name"` } @@ -143,6 +172,19 @@ func (D) IsCd() {} func (D) IsCDer() {} func (this D) GetName() *CDerObj { return this.Name } +type GreetingFormatting struct { + Uppercase *bool `json:"uppercase,omitempty"` + Prefix *string `json:"prefix,omitempty"` +} + +type GreetingInput struct { + Style GreetingStyle `json:"style"` + Formatting *GreetingFormatting `json:"formatting,omitempty"` +} + +type Mutation struct { +} + type Product struct { Upc string `json:"upc"` } @@ -280,15 +322,20 @@ func (TitleName) IsName() {} func (this TitleName) GetName() string { return this.Name } type User struct { - ID string `json:"id"` - Username string `json:"username"` - History []History `json:"history"` - RealName string `json:"realName"` + ID string `json:"id"` + Username string `json:"username"` + Nickname string `json:"nickname"` + History []History `json:"history"` + RealName string `json:"realName"` + Greeting string `json:"greeting"` + CustomGreeting string `json:"customGreeting"` } func (User) IsIdentifiable() {} func (this User) GetID() string { return this.ID } +func (User) IsMeUnion() {} + func (User) IsEntity() {} type WalletType1 struct { @@ -311,6 +358,63 @@ func (WalletType2) IsWallet() {} func (this WalletType2) GetCurrency() string { return this.Currency } func (this WalletType2) GetAmount() float64 { return this.Amount } +type GreetingStyle string + +const ( + GreetingStyleFormal GreetingStyle = "FORMAL" + GreetingStyleCasual GreetingStyle = "CASUAL" + GreetingStyleShort GreetingStyle = "SHORT" +) + +var AllGreetingStyle = []GreetingStyle{ + GreetingStyleFormal, + GreetingStyleCasual, + GreetingStyleShort, +} + +func (e GreetingStyle) IsValid() bool { + switch e { + case GreetingStyleFormal, GreetingStyleCasual, GreetingStyleShort: + return true + } + return false +} + +func (e GreetingStyle) String() string { + return string(e) +} + +func (e *GreetingStyle) UnmarshalGQL(v any) error { + str, ok := v.(string) + if !ok { + return fmt.Errorf("enums must be strings") + } + + *e = GreetingStyle(str) + if !e.IsValid() { + return fmt.Errorf("%s is not a valid GreetingStyle", str) + } + return nil +} + +func (e GreetingStyle) MarshalGQL(w io.Writer) { + fmt.Fprint(w, strconv.Quote(e.String())) +} + +func (e *GreetingStyle) UnmarshalJSON(b []byte) error { + s, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + return e.UnmarshalGQL(s) +} + +func (e GreetingStyle) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + e.MarshalGQL(&buf) + return buf.Bytes(), nil +} + type Which string const ( diff --git a/execution/federationtesting/accounts/graph/resolver.go b/execution/federationtesting/accounts/graph/resolver.go index 278fb7db60..c21db2e8cb 100644 --- a/execution/federationtesting/accounts/graph/resolver.go +++ b/execution/federationtesting/accounts/graph/resolver.go @@ -3,4 +3,33 @@ // It serves as dependency injection for your app, add any dependencies you require here. package graph -type Resolver struct{} +import "sync" + +type Resolver struct { + usersMu sync.RWMutex + users map[string]string +} + +func NewResolver() *Resolver { + return &Resolver{ + users: map[string]string{ + "1234": "Me", + "7777": "User 7777", + }, + } +} + +func (r *Resolver) GetUsername(id string) string { + r.usersMu.RLock() + defer r.usersMu.RUnlock() + if name, ok := r.users[id]; ok { + return name + } + return "User " + id +} + +func (r *Resolver) SetUsername(id, newUsername string) { + r.usersMu.Lock() + defer r.usersMu.Unlock() + r.users[id] = newUsername +} diff --git a/execution/federationtesting/accounts/graph/schema.graphqls b/execution/federationtesting/accounts/graph/schema.graphqls index 1f8806c71a..be82ebc0d9 100644 --- a/execution/federationtesting/accounts/graph/schema.graphqls +++ b/execution/federationtesting/accounts/graph/schema.graphqls @@ -1,9 +1,16 @@ type Query { me: User + user(id: ID!): User + userByIdAndName(id: ID!, username: String!): User + meInterface: Identifiable + meUnion: MeUnion identifiable: Identifiable histories: [History] cat: Cat + # L1 cache union optimization testing + cacheEntity(id: ID!): CacheEntity! + # merge data test cases interfaceUnion(which: Which! = A): AB abstractList: [AbstractListItem] @@ -13,6 +20,10 @@ type Query { someNestedInterfaces: [SomeNestedInterface] } +type Mutation { + updateUsername(id: ID!, newUsername: String!): User! +} + type Cat { name: String! } @@ -21,11 +32,30 @@ interface Identifiable { id: ID! } +enum GreetingStyle { + FORMAL + CASUAL + SHORT +} + +input GreetingFormatting { + uppercase: Boolean + prefix: String +} + +input GreetingInput { + style: GreetingStyle! + formatting: GreetingFormatting +} + type User implements Identifiable @key(fields: "id") { id: ID! username: String! + nickname: String! history: [History!]! realName: String! + greeting(style: String!): String! + customGreeting(input: GreetingInput!): String! } type Product @key(fields: "upc") { @@ -181,4 +211,29 @@ type CDerObj { first: String! middle: String! last: String! -} \ No newline at end of file +} + +# CacheEntity is a self-referential entity designed for L1 cache testing. +# It has many scalar fields (a-f) so tests can select different field subsets +# at each tree level, creating entity fetches with different ProvidesData. +# The `nested` field (defined in reviews subgraph) returns the same entity, +# enabling arbitrary-depth sequential entity fetch chains for the same key. +type CacheEntity @key(fields: "id") { + id: ID! + a: String! + b: String! + c: String! + d: String! + e: String! + f: String! +} + +# Admin is another entity that implements Identifiable for testing interface/union caching +type Admin implements Identifiable @key(fields: "id") { + id: ID! + username: String! + role: String! +} + +# Union containing entity types for testing union field caching +union MeUnion = User | Admin \ No newline at end of file diff --git a/execution/federationtesting/accounts/graph/schema.resolvers.go b/execution/federationtesting/accounts/graph/schema.resolvers.go index 1b56e64752..4cc32f2b42 100644 --- a/execution/federationtesting/accounts/graph/schema.resolvers.go +++ b/execution/federationtesting/accounts/graph/schema.resolvers.go @@ -7,28 +7,87 @@ package graph import ( "context" "fmt" + "strings" "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph/generated" "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph/model" ) +// UpdateUsername is the resolver for the updateUsername field. +func (r *mutationResolver) UpdateUsername(ctx context.Context, id string, newUsername string) (*model.User, error) { + r.SetUsername(id, newUsername) + return &model.User{ + ID: id, + Username: newUsername, + Nickname: "nick-" + newUsername, + RealName: "Real " + newUsername, + }, nil +} + // Me is the resolver for the me field. func (r *queryResolver) Me(ctx context.Context) (*model.User, error) { return &model.User{ ID: "1234", - Username: "Me", + Username: r.GetUsername("1234"), + Nickname: "nick-Me", History: histories, RealName: "User Usington", }, nil } +// User is the resolver for the user field. +func (r *queryResolver) User(ctx context.Context, id string) (*model.User, error) { + name := r.GetUsername(id) + return &model.User{ + ID: id, + Username: name, + Nickname: "nick-" + name, + RealName: "Real " + name, + }, nil +} + +// UserByIDAndName is the resolver for the userByIdAndName field. +func (r *queryResolver) UserByIDAndName(ctx context.Context, id string, username string) (*model.User, error) { + return &model.User{ + ID: id, + Username: username, + Nickname: "nick-" + username, + }, nil +} + +// MeInterface is the resolver for the meInterface field. +func (r *queryResolver) MeInterface(ctx context.Context) (model.Identifiable, error) { + username := r.GetUsername("1234") + return &model.User{ + ID: "1234", + Username: username, + Nickname: "nick-" + username, + History: histories, + RealName: "Real " + username, + }, nil +} + +// MeUnion is the resolver for the meUnion field. +func (r *queryResolver) MeUnion(ctx context.Context) (model.MeUnion, error) { + username := r.GetUsername("1234") + return &model.User{ + ID: "1234", + Username: username, + Nickname: "nick-" + username, + History: histories, + RealName: "Real " + username, + }, nil +} + // Identifiable is the resolver for the identifiable field. func (r *queryResolver) Identifiable(ctx context.Context) (model.Identifiable, error) { + username := r.GetUsername("1234") return &model.User{ ID: "1234", - Username: "Me", + Username: username, + Nickname: "nick-" + username, History: histories, - RealName: "User Usington", + RealName: "Real " + username, }, nil } @@ -44,6 +103,19 @@ func (r *queryResolver) Cat(ctx context.Context) (*model.Cat, error) { }, nil } +// CacheEntity is the resolver for the cacheEntity field. +func (r *queryResolver) CacheEntity(ctx context.Context, id string) (*model.CacheEntity, error) { + return &model.CacheEntity{ + ID: id, + A: "a-" + id, + B: "b-" + id, + C: "c-" + id, + D: "d-" + id, + E: "e-" + id, + F: "f-" + id, + }, nil +} + // InterfaceUnion is the resolver for the interfaceUnion field. func (r *queryResolver) InterfaceUnion(ctx context.Context, which model.Which) (model.Ab, error) { switch which { @@ -211,7 +283,59 @@ func (r *queryResolver) SomeNestedInterfaces(ctx context.Context) ([]model.SomeN }, nil } +// Greeting is the resolver for the greeting field. +func (r *userResolver) Greeting(ctx context.Context, obj *model.User, style string) (string, error) { + name := obj.Username + if name == "" { + name = r.GetUsername(obj.ID) + } + switch style { + case "formal": + return "Good day, " + name, nil + case "casual": + return "Hey, " + name + "!", nil + case "short": + return "Hi " + name, nil + default: + return "Hello, " + name, nil + } +} + +// CustomGreeting is the resolver for the customGreeting field. +func (r *userResolver) CustomGreeting(ctx context.Context, obj *model.User, input model.GreetingInput) (string, error) { + name := obj.Username + if name == "" { + name = r.GetUsername(obj.ID) + } + var greeting string + switch input.Style { + case model.GreetingStyleFormal: + greeting = "Good day, " + name + case model.GreetingStyleCasual: + greeting = "Hey, " + name + "!" + case model.GreetingStyleShort: + greeting = "Hi " + name + } + if input.Formatting != nil { + if input.Formatting.Prefix != nil && *input.Formatting.Prefix != "" { + greeting = *input.Formatting.Prefix + " " + greeting + } + if input.Formatting.Uppercase != nil && *input.Formatting.Uppercase { + greeting = strings.ToUpper(greeting) + } + } + return greeting, nil +} + +// Mutation returns generated.MutationResolver implementation. +func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} } + // Query returns generated.QueryResolver implementation. func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} } +// User returns generated.UserResolver implementation. +func (r *Resolver) User() generated.UserResolver { return &userResolver{r} } + +type mutationResolver struct{ *Resolver } type queryResolver struct{ *Resolver } +type userResolver struct{ *Resolver } diff --git a/execution/federationtesting/gateway/gateway.go b/execution/federationtesting/gateway/gateway.go index ffc62eb7d9..1b6945c55e 100644 --- a/execution/federationtesting/gateway/gateway.go +++ b/execution/federationtesting/gateway/gateway.go @@ -12,6 +12,9 @@ import ( "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" ) +// GatewayOption is a function that configures a Gateway +type GatewayOption func(*Gateway) + type DataSourceObserver interface { UpdateDataSources(subgraphsConfigs []engine.SubgraphConfiguration) } @@ -34,22 +37,35 @@ func NewGateway( gqlHandlerFactory HandlerFactory, httpClient *http.Client, logger log.Logger, + loaderCaches map[string]resolve.LoaderCache, + opts ...GatewayOption, ) *Gateway { - return &Gateway{ + g := &Gateway{ gqlHandlerFactory: gqlHandlerFactory, httpClient: httpClient, logger: logger, + loaderCaches: loaderCaches, mu: &sync.Mutex{}, readyCh: make(chan struct{}), readyOnce: &sync.Once{}, } + + for _, opt := range opts { + opt(g) + } + + return g } type Gateway struct { - gqlHandlerFactory HandlerFactory - httpClient *http.Client - logger log.Logger + gqlHandlerFactory HandlerFactory + httpClient *http.Client + logger log.Logger + loaderCaches map[string]resolve.LoaderCache + subgraphEntityCachingConfigs engine.SubgraphCachingConfigs + resolverOptionsFns []func(*resolve.ResolverOptions) // Applied to ResolverOptions before creating the engine + remapVariables map[string]string gqlHandler http.Handler mu *sync.Mutex @@ -58,6 +74,52 @@ type Gateway struct { readyOnce *sync.Once } +// WithSubgraphEntityCachingConfigs configures per-subgraph entity caching for the gateway +func WithSubgraphEntityCachingConfigs(configs engine.SubgraphCachingConfigs) GatewayOption { + return func(g *Gateway) { + g.subgraphEntityCachingConfigs = configs + } +} + +func WithRemapVariables(remap map[string]string) GatewayOption { + return func(g *Gateway) { + g.remapVariables = remap + } +} + +// WithResolverOptions adds a function that customizes ResolverOptions before the engine is created. +// Multiple functions are applied in order. +func WithResolverOptions(fn func(*resolve.ResolverOptions)) GatewayOption { + return func(g *Gateway) { + g.resolverOptionsFns = append(g.resolverOptionsFns, fn) + } +} + +// buildEntityCacheConfigs converts SubgraphCachingConfigs into the runtime lookup map +// needed by the resolver for extensions-based cache invalidation. +// Only EntityCaching entries are processed — RootFieldCaching uses a different key format +// and is not eligible for extensions-based invalidation. +func buildEntityCacheConfigs(configs engine.SubgraphCachingConfigs) map[string]map[string]*resolve.EntityCacheInvalidationConfig { + if len(configs) == 0 { + return nil + } + result := make(map[string]map[string]*resolve.EntityCacheInvalidationConfig, len(configs)) + for _, sc := range configs { + if len(sc.EntityCaching) == 0 { + continue + } + entityMap := make(map[string]*resolve.EntityCacheInvalidationConfig, len(sc.EntityCaching)) + for _, ec := range sc.EntityCaching { + entityMap[ec.TypeName] = &resolve.EntityCacheInvalidationConfig{ + CacheName: ec.CacheName, + IncludeSubgraphHeaderPrefix: ec.IncludeSubgraphHeaderPrefix, + } + } + result[sc.SubgraphName] = entityMap + } + return result +} + func (g *Gateway) ServeHTTP(w http.ResponseWriter, r *http.Request) { g.mu.Lock() handler := g.gqlHandler @@ -72,7 +134,15 @@ func (g *Gateway) Ready() { func (g *Gateway) UpdateDataSources(subgraphsConfigs []engine.SubgraphConfiguration) { ctx := context.Background() - engineConfigFactory := engine.NewFederationEngineConfigFactory(ctx, subgraphsConfigs, engine.WithFederationHttpClient(g.httpClient)) + + opts := []engine.FederationEngineConfigFactoryOption{ + engine.WithFederationHttpClient(g.httpClient), + } + if len(g.subgraphEntityCachingConfigs) > 0 { + opts = append(opts, engine.WithSubgraphEntityCachingConfigs(g.subgraphEntityCachingConfigs)) + } + + engineConfigFactory := engine.NewFederationEngineConfigFactory(ctx, subgraphsConfigs, opts...) engineConfig, err := engineConfigFactory.BuildEngineConfiguration() if err != nil { @@ -80,9 +150,15 @@ func (g *Gateway) UpdateDataSources(subgraphsConfigs []engine.SubgraphConfigurat return } - executionEngine, err := engine.NewExecutionEngine(ctx, g.logger, engineConfig, resolve.ResolverOptions{ - MaxConcurrency: 1024, - }) + resolverOpts := resolve.ResolverOptions{ + MaxConcurrency: 1024, + Caches: g.loaderCaches, + EntityCacheConfigs: buildEntityCacheConfigs(g.subgraphEntityCachingConfigs), + } + for _, fn := range g.resolverOptionsFns { + fn(&resolverOpts) + } + executionEngine, err := engine.NewExecutionEngine(ctx, g.logger, engineConfig, resolverOpts) if err != nil { g.logger.Error("create engine: %v", log.Error(err)) return diff --git a/execution/federationtesting/gateway/http/handler.go b/execution/federationtesting/gateway/http/handler.go index e6d575cd7a..e4e52b9feb 100644 --- a/execution/federationtesting/gateway/http/handler.go +++ b/execution/federationtesting/gateway/http/handler.go @@ -8,6 +8,7 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/engine" "github.com/wundergraph/graphql-go-tools/execution/graphql" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" ) const ( @@ -20,22 +21,34 @@ func NewGraphqlHTTPHandler( upgrader *ws.HTTPUpgrader, logger log.Logger, enableART bool, + subgraphHeadersBuilder resolve.SubgraphHeadersBuilder, + cachingOptions resolve.CachingOptions, + debugMode bool, + remapVariables map[string]string, ) http.Handler { return &GraphQLHTTPRequestHandler{ - schema: schema, - engine: engine, - wsUpgrader: upgrader, - log: logger, - enableART: enableART, + schema: schema, + engine: engine, + wsUpgrader: upgrader, + log: logger, + enableART: enableART, + subgraphHeadersBuilder: subgraphHeadersBuilder, + cachingOptions: cachingOptions, + debugMode: debugMode, + remapVariables: remapVariables, } } type GraphQLHTTPRequestHandler struct { - log log.Logger - wsUpgrader *ws.HTTPUpgrader - engine *engine.ExecutionEngine - schema *graphql.Schema - enableART bool + log log.Logger + wsUpgrader *ws.HTTPUpgrader + engine *engine.ExecutionEngine + schema *graphql.Schema + enableART bool + subgraphHeadersBuilder resolve.SubgraphHeadersBuilder + cachingOptions resolve.CachingOptions + debugMode bool + remapVariables map[string]string } func (g *GraphQLHTTPRequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { diff --git a/execution/federationtesting/gateway/http/http.go b/execution/federationtesting/gateway/http/http.go index 5a255e01c9..5df39d1e7b 100644 --- a/execution/federationtesting/gateway/http/http.go +++ b/execution/federationtesting/gateway/http/http.go @@ -3,6 +3,7 @@ package http import ( "bytes" + "encoding/json" "net/http" log "github.com/jensneuse/abstractlogger" @@ -45,6 +46,27 @@ func (g *GraphQLHTTPRequestHandler) handleHTTP(w http.ResponseWriter, r *http.Re opts = append(opts, engine.WithRequestTraceOptions(tracingOpts)) } + if g.subgraphHeadersBuilder != nil { + opts = append(opts, engine.WithSubgraphHeadersBuilder(g.subgraphHeadersBuilder)) + } + + // Add caching options if L1 or L2 cache is enabled + if g.cachingOptions.EnableL1Cache || g.cachingOptions.EnableL2Cache { + opts = append(opts, engine.WithCachingOptions(g.cachingOptions)) + } + + if g.debugMode { + opts = append(opts, engine.WithDebugMode()) + } + + if len(g.remapVariables) > 0 { + opts = append(opts, engine.WithRemapVariables(g.remapVariables)) + } + + // Capture cache stats for debugging/testing + var cacheStats resolve.CacheAnalyticsSnapshot + opts = append(opts, engine.WithCacheStatsOutput(&cacheStats)) + buf := bytes.NewBuffer(make([]byte, 0, 4096)) resultWriter := graphql.NewEngineResultWriterFromBuffer(buf) if err = g.engine.Execute(r.Context(), &gqlRequest, &resultWriter, opts...); err != nil { @@ -54,6 +76,14 @@ func (g *GraphQLHTTPRequestHandler) handleHTTP(w http.ResponseWriter, r *http.Re } w.Header().Add(httpHeaderContentType, httpContentTypeApplicationJson) + + // Add full analytics snapshot as JSON header when analytics is enabled + if g.cachingOptions.EnableCacheAnalytics { + if analyticsJSON, jsonErr := json.Marshal(cacheStats); jsonErr == nil { + w.Header().Add("X-Cache-Analytics", string(analyticsJSON)) + } + } + w.WriteHeader(http.StatusOK) if _, err = w.Write(buf.Bytes()); err != nil { g.log.Error("write response", log.Error(err)) diff --git a/execution/federationtesting/gateway/http/ws.go b/execution/federationtesting/gateway/http/ws.go index ecd7994c15..ac40e8cb24 100644 --- a/execution/federationtesting/gateway/http/ws.go +++ b/execution/federationtesting/gateway/http/ws.go @@ -4,11 +4,13 @@ import ( "context" "encoding/json" "net" + "sync" "github.com/gobwas/ws" "github.com/gobwas/ws/wsutil" "github.com/jensneuse/abstractlogger" + "github.com/wundergraph/graphql-go-tools/execution/engine" "github.com/wundergraph/graphql-go-tools/execution/subscription" ) @@ -19,6 +21,7 @@ type WebsocketSubscriptionClient struct { clientConn net.Conn // isClosedConnection indicates if the websocket connection is closed. isClosedConnection bool + mu sync.RWMutex } // NewWebsocketSubscriptionClient will create a new websocket subscription client. @@ -71,7 +74,7 @@ func (w *WebsocketSubscriptionClient) ReadFromClient() (message *subscription.Me // //nolint:staticcheck func (w *WebsocketSubscriptionClient) WriteToClient(message subscription.Message) error { - if w.isClosedConnection { + if !w.IsConnected() { return nil } @@ -100,6 +103,8 @@ func (w *WebsocketSubscriptionClient) WriteToClient(message subscription.Message // IsConnected will indicate if the websocket connection is still established. func (w *WebsocketSubscriptionClient) IsConnected() bool { + w.mu.RLock() + defer w.mu.RUnlock() return !w.isClosedConnection } @@ -108,19 +113,27 @@ func (w *WebsocketSubscriptionClient) Disconnect() error { w.logger.Debug("http.GraphQLHTTPRequestHandler.Disconnect()", abstractlogger.String("message", "disconnecting client"), ) - w.isClosedConnection = true + w.changeConnectionStateToClosed() return w.clientConn.Close() } // isClosedConnectionError will indicate if the given error is a connection closed error. func (w *WebsocketSubscriptionClient) isClosedConnectionError(err error) bool { if _, ok := err.(wsutil.ClosedError); ok { - w.isClosedConnection = true + w.changeConnectionStateToClosed() } + w.mu.RLock() + defer w.mu.RUnlock() return w.isClosedConnection } +func (w *WebsocketSubscriptionClient) changeConnectionStateToClosed() { + w.mu.Lock() + defer w.mu.Unlock() + w.isClosedConnection = true +} + func HandleWebsocket(done chan bool, errChan chan error, conn net.Conn, executorPool subscription.ExecutorPool, logger abstractlogger.Logger) { defer func() { if err := conn.Close(); err != nil { @@ -152,7 +165,18 @@ func (g *GraphQLHTTPRequestHandler) handleWebsocket(connInitReqCtx context.Conte done := make(chan bool) errChan := make(chan error) - executorPool := subscription.NewExecutorV2Pool(g.engine, connInitReqCtx) + var opts []engine.ExecutionOptions + if g.cachingOptions.EnableL1Cache || g.cachingOptions.EnableL2Cache { + opts = append(opts, engine.WithCachingOptions(g.cachingOptions)) + } + if g.subgraphHeadersBuilder != nil { + opts = append(opts, engine.WithSubgraphHeadersBuilder(g.subgraphHeadersBuilder)) + } + if g.debugMode { + opts = append(opts, engine.WithDebugMode()) + } + + executorPool := subscription.NewExecutorV2Pool(g.engine, connInitReqCtx, opts...) go HandleWebsocket(done, errChan, conn, executorPool, g.log) select { case err := <-errChan: diff --git a/execution/federationtesting/gateway/main.go b/execution/federationtesting/gateway/main.go index 69f6e5023e..7d2b5508a1 100644 --- a/execution/federationtesting/gateway/main.go +++ b/execution/federationtesting/gateway/main.go @@ -10,6 +10,7 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/engine" http2 "github.com/wundergraph/graphql-go-tools/execution/federationtesting/gateway/http" "github.com/wundergraph/graphql-go-tools/execution/graphql" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" ) func NewDatasource(serviceConfig []ServiceConfig, httpClient *http.Client) *DatasourcePollerPoller { @@ -24,6 +25,39 @@ func Handler( datasourcePoller *DatasourcePollerPoller, httpClient *http.Client, enableART bool, + loaderCaches map[string]resolve.LoaderCache, + subgraphHeadersBuilder resolve.SubgraphHeadersBuilder, +) *Gateway { + return HandlerWithCaching(logger, datasourcePoller, httpClient, enableART, loaderCaches, subgraphHeadersBuilder, resolve.CachingOptions{}, nil, false) +} + +func HandlerWithCaching( + logger log.Logger, + datasourcePoller *DatasourcePollerPoller, + httpClient *http.Client, + enableART bool, + loaderCaches map[string]resolve.LoaderCache, + subgraphHeadersBuilder resolve.SubgraphHeadersBuilder, + cachingOptions resolve.CachingOptions, + subgraphEntityCachingConfigs engine.SubgraphCachingConfigs, + debugMode bool, +) *Gateway { + return HandlerWithCachingAndOpts(logger, datasourcePoller, httpClient, enableART, loaderCaches, subgraphHeadersBuilder, cachingOptions, subgraphEntityCachingConfigs, debugMode) +} + +// HandlerWithCachingAndOpts is like HandlerWithCaching but accepts additional GatewayOptions +// for configuring resolver-level options (e.g., OnSubscriptionCacheWrite callbacks). +func HandlerWithCachingAndOpts( + logger log.Logger, + datasourcePoller *DatasourcePollerPoller, + httpClient *http.Client, + enableART bool, + loaderCaches map[string]resolve.LoaderCache, + subgraphHeadersBuilder resolve.SubgraphHeadersBuilder, + cachingOptions resolve.CachingOptions, + subgraphEntityCachingConfigs engine.SubgraphCachingConfigs, + debugMode bool, + extraOpts ...GatewayOption, ) *Gateway { upgrader := &ws.HTTPUpgrader{ Header: http.Header{}, @@ -31,11 +65,24 @@ func Handler( datasourceWatcher := datasourcePoller + // remapVariables is captured by the handler factory closure. + // The extraction opt (appended last) copies the value set by extraOpts. + var remapVariables map[string]string + var gqlHandlerFactory HandlerFactoryFn = func(schema *graphql.Schema, engine *engine.ExecutionEngine) http.Handler { - return http2.NewGraphqlHTTPHandler(schema, engine, upgrader, logger, enableART) + return http2.NewGraphqlHTTPHandler(schema, engine, upgrader, logger, enableART, subgraphHeadersBuilder, cachingOptions, debugMode, remapVariables) + } + + var gatewayOpts []GatewayOption + if len(subgraphEntityCachingConfigs) > 0 { + gatewayOpts = append(gatewayOpts, WithSubgraphEntityCachingConfigs(subgraphEntityCachingConfigs)) } + gatewayOpts = append(gatewayOpts, extraOpts...) + gatewayOpts = append(gatewayOpts, func(g *Gateway) { + remapVariables = g.remapVariables + }) - gateway := NewGateway(gqlHandlerFactory, httpClient, logger) + gateway := NewGateway(gqlHandlerFactory, httpClient, logger, loaderCaches, gatewayOpts...) datasourceWatcher.Register(gateway) diff --git a/execution/federationtesting/products/graph/entity.resolvers.go b/execution/federationtesting/products/graph/entity.resolvers.go index a1d61b2460..a8737922d2 100644 --- a/execution/federationtesting/products/graph/entity.resolvers.go +++ b/execution/federationtesting/products/graph/entity.resolvers.go @@ -11,14 +11,14 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/federationtesting/products/graph/model" ) +// FindDigitalProductByUpc is the resolver for the findDigitalProductByUpc field. +func (r *entityResolver) FindDigitalProductByUpc(ctx context.Context, upc string) (*model.DigitalProduct, error) { + return r.findDigitalProduct(upc), nil +} + // FindProductByUpc is the resolver for the findProductByUpc field. func (r *entityResolver) FindProductByUpc(ctx context.Context, upc string) (*model.Product, error) { - for _, h := range r.products { - if h.Upc == upc { - return h, nil - } - } - return nil, nil + return r.findProduct(upc), nil } // Entity returns generated.EntityResolver implementation. diff --git a/execution/federationtesting/products/graph/generated/federation.go b/execution/federationtesting/products/graph/generated/federation.go index 45eb8cf256..8b180a8b0b 100644 --- a/execution/federationtesting/products/graph/generated/federation.go +++ b/execution/federationtesting/products/graph/generated/federation.go @@ -153,6 +153,25 @@ func (ec *executionContext) resolveEntity( }() switch typeName { + case "DigitalProduct": + resolverName, err := entityResolverNameForDigitalProduct(ctx, rep) + if err != nil { + return nil, fmt.Errorf(`finding resolver for Entity "DigitalProduct": %w`, err) + } + switch resolverName { + + case "findDigitalProductByUpc": + id0, err := ec.unmarshalNString2string(ctx, rep["upc"]) + if err != nil { + return nil, fmt.Errorf(`unmarshalling param 0 for findDigitalProductByUpc(): %w`, err) + } + entity, err := ec.resolvers.Entity().FindDigitalProductByUpc(ctx, id0) + if err != nil { + return nil, fmt.Errorf(`resolving Entity "DigitalProduct": %w`, err) + } + + return entity, nil + } case "Product": resolverName, err := entityResolverNameForProduct(ctx, rep) if err != nil { @@ -198,6 +217,41 @@ func (ec *executionContext) resolveManyEntities( } } +func entityResolverNameForDigitalProduct(ctx context.Context, rep EntityRepresentation) (string, error) { + // we collect errors because a later entity resolver may work fine + // when an entity has multiple keys + entityResolverErrs := []error{} + for { + var ( + m EntityRepresentation + val any + ok bool + ) + _ = val + // if all of the KeyFields values for this resolver are null, + // we shouldn't use use it + allNull := true + m = rep + val, ok = m["upc"] + if !ok { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to missing Key Field \"upc\" for DigitalProduct", ErrTypeNotFound)) + break + } + if allNull { + allNull = val == nil + } + if allNull { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to all null value KeyFields for DigitalProduct", ErrTypeNotFound)) + break + } + return "findDigitalProductByUpc", nil + } + return "", fmt.Errorf("%w for DigitalProduct due to %v", ErrTypeNotFound, + errors.Join(entityResolverErrs...).Error()) +} + func entityResolverNameForProduct(ctx context.Context, rep EntityRepresentation) (string, error) { // we collect errors because a later entity resolver may work fine // when an entity has multiple keys diff --git a/execution/federationtesting/products/graph/generated/generated.go b/execution/federationtesting/products/graph/generated/generated.go index 51ccd9b4ed..7c0dafed1e 100644 --- a/execution/federationtesting/products/graph/generated/generated.go +++ b/execution/federationtesting/products/graph/generated/generated.go @@ -50,8 +50,16 @@ type DirectiveRoot struct { } type ComplexityRoot struct { + DigitalProduct struct { + DownloadURL func(childComplexity int) int + Name func(childComplexity int) int + Price func(childComplexity int) int + Upc func(childComplexity int) int + } + Entity struct { - FindProductByUpc func(childComplexity int, upc string) int + FindDigitalProductByUpc func(childComplexity int, upc string) int + FindProductByUpc func(childComplexity int, upc string) int } Mutation struct { @@ -66,14 +74,21 @@ type ComplexityRoot struct { } Query struct { + Product func(childComplexity int, upc string) int + Products func(childComplexity int, upcs []string) int TopProducts func(childComplexity int, first *int) int __resolve__service func(childComplexity int) int __resolve_entities func(childComplexity int, representations []map[string]any) int } Subscription struct { - UpdateProductPrice func(childComplexity int, upc string) int - UpdatedPrice func(childComplexity int) int + UpdateDigitalProductPriceInterface func(childComplexity int, upc string) int + UpdateDigitalProductPriceUnion func(childComplexity int, upc string) int + UpdateProductPrice func(childComplexity int, upc string) int + UpdateProductPriceInterface func(childComplexity int, upc string) int + UpdateProductPriceUnion func(childComplexity int, upc string) int + UpdatedPrice func(childComplexity int) int + UpdatedPrices func(childComplexity int, first *int) int } _Service struct { @@ -82,6 +97,7 @@ type ComplexityRoot struct { } type EntityResolver interface { + FindDigitalProductByUpc(ctx context.Context, upc string) (*model.DigitalProduct, error) FindProductByUpc(ctx context.Context, upc string) (*model.Product, error) } type MutationResolver interface { @@ -89,10 +105,17 @@ type MutationResolver interface { } type QueryResolver interface { TopProducts(ctx context.Context, first *int) ([]*model.Product, error) + Product(ctx context.Context, upc string) (*model.Product, error) + Products(ctx context.Context, upcs []string) ([]*model.Product, error) } type SubscriptionResolver interface { UpdatedPrice(ctx context.Context) (<-chan *model.Product, error) UpdateProductPrice(ctx context.Context, upc string) (<-chan *model.Product, error) + UpdatedPrices(ctx context.Context, first *int) (<-chan []*model.Product, error) + UpdateProductPriceUnion(ctx context.Context, upc string) (<-chan model.ProductUpdate, error) + UpdateProductPriceInterface(ctx context.Context, upc string) (<-chan model.ProductInterface, error) + UpdateDigitalProductPriceUnion(ctx context.Context, upc string) (<-chan model.ProductUpdate, error) + UpdateDigitalProductPriceInterface(ctx context.Context, upc string) (<-chan model.ProductInterface, error) } type executableSchema struct { @@ -114,6 +137,46 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin _ = ec switch typeName + "." + field { + case "DigitalProduct.downloadUrl": + if e.complexity.DigitalProduct.DownloadURL == nil { + break + } + + return e.complexity.DigitalProduct.DownloadURL(childComplexity), true + + case "DigitalProduct.name": + if e.complexity.DigitalProduct.Name == nil { + break + } + + return e.complexity.DigitalProduct.Name(childComplexity), true + + case "DigitalProduct.price": + if e.complexity.DigitalProduct.Price == nil { + break + } + + return e.complexity.DigitalProduct.Price(childComplexity), true + + case "DigitalProduct.upc": + if e.complexity.DigitalProduct.Upc == nil { + break + } + + return e.complexity.DigitalProduct.Upc(childComplexity), true + + case "Entity.findDigitalProductByUpc": + if e.complexity.Entity.FindDigitalProductByUpc == nil { + break + } + + args, err := ec.field_Entity_findDigitalProductByUpc_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Entity.FindDigitalProductByUpc(childComplexity, args["upc"].(string)), true + case "Entity.findProductByUpc": if e.complexity.Entity.FindProductByUpc == nil { break @@ -166,6 +229,30 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Product.Upc(childComplexity), true + case "Query.product": + if e.complexity.Query.Product == nil { + break + } + + args, err := ec.field_Query_product_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Product(childComplexity, args["upc"].(string)), true + + case "Query.products": + if e.complexity.Query.Products == nil { + break + } + + args, err := ec.field_Query_products_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Query.Products(childComplexity, args["upcs"].([]string)), true + case "Query.topProducts": if e.complexity.Query.TopProducts == nil { break @@ -197,6 +284,30 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Query.__resolve_entities(childComplexity, args["representations"].([]map[string]any)), true + case "Subscription.updateDigitalProductPriceInterface": + if e.complexity.Subscription.UpdateDigitalProductPriceInterface == nil { + break + } + + args, err := ec.field_Subscription_updateDigitalProductPriceInterface_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Subscription.UpdateDigitalProductPriceInterface(childComplexity, args["upc"].(string)), true + + case "Subscription.updateDigitalProductPriceUnion": + if e.complexity.Subscription.UpdateDigitalProductPriceUnion == nil { + break + } + + args, err := ec.field_Subscription_updateDigitalProductPriceUnion_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Subscription.UpdateDigitalProductPriceUnion(childComplexity, args["upc"].(string)), true + case "Subscription.updateProductPrice": if e.complexity.Subscription.UpdateProductPrice == nil { break @@ -209,6 +320,30 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Subscription.UpdateProductPrice(childComplexity, args["upc"].(string)), true + case "Subscription.updateProductPriceInterface": + if e.complexity.Subscription.UpdateProductPriceInterface == nil { + break + } + + args, err := ec.field_Subscription_updateProductPriceInterface_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Subscription.UpdateProductPriceInterface(childComplexity, args["upc"].(string)), true + + case "Subscription.updateProductPriceUnion": + if e.complexity.Subscription.UpdateProductPriceUnion == nil { + break + } + + args, err := ec.field_Subscription_updateProductPriceUnion_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Subscription.UpdateProductPriceUnion(childComplexity, args["upc"].(string)), true + case "Subscription.updatedPrice": if e.complexity.Subscription.UpdatedPrice == nil { break @@ -216,6 +351,18 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Subscription.UpdatedPrice(childComplexity), true + case "Subscription.updatedPrices": + if e.complexity.Subscription.UpdatedPrices == nil { + break + } + + args, err := ec.field_Subscription_updatedPrices_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Subscription.UpdatedPrices(childComplexity, args["first"].(*int)), true + case "_Service.sdl": if e.complexity._Service.SDL == nil { break @@ -346,6 +493,8 @@ func (ec *executionContext) introspectType(name string) (*introspection.Type, er var sources = []*ast.Source{ {Name: "../schema.graphqls", Input: `type Query { topProducts(first: Int = 5): [Product] + product(upc: String!): Product + products(upcs: [String!]!): [Product] } type Mutation { @@ -355,14 +504,34 @@ type Mutation { type Subscription { updatedPrice: Product! updateProductPrice(upc: String!): Product! + updatedPrices(first: Int = 3): [Product!]! + updateProductPriceUnion(upc: String!): ProductUpdate! + updateProductPriceInterface(upc: String!): ProductInterface! + updateDigitalProductPriceUnion(upc: String!): ProductUpdate! + updateDigitalProductPriceInterface(upc: String!): ProductInterface! +} + +union ProductUpdate = Product | DigitalProduct + +interface ProductInterface { + upc: String! + name: String! + price: Int! } -type Product @key(fields: "upc") { +type Product implements ProductInterface @key(fields: "upc") { upc: String! name: String! price: Int! inStock: Int! } + +type DigitalProduct implements ProductInterface @key(fields: "upc") { + upc: String! + name: String! + price: Int! + downloadUrl: String! +} `, BuiltIn: false}, {Name: "../../federation/directives.graphql", Input: ` directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE @@ -375,10 +544,11 @@ type Product @key(fields: "upc") { `, BuiltIn: true}, {Name: "../../federation/entity.graphql", Input: ` # a union of all types that use the @key directive -union _Entity = Product +union _Entity = DigitalProduct | Product # fake type to build resolver interfaces for users to implement type Entity { + findDigitalProductByUpc(upc: String!,): DigitalProduct! findProductByUpc(upc: String!,): Product! } @@ -398,6 +568,34 @@ var parsedSchema = gqlparser.MustLoadSchema(sources...) // region ***************************** args.gotpl ***************************** +func (ec *executionContext) field_Entity_findDigitalProductByUpc_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Entity_findDigitalProductByUpc_argsUpc(ctx, rawArgs) + if err != nil { + return nil, err + } + args["upc"] = arg0 + return args, nil +} +func (ec *executionContext) field_Entity_findDigitalProductByUpc_argsUpc( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["upc"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("upc")) + if tmp, ok := rawArgs["upc"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + func (ec *executionContext) field_Entity_findProductByUpc_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -533,6 +731,62 @@ func (ec *executionContext) field_Query__entities_argsRepresentations( return zeroVal, nil } +func (ec *executionContext) field_Query_product_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_product_argsUpc(ctx, rawArgs) + if err != nil { + return nil, err + } + args["upc"] = arg0 + return args, nil +} +func (ec *executionContext) field_Query_product_argsUpc( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["upc"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("upc")) + if tmp, ok := rawArgs["upc"]; ok { + return ec.unmarshalNString2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + +func (ec *executionContext) field_Query_products_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Query_products_argsUpcs(ctx, rawArgs) + if err != nil { + return nil, err + } + args["upcs"] = arg0 + return args, nil +} +func (ec *executionContext) field_Query_products_argsUpcs( + ctx context.Context, + rawArgs map[string]any, +) ([]string, error) { + if _, ok := rawArgs["upcs"]; !ok { + var zeroVal []string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("upcs")) + if tmp, ok := rawArgs["upcs"]; ok { + return ec.unmarshalNString2ᚕstringᚄ(ctx, tmp) + } + + var zeroVal []string + return zeroVal, nil +} + func (ec *executionContext) field_Query_topProducts_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -561,17 +815,17 @@ func (ec *executionContext) field_Query_topProducts_argsFirst( return zeroVal, nil } -func (ec *executionContext) field_Subscription_updateProductPrice_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Subscription_updateDigitalProductPriceInterface_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field_Subscription_updateProductPrice_argsUpc(ctx, rawArgs) + arg0, err := ec.field_Subscription_updateDigitalProductPriceInterface_argsUpc(ctx, rawArgs) if err != nil { return nil, err } args["upc"] = arg0 return args, nil } -func (ec *executionContext) field_Subscription_updateProductPrice_argsUpc( +func (ec *executionContext) field_Subscription_updateDigitalProductPriceInterface_argsUpc( ctx context.Context, rawArgs map[string]any, ) (string, error) { @@ -589,255 +843,268 @@ func (ec *executionContext) field_Subscription_updateProductPrice_argsUpc( return zeroVal, nil } -func (ec *executionContext) field___Directive_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Subscription_updateDigitalProductPriceUnion_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field___Directive_args_argsIncludeDeprecated(ctx, rawArgs) + arg0, err := ec.field_Subscription_updateDigitalProductPriceUnion_argsUpc(ctx, rawArgs) if err != nil { return nil, err } - args["includeDeprecated"] = arg0 + args["upc"] = arg0 return args, nil } -func (ec *executionContext) field___Directive_args_argsIncludeDeprecated( +func (ec *executionContext) field_Subscription_updateDigitalProductPriceUnion_argsUpc( ctx context.Context, rawArgs map[string]any, -) (*bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal *bool +) (string, error) { + if _, ok := rawArgs["upc"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("upc")) + if tmp, ok := rawArgs["upc"]; ok { + return ec.unmarshalNString2string(ctx, tmp) } - var zeroVal *bool + var zeroVal string return zeroVal, nil } -func (ec *executionContext) field___Field_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Subscription_updateProductPriceInterface_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field___Field_args_argsIncludeDeprecated(ctx, rawArgs) + arg0, err := ec.field_Subscription_updateProductPriceInterface_argsUpc(ctx, rawArgs) if err != nil { return nil, err } - args["includeDeprecated"] = arg0 + args["upc"] = arg0 return args, nil } -func (ec *executionContext) field___Field_args_argsIncludeDeprecated( +func (ec *executionContext) field_Subscription_updateProductPriceInterface_argsUpc( ctx context.Context, rawArgs map[string]any, -) (*bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal *bool +) (string, error) { + if _, ok := rawArgs["upc"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("upc")) + if tmp, ok := rawArgs["upc"]; ok { + return ec.unmarshalNString2string(ctx, tmp) } - var zeroVal *bool + var zeroVal string return zeroVal, nil } -func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Subscription_updateProductPriceUnion_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field___Type_enumValues_argsIncludeDeprecated(ctx, rawArgs) + arg0, err := ec.field_Subscription_updateProductPriceUnion_argsUpc(ctx, rawArgs) if err != nil { return nil, err } - args["includeDeprecated"] = arg0 + args["upc"] = arg0 return args, nil } -func (ec *executionContext) field___Type_enumValues_argsIncludeDeprecated( +func (ec *executionContext) field_Subscription_updateProductPriceUnion_argsUpc( ctx context.Context, rawArgs map[string]any, -) (bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal bool +) (string, error) { + if _, ok := rawArgs["upc"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2bool(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("upc")) + if tmp, ok := rawArgs["upc"]; ok { + return ec.unmarshalNString2string(ctx, tmp) } - var zeroVal bool + var zeroVal string return zeroVal, nil } -func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { +func (ec *executionContext) field_Subscription_updateProductPrice_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field___Type_fields_argsIncludeDeprecated(ctx, rawArgs) + arg0, err := ec.field_Subscription_updateProductPrice_argsUpc(ctx, rawArgs) if err != nil { return nil, err } - args["includeDeprecated"] = arg0 + args["upc"] = arg0 return args, nil } -func (ec *executionContext) field___Type_fields_argsIncludeDeprecated( +func (ec *executionContext) field_Subscription_updateProductPrice_argsUpc( ctx context.Context, rawArgs map[string]any, -) (bool, error) { - if _, ok := rawArgs["includeDeprecated"]; !ok { - var zeroVal bool +) (string, error) { + if _, ok := rawArgs["upc"]; !ok { + var zeroVal string return zeroVal, nil } - ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) - if tmp, ok := rawArgs["includeDeprecated"]; ok { - return ec.unmarshalOBoolean2bool(ctx, tmp) + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("upc")) + if tmp, ok := rawArgs["upc"]; ok { + return ec.unmarshalNString2string(ctx, tmp) } - var zeroVal bool + var zeroVal string return zeroVal, nil } -// endregion ***************************** args.gotpl ***************************** - -// region ************************** directives.gotpl ************************** - -// endregion ************************** directives.gotpl ************************** - -// region **************************** field.gotpl ***************************** - -func (ec *executionContext) _Entity_findProductByUpc(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Entity_findProductByUpc(ctx, field) +func (ec *executionContext) field_Subscription_updatedPrices_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Subscription_updatedPrices_argsFirst(ctx, rawArgs) if err != nil { - return graphql.Null + return nil, err } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return ec.resolvers.Entity().FindProductByUpc(rctx, fc.Args["upc"].(string)) - }) - if err != nil { - ec.Error(ctx, err) - return graphql.Null + args["first"] = arg0 + return args, nil +} +func (ec *executionContext) field_Subscription_updatedPrices_argsFirst( + ctx context.Context, + rawArgs map[string]any, +) (*int, error) { + if _, ok := rawArgs["first"]; !ok { + var zeroVal *int + return zeroVal, nil } - if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } - return graphql.Null + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("first")) + if tmp, ok := rawArgs["first"]; ok { + return ec.unmarshalOInt2ᚖint(ctx, tmp) } - res := resTmp.(*model.Product) - fc.Result = res - return ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) + + var zeroVal *int + return zeroVal, nil } -func (ec *executionContext) fieldContext_Entity_findProductByUpc(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "Entity", - Field: field, - IsMethod: true, - IsResolver: true, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "upc": - return ec.fieldContext_Product_upc(ctx, field) - case "name": - return ec.fieldContext_Product_name(ctx, field) - case "price": - return ec.fieldContext_Product_price(ctx, field) - case "inStock": - return ec.fieldContext_Product_inStock(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) - }, +func (ec *executionContext) field___Directive_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field___Directive_args_argsIncludeDeprecated(ctx, rawArgs) + if err != nil { + return nil, err } - defer func() { - if r := recover(); r != nil { - err = ec.Recover(ctx, r) - ec.Error(ctx, err) - } - }() - ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Entity_findProductByUpc_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { - ec.Error(ctx, err) - return fc, err + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Directive_args_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (*bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal *bool + return zeroVal, nil } - return fc, nil + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + } + + var zeroVal *bool + return zeroVal, nil } -func (ec *executionContext) _Mutation_setPrice(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Mutation_setPrice(ctx, field) +func (ec *executionContext) field___Field_args_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field___Field_args_argsIncludeDeprecated(ctx, rawArgs) if err != nil { - return graphql.Null + return nil, err } - ctx = graphql.WithFieldContext(ctx, fc) - defer func() { - if r := recover(); r != nil { - ec.Error(ctx, ec.Recover(ctx, r)) - ret = graphql.Null - } - }() - resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { - ctx = rctx // use context from middleware stack in children - return ec.resolvers.Mutation().SetPrice(rctx, fc.Args["upc"].(string), fc.Args["price"].(int)) - }) + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Field_args_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (*bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal *bool + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2ᚖbool(ctx, tmp) + } + + var zeroVal *bool + return zeroVal, nil +} + +func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field___Type_enumValues_argsIncludeDeprecated(ctx, rawArgs) if err != nil { - ec.Error(ctx, err) - return graphql.Null + return nil, err } - if resTmp == nil { - return graphql.Null + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Type_enumValues_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal bool + return zeroVal, nil } - res := resTmp.(*model.Product) - fc.Result = res - return ec.marshalOProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2bool(ctx, tmp) + } + + var zeroVal bool + return zeroVal, nil } -func (ec *executionContext) fieldContext_Mutation_setPrice(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { - fc = &graphql.FieldContext{ - Object: "Mutation", - Field: field, - IsMethod: true, - IsResolver: true, - Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "upc": - return ec.fieldContext_Product_upc(ctx, field) - case "name": - return ec.fieldContext_Product_name(ctx, field) - case "price": - return ec.fieldContext_Product_price(ctx, field) - case "inStock": - return ec.fieldContext_Product_inStock(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) - }, +func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field___Type_fields_argsIncludeDeprecated(ctx, rawArgs) + if err != nil { + return nil, err } - defer func() { - if r := recover(); r != nil { - err = ec.Recover(ctx, r) - ec.Error(ctx, err) - } - }() - ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Mutation_setPrice_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { - ec.Error(ctx, err) - return fc, err + args["includeDeprecated"] = arg0 + return args, nil +} +func (ec *executionContext) field___Type_fields_argsIncludeDeprecated( + ctx context.Context, + rawArgs map[string]any, +) (bool, error) { + if _, ok := rawArgs["includeDeprecated"]; !ok { + var zeroVal bool + return zeroVal, nil } - return fc, nil + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated")) + if tmp, ok := rawArgs["includeDeprecated"]; ok { + return ec.unmarshalOBoolean2bool(ctx, tmp) + } + + var zeroVal bool + return zeroVal, nil } -func (ec *executionContext) _Product_upc(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Product_upc(ctx, field) +// endregion ***************************** args.gotpl ***************************** + +// region ************************** directives.gotpl ************************** + +// endregion ************************** directives.gotpl ************************** + +// region **************************** field.gotpl ***************************** + +func (ec *executionContext) _DigitalProduct_upc(ctx context.Context, field graphql.CollectedField, obj *model.DigitalProduct) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DigitalProduct_upc(ctx, field) if err != nil { return graphql.Null } @@ -867,9 +1134,9 @@ func (ec *executionContext) _Product_upc(ctx context.Context, field graphql.Coll return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Product_upc(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_DigitalProduct_upc(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Product", + Object: "DigitalProduct", Field: field, IsMethod: false, IsResolver: false, @@ -880,8 +1147,8 @@ func (ec *executionContext) fieldContext_Product_upc(_ context.Context, field gr return fc, nil } -func (ec *executionContext) _Product_name(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Product_name(ctx, field) +func (ec *executionContext) _DigitalProduct_name(ctx context.Context, field graphql.CollectedField, obj *model.DigitalProduct) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DigitalProduct_name(ctx, field) if err != nil { return graphql.Null } @@ -911,9 +1178,9 @@ func (ec *executionContext) _Product_name(ctx context.Context, field graphql.Col return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Product_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_DigitalProduct_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Product", + Object: "DigitalProduct", Field: field, IsMethod: false, IsResolver: false, @@ -924,8 +1191,8 @@ func (ec *executionContext) fieldContext_Product_name(_ context.Context, field g return fc, nil } -func (ec *executionContext) _Product_price(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Product_price(ctx, field) +func (ec *executionContext) _DigitalProduct_price(ctx context.Context, field graphql.CollectedField, obj *model.DigitalProduct) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DigitalProduct_price(ctx, field) if err != nil { return graphql.Null } @@ -955,9 +1222,9 @@ func (ec *executionContext) _Product_price(ctx context.Context, field graphql.Co return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Product_price(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_DigitalProduct_price(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Product", + Object: "DigitalProduct", Field: field, IsMethod: false, IsResolver: false, @@ -968,8 +1235,8 @@ func (ec *executionContext) fieldContext_Product_price(_ context.Context, field return fc, nil } -func (ec *executionContext) _Product_inStock(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Product_inStock(ctx, field) +func (ec *executionContext) _DigitalProduct_downloadUrl(ctx context.Context, field graphql.CollectedField, obj *model.DigitalProduct) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_DigitalProduct_downloadUrl(ctx, field) if err != nil { return graphql.Null } @@ -982,7 +1249,7 @@ func (ec *executionContext) _Product_inStock(ctx context.Context, field graphql. }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return obj.InStock, nil + return obj.DownloadURL, nil }) if err != nil { ec.Error(ctx, err) @@ -994,26 +1261,26 @@ func (ec *executionContext) _Product_inStock(ctx context.Context, field graphql. } return graphql.Null } - res := resTmp.(int) + res := resTmp.(string) fc.Result = res - return ec.marshalNInt2int(ctx, field.Selections, res) + return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Product_inStock(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_DigitalProduct_downloadUrl(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Product", + Object: "DigitalProduct", Field: field, IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type Int does not have child fields") + return nil, errors.New("field of type String does not have child fields") }, } return fc, nil } -func (ec *executionContext) _Query_topProducts(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query_topProducts(ctx, field) +func (ec *executionContext) _Entity_findDigitalProductByUpc(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Entity_findDigitalProductByUpc(ctx, field) if err != nil { return graphql.Null } @@ -1026,38 +1293,41 @@ func (ec *executionContext) _Query_topProducts(ctx context.Context, field graphq }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Query().TopProducts(rctx, fc.Args["first"].(*int)) + return ec.resolvers.Entity().FindDigitalProductByUpc(rctx, fc.Args["upc"].(string)) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } return graphql.Null } - res := resTmp.([]*model.Product) + res := resTmp.(*model.DigitalProduct) fc.Result = res - return ec.marshalOProduct2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) + return ec.marshalNDigitalProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐDigitalProduct(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query_topProducts(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Entity_findDigitalProductByUpc(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Query", + Object: "Entity", Field: field, IsMethod: true, IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { case "upc": - return ec.fieldContext_Product_upc(ctx, field) + return ec.fieldContext_DigitalProduct_upc(ctx, field) case "name": - return ec.fieldContext_Product_name(ctx, field) + return ec.fieldContext_DigitalProduct_name(ctx, field) case "price": - return ec.fieldContext_Product_price(ctx, field) - case "inStock": - return ec.fieldContext_Product_inStock(ctx, field) + return ec.fieldContext_DigitalProduct_price(ctx, field) + case "downloadUrl": + return ec.fieldContext_DigitalProduct_downloadUrl(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) + return nil, fmt.Errorf("no field named %q was found under type DigitalProduct", field.Name) }, } defer func() { @@ -1067,15 +1337,15 @@ func (ec *executionContext) fieldContext_Query_topProducts(ctx context.Context, } }() ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Query_topProducts_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + if fc.Args, err = ec.field_Entity_findDigitalProductByUpc_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) return fc, err } return fc, nil } -func (ec *executionContext) _Query__entities(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query__entities(ctx, field) +func (ec *executionContext) _Entity_findProductByUpc(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Entity_findProductByUpc(ctx, field) if err != nil { return graphql.Null } @@ -1088,7 +1358,7 @@ func (ec *executionContext) _Query__entities(ctx context.Context, field graphql. }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.__resolve_entities(ctx, fc.Args["representations"].([]map[string]any)), nil + return ec.resolvers.Entity().FindProductByUpc(rctx, fc.Args["upc"].(string)) }) if err != nil { ec.Error(ctx, err) @@ -1100,19 +1370,29 @@ func (ec *executionContext) _Query__entities(ctx context.Context, field graphql. } return graphql.Null } - res := resTmp.([]fedruntime.Entity) + res := resTmp.(*model.Product) fc.Result = res - return ec.marshalN_Entity2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋpluginᚋfederationᚋfedruntimeᚐEntity(ctx, field.Selections, res) + return ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query__entities(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Entity_findProductByUpc(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Query", + Object: "Entity", Field: field, IsMethod: true, - IsResolver: false, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type _Entity does not have child fields") + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) }, } defer func() { @@ -1122,15 +1402,15 @@ func (ec *executionContext) fieldContext_Query__entities(ctx context.Context, fi } }() ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Query__entities_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + if fc.Args, err = ec.field_Entity_findProductByUpc_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) return fc, err } return fc, nil } -func (ec *executionContext) _Query__service(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query__service(ctx, field) +func (ec *executionContext) _Mutation_setPrice(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Mutation_setPrice(ctx, field) if err != nil { return graphql.Null } @@ -1143,42 +1423,56 @@ func (ec *executionContext) _Query__service(ctx context.Context, field graphql.C }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.__resolve__service(ctx) + return ec.resolvers.Mutation().SetPrice(rctx, fc.Args["upc"].(string), fc.Args["price"].(int)) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } - res := resTmp.(fedruntime.Service) + res := resTmp.(*model.Product) fc.Result = res - return ec.marshalN_Service2githubᚗcomᚋ99designsᚋgqlgenᚋpluginᚋfederationᚋfedruntimeᚐService(ctx, field.Selections, res) + return ec.marshalOProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query__service(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Mutation_setPrice(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Query", + Object: "Mutation", Field: field, IsMethod: true, - IsResolver: false, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { switch field.Name { - case "sdl": - return ec.fieldContext__Service_sdl(ctx, field) + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) } - return nil, fmt.Errorf("no field named %q was found under type _Service", field.Name) + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Mutation_setPrice_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } -func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query___type(ctx, field) +func (ec *executionContext) _Product_upc(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Product_upc(ctx, field) if err != nil { return graphql.Null } @@ -1191,70 +1485,82 @@ func (ec *executionContext) _Query___type(ctx context.Context, field graphql.Col }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.introspectType(fc.Args["name"].(string)) + return obj.Upc, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } return graphql.Null } - res := resTmp.(*introspection.Type) + res := resTmp.(string) fc.Result = res - return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) + return ec.marshalNString2string(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Product_upc(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Query", + Object: "Product", Field: field, - IsMethod: true, + IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "kind": - return ec.fieldContext___Type_kind(ctx, field) - case "name": - return ec.fieldContext___Type_name(ctx, field) - case "description": - return ec.fieldContext___Type_description(ctx, field) - case "specifiedByURL": - return ec.fieldContext___Type_specifiedByURL(ctx, field) - case "fields": - return ec.fieldContext___Type_fields(ctx, field) - case "interfaces": - return ec.fieldContext___Type_interfaces(ctx, field) - case "possibleTypes": - return ec.fieldContext___Type_possibleTypes(ctx, field) - case "enumValues": - return ec.fieldContext___Type_enumValues(ctx, field) - case "inputFields": - return ec.fieldContext___Type_inputFields(ctx, field) - case "ofType": - return ec.fieldContext___Type_ofType(ctx, field) - case "isOneOf": - return ec.fieldContext___Type_isOneOf(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + return nil, errors.New("field of type String does not have child fields") }, } + return fc, nil +} + +func (ec *executionContext) _Product_name(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Product_name(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) defer func() { if r := recover(); r != nil { - err = ec.Recover(ctx, r) - ec.Error(ctx, err) + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null } }() - ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.Name, nil + }) + if err != nil { ec.Error(ctx, err) - return fc, err + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Product_name(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Product", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, } return fc, nil } -func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query___schema(ctx, field) +func (ec *executionContext) _Product_price(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Product_price(ctx, field) if err != nil { return graphql.Null } @@ -1267,117 +1573,935 @@ func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.C }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.introspectSchema() + return obj.Price, nil }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } return graphql.Null } - res := resTmp.(*introspection.Schema) + res := resTmp.(int) fc.Result = res - return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res) + return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query___schema(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Product_price(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Query", + Object: "Product", Field: field, - IsMethod: true, + IsMethod: false, IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "description": - return ec.fieldContext___Schema_description(ctx, field) - case "types": - return ec.fieldContext___Schema_types(ctx, field) - case "queryType": - return ec.fieldContext___Schema_queryType(ctx, field) - case "mutationType": - return ec.fieldContext___Schema_mutationType(ctx, field) - case "subscriptionType": - return ec.fieldContext___Schema_subscriptionType(ctx, field) - case "directives": - return ec.fieldContext___Schema_directives(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type __Schema", field.Name) + return nil, errors.New("field of type Int does not have child fields") }, } return fc, nil } -func (ec *executionContext) _Subscription_updatedPrice(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { - fc, err := ec.fieldContext_Subscription_updatedPrice(ctx, field) +func (ec *executionContext) _Product_inStock(ctx context.Context, field graphql.CollectedField, obj *model.Product) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Product_inStock(ctx, field) if err != nil { - return nil + return graphql.Null } ctx = graphql.WithFieldContext(ctx, fc) defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) - ret = nil + ret = graphql.Null } }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Subscription().UpdatedPrice(rctx) + return obj.InStock, nil }) if err != nil { ec.Error(ctx, err) - return nil + return graphql.Null } if resTmp == nil { if !graphql.HasFieldError(ctx, fc) { ec.Errorf(ctx, "must not be null") } - return nil - } - return func(ctx context.Context) graphql.Marshaler { - select { - case res, ok := <-resTmp.(<-chan *model.Product): - if !ok { - return nil - } - return graphql.WriterFunc(func(w io.Writer) { - w.Write([]byte{'{'}) - graphql.MarshalString(field.Alias).MarshalGQL(w) - w.Write([]byte{':'}) - ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res).MarshalGQL(w) - w.Write([]byte{'}'}) - }) - case <-ctx.Done(): - return nil - } + return graphql.Null } + res := resTmp.(int) + fc.Result = res + return ec.marshalNInt2int(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Subscription_updatedPrice(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Product_inStock(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ - Object: "Subscription", + Object: "Product", Field: field, - IsMethod: true, - IsResolver: true, + IsMethod: false, + IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "upc": - return ec.fieldContext_Product_upc(ctx, field) - case "name": - return ec.fieldContext_Product_name(ctx, field) - case "price": - return ec.fieldContext_Product_price(ctx, field) - case "inStock": - return ec.fieldContext_Product_inStock(ctx, field) - } + return nil, errors.New("field of type Int does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _Query_topProducts(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_topProducts(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().TopProducts(rctx, fc.Args["first"].(*int)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*model.Product) + fc.Result = res + return ec.marshalOProduct2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_topProducts(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_topProducts_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_product(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_product(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Product(rctx, fc.Args["upc"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*model.Product) + fc.Result = res + return ec.marshalOProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_product(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_product_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query_products(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_products(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().Products(rctx, fc.Args["upcs"].([]string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*model.Product) + fc.Result = res + return ec.marshalOProduct2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_products(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query_products_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query__entities(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query__entities(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.__resolve_entities(ctx, fc.Args["representations"].([]map[string]any)), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]fedruntime.Entity) + fc.Result = res + return ec.marshalN_Entity2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋpluginᚋfederationᚋfedruntimeᚐEntity(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query__entities(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type _Entity does not have child fields") + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query__entities_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query__service(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query__service(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.__resolve__service(ctx) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(fedruntime.Service) + fc.Result = res + return ec.marshalN_Service2githubᚗcomᚋ99designsᚋgqlgenᚋpluginᚋfederationᚋfedruntimeᚐService(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query__service(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "sdl": + return ec.fieldContext__Service_sdl(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type _Service", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query___type(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.introspectType(fc.Args["name"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Type) + fc.Result = res + return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "kind": + return ec.fieldContext___Type_kind(ctx, field) + case "name": + return ec.fieldContext___Type_name(ctx, field) + case "description": + return ec.fieldContext___Type_description(ctx, field) + case "specifiedByURL": + return ec.fieldContext___Type_specifiedByURL(ctx, field) + case "fields": + return ec.fieldContext___Type_fields(ctx, field) + case "interfaces": + return ec.fieldContext___Type_interfaces(ctx, field) + case "possibleTypes": + return ec.fieldContext___Type_possibleTypes(ctx, field) + case "enumValues": + return ec.fieldContext___Type_enumValues(ctx, field) + case "inputFields": + return ec.fieldContext___Type_inputFields(ctx, field) + case "ofType": + return ec.fieldContext___Type_ofType(ctx, field) + case "isOneOf": + return ec.fieldContext___Type_isOneOf(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query___schema(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.introspectSchema() + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.(*introspection.Schema) + fc.Result = res + return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query___schema(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "description": + return ec.fieldContext___Schema_description(ctx, field) + case "types": + return ec.fieldContext___Schema_types(ctx, field) + case "queryType": + return ec.fieldContext___Schema_queryType(ctx, field) + case "mutationType": + return ec.fieldContext___Schema_mutationType(ctx, field) + case "subscriptionType": + return ec.fieldContext___Schema_subscriptionType(ctx, field) + case "directives": + return ec.fieldContext___Schema_directives(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type __Schema", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Subscription_updatedPrice(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { + fc, err := ec.fieldContext_Subscription_updatedPrice(ctx, field) + if err != nil { + return nil + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Subscription().UpdatedPrice(rctx) + }) + if err != nil { + ec.Error(ctx, err) + return nil + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return nil + } + return func(ctx context.Context) graphql.Marshaler { + select { + case res, ok := <-resTmp.(<-chan *model.Product): + if !ok { + return nil + } + return graphql.WriterFunc(func(w io.Writer) { + w.Write([]byte{'{'}) + graphql.MarshalString(field.Alias).MarshalGQL(w) + w.Write([]byte{':'}) + ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res).MarshalGQL(w) + w.Write([]byte{'}'}) + }) + case <-ctx.Done(): + return nil + } + } +} + +func (ec *executionContext) fieldContext_Subscription_updatedPrice(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Subscription", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Subscription_updateProductPrice(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { + fc, err := ec.fieldContext_Subscription_updateProductPrice(ctx, field) + if err != nil { + return nil + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Subscription().UpdateProductPrice(rctx, fc.Args["upc"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return nil + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return nil + } + return func(ctx context.Context) graphql.Marshaler { + select { + case res, ok := <-resTmp.(<-chan *model.Product): + if !ok { + return nil + } + return graphql.WriterFunc(func(w io.Writer) { + w.Write([]byte{'{'}) + graphql.MarshalString(field.Alias).MarshalGQL(w) + w.Write([]byte{':'}) + ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res).MarshalGQL(w) + w.Write([]byte{'}'}) + }) + case <-ctx.Done(): + return nil + } + } +} + +func (ec *executionContext) fieldContext_Subscription_updateProductPrice(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Subscription", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Subscription_updateProductPrice_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Subscription_updatedPrices(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { + fc, err := ec.fieldContext_Subscription_updatedPrices(ctx, field) + if err != nil { + return nil + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Subscription().UpdatedPrices(rctx, fc.Args["first"].(*int)) + }) + if err != nil { + ec.Error(ctx, err) + return nil + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return nil + } + return func(ctx context.Context) graphql.Marshaler { + select { + case res, ok := <-resTmp.(<-chan []*model.Product): + if !ok { + return nil + } + return graphql.WriterFunc(func(w io.Writer) { + w.Write([]byte{'{'}) + graphql.MarshalString(field.Alias).MarshalGQL(w) + w.Write([]byte{':'}) + ec.marshalNProduct2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductᚄ(ctx, field.Selections, res).MarshalGQL(w) + w.Write([]byte{'}'}) + }) + case <-ctx.Done(): + return nil + } + } +} + +func (ec *executionContext) fieldContext_Subscription_updatedPrices(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Subscription", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "upc": + return ec.fieldContext_Product_upc(ctx, field) + case "name": + return ec.fieldContext_Product_name(ctx, field) + case "price": + return ec.fieldContext_Product_price(ctx, field) + case "inStock": + return ec.fieldContext_Product_inStock(ctx, field) + } return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) }, } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Subscription_updatedPrices_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Subscription_updateProductPriceUnion(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { + fc, err := ec.fieldContext_Subscription_updateProductPriceUnion(ctx, field) + if err != nil { + return nil + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Subscription().UpdateProductPriceUnion(rctx, fc.Args["upc"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return nil + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return nil + } + return func(ctx context.Context) graphql.Marshaler { + select { + case res, ok := <-resTmp.(<-chan model.ProductUpdate): + if !ok { + return nil + } + return graphql.WriterFunc(func(w io.Writer) { + w.Write([]byte{'{'}) + graphql.MarshalString(field.Alias).MarshalGQL(w) + w.Write([]byte{':'}) + ec.marshalNProductUpdate2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductUpdate(ctx, field.Selections, res).MarshalGQL(w) + w.Write([]byte{'}'}) + }) + case <-ctx.Done(): + return nil + } + } +} + +func (ec *executionContext) fieldContext_Subscription_updateProductPriceUnion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Subscription", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ProductUpdate does not have child fields") + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Subscription_updateProductPriceUnion_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Subscription_updateProductPriceInterface(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { + fc, err := ec.fieldContext_Subscription_updateProductPriceInterface(ctx, field) + if err != nil { + return nil + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Subscription().UpdateProductPriceInterface(rctx, fc.Args["upc"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return nil + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return nil + } + return func(ctx context.Context) graphql.Marshaler { + select { + case res, ok := <-resTmp.(<-chan model.ProductInterface): + if !ok { + return nil + } + return graphql.WriterFunc(func(w io.Writer) { + w.Write([]byte{'{'}) + graphql.MarshalString(field.Alias).MarshalGQL(w) + w.Write([]byte{':'}) + ec.marshalNProductInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductInterface(ctx, field.Selections, res).MarshalGQL(w) + w.Write([]byte{'}'}) + }) + case <-ctx.Done(): + return nil + } + } +} + +func (ec *executionContext) fieldContext_Subscription_updateProductPriceInterface(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Subscription", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Subscription_updateProductPriceInterface_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + +func (ec *executionContext) _Subscription_updateDigitalProductPriceUnion(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { + fc, err := ec.fieldContext_Subscription_updateDigitalProductPriceUnion(ctx, field) + if err != nil { + return nil + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Subscription().UpdateDigitalProductPriceUnion(rctx, fc.Args["upc"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return nil + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return nil + } + return func(ctx context.Context) graphql.Marshaler { + select { + case res, ok := <-resTmp.(<-chan model.ProductUpdate): + if !ok { + return nil + } + return graphql.WriterFunc(func(w io.Writer) { + w.Write([]byte{'{'}) + graphql.MarshalString(field.Alias).MarshalGQL(w) + w.Write([]byte{':'}) + ec.marshalNProductUpdate2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductUpdate(ctx, field.Selections, res).MarshalGQL(w) + w.Write([]byte{'}'}) + }) + case <-ctx.Done(): + return nil + } + } +} + +func (ec *executionContext) fieldContext_Subscription_updateDigitalProductPriceUnion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Subscription", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ProductUpdate does not have child fields") + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Subscription_updateDigitalProductPriceUnion_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } return fc, nil } -func (ec *executionContext) _Subscription_updateProductPrice(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { - fc, err := ec.fieldContext_Subscription_updateProductPrice(ctx, field) +func (ec *executionContext) _Subscription_updateDigitalProductPriceInterface(ctx context.Context, field graphql.CollectedField) (ret func(ctx context.Context) graphql.Marshaler) { + fc, err := ec.fieldContext_Subscription_updateDigitalProductPriceInterface(ctx, field) if err != nil { return nil } @@ -1390,7 +2514,7 @@ func (ec *executionContext) _Subscription_updateProductPrice(ctx context.Context }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Subscription().UpdateProductPrice(rctx, fc.Args["upc"].(string)) + return ec.resolvers.Subscription().UpdateDigitalProductPriceInterface(rctx, fc.Args["upc"].(string)) }) if err != nil { ec.Error(ctx, err) @@ -1404,7 +2528,7 @@ func (ec *executionContext) _Subscription_updateProductPrice(ctx context.Context } return func(ctx context.Context) graphql.Marshaler { select { - case res, ok := <-resTmp.(<-chan *model.Product): + case res, ok := <-resTmp.(<-chan model.ProductInterface): if !ok { return nil } @@ -1412,7 +2536,7 @@ func (ec *executionContext) _Subscription_updateProductPrice(ctx context.Context w.Write([]byte{'{'}) graphql.MarshalString(field.Alias).MarshalGQL(w) w.Write([]byte{':'}) - ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, field.Selections, res).MarshalGQL(w) + ec.marshalNProductInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductInterface(ctx, field.Selections, res).MarshalGQL(w) w.Write([]byte{'}'}) }) case <-ctx.Done(): @@ -1421,24 +2545,14 @@ func (ec *executionContext) _Subscription_updateProductPrice(ctx context.Context } } -func (ec *executionContext) fieldContext_Subscription_updateProductPrice(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Subscription_updateDigitalProductPriceInterface(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Subscription", Field: field, IsMethod: true, IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - switch field.Name { - case "upc": - return ec.fieldContext_Product_upc(ctx, field) - case "name": - return ec.fieldContext_Product_name(ctx, field) - case "price": - return ec.fieldContext_Product_price(ctx, field) - case "inStock": - return ec.fieldContext_Product_inStock(ctx, field) - } - return nil, fmt.Errorf("no field named %q was found under type Product", field.Name) + return nil, errors.New("FieldContext.Child cannot be called on type INTERFACE") }, } defer func() { @@ -1448,7 +2562,7 @@ func (ec *executionContext) fieldContext_Subscription_updateProductPrice(ctx con } }() ctx = graphql.WithFieldContext(ctx, fc) - if fc.Args, err = ec.field_Subscription_updateProductPrice_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + if fc.Args, err = ec.field_Subscription_updateDigitalProductPriceInterface_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { ec.Error(ctx, err) return fc, err } @@ -3451,6 +4565,52 @@ func (ec *executionContext) fieldContext___Type_isOneOf(_ context.Context, field // region ************************** interface.gotpl *************************** +func (ec *executionContext) _ProductInterface(ctx context.Context, sel ast.SelectionSet, obj model.ProductInterface) graphql.Marshaler { + switch obj := (obj).(type) { + case nil: + return graphql.Null + case model.Product: + return ec._Product(ctx, sel, &obj) + case *model.Product: + if obj == nil { + return graphql.Null + } + return ec._Product(ctx, sel, obj) + case model.DigitalProduct: + return ec._DigitalProduct(ctx, sel, &obj) + case *model.DigitalProduct: + if obj == nil { + return graphql.Null + } + return ec._DigitalProduct(ctx, sel, obj) + default: + panic(fmt.Errorf("unexpected type %T", obj)) + } +} + +func (ec *executionContext) _ProductUpdate(ctx context.Context, sel ast.SelectionSet, obj model.ProductUpdate) graphql.Marshaler { + switch obj := (obj).(type) { + case nil: + return graphql.Null + case model.Product: + return ec._Product(ctx, sel, &obj) + case *model.Product: + if obj == nil { + return graphql.Null + } + return ec._Product(ctx, sel, obj) + case model.DigitalProduct: + return ec._DigitalProduct(ctx, sel, &obj) + case *model.DigitalProduct: + if obj == nil { + return graphql.Null + } + return ec._DigitalProduct(ctx, sel, obj) + default: + panic(fmt.Errorf("unexpected type %T", obj)) + } +} + func (ec *executionContext) __Entity(ctx context.Context, sel ast.SelectionSet, obj fedruntime.Entity) graphql.Marshaler { switch obj := (obj).(type) { case nil: @@ -3462,6 +4622,13 @@ func (ec *executionContext) __Entity(ctx context.Context, sel ast.SelectionSet, return graphql.Null } return ec._Product(ctx, sel, obj) + case model.DigitalProduct: + return ec._DigitalProduct(ctx, sel, &obj) + case *model.DigitalProduct: + if obj == nil { + return graphql.Null + } + return ec._DigitalProduct(ctx, sel, obj) default: panic(fmt.Errorf("unexpected type %T", obj)) } @@ -3471,6 +4638,60 @@ func (ec *executionContext) __Entity(ctx context.Context, sel ast.SelectionSet, // region **************************** object.gotpl **************************** +var digitalProductImplementors = []string{"DigitalProduct", "ProductUpdate", "ProductInterface", "_Entity"} + +func (ec *executionContext) _DigitalProduct(ctx context.Context, sel ast.SelectionSet, obj *model.DigitalProduct) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, digitalProductImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("DigitalProduct") + case "upc": + out.Values[i] = ec._DigitalProduct_upc(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "name": + out.Values[i] = ec._DigitalProduct_name(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "price": + out.Values[i] = ec._DigitalProduct_price(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + case "downloadUrl": + out.Values[i] = ec._DigitalProduct_downloadUrl(ctx, field, obj) + if out.Values[i] == graphql.Null { + out.Invalids++ + } + default: + panic("unknown field " + strconv.Quote(field.Name)) + } + } + out.Dispatch(ctx) + if out.Invalids > 0 { + return graphql.Null + } + + atomic.AddInt32(&ec.deferred, int32(len(deferred))) + + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} + var entityImplementors = []string{"Entity"} func (ec *executionContext) _Entity(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler { @@ -3490,6 +4711,28 @@ func (ec *executionContext) _Entity(ctx context.Context, sel ast.SelectionSet) g switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Entity") + case "findDigitalProductByUpc": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Entity_findDigitalProductByUpc(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "findProductByUpc": field := field @@ -3581,7 +4824,7 @@ func (ec *executionContext) _Mutation(ctx context.Context, sel ast.SelectionSet) return out } -var productImplementors = []string{"Product", "_Entity"} +var productImplementors = []string{"Product", "ProductUpdate", "ProductInterface", "_Entity"} func (ec *executionContext) _Product(ctx context.Context, sel ast.SelectionSet, obj *model.Product) graphql.Marshaler { fields := graphql.CollectFields(ec.OperationContext, sel, productImplementors) @@ -3672,6 +4915,44 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "product": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_product(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "products": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_products(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "_entities": field := field @@ -3765,6 +5046,16 @@ func (ec *executionContext) _Subscription(ctx context.Context, sel ast.Selection return ec._Subscription_updatedPrice(ctx, fields[0]) case "updateProductPrice": return ec._Subscription_updateProductPrice(ctx, fields[0]) + case "updatedPrices": + return ec._Subscription_updatedPrices(ctx, fields[0]) + case "updateProductPriceUnion": + return ec._Subscription_updateProductPriceUnion(ctx, fields[0]) + case "updateProductPriceInterface": + return ec._Subscription_updateProductPriceInterface(ctx, fields[0]) + case "updateDigitalProductPriceUnion": + return ec._Subscription_updateDigitalProductPriceUnion(ctx, fields[0]) + case "updateDigitalProductPriceInterface": + return ec._Subscription_updateDigitalProductPriceInterface(ctx, fields[0]) default: panic("unknown field " + strconv.Quote(fields[0].Name)) } @@ -4157,6 +5448,20 @@ func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.Se return res } +func (ec *executionContext) marshalNDigitalProduct2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐDigitalProduct(ctx context.Context, sel ast.SelectionSet, v model.DigitalProduct) graphql.Marshaler { + return ec._DigitalProduct(ctx, sel, &v) +} + +func (ec *executionContext) marshalNDigitalProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐDigitalProduct(ctx context.Context, sel ast.SelectionSet, v *model.DigitalProduct) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._DigitalProduct(ctx, sel, v) +} + func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v any) (int, error) { res, err := graphql.UnmarshalInt(v) return res, graphql.ErrorOnPath(ctx, err) @@ -4177,6 +5482,50 @@ func (ec *executionContext) marshalNProduct2githubᚗcomᚋwundergraphᚋgraphql return ec._Product(ctx, sel, &v) } +func (ec *executionContext) marshalNProduct2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Product) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProduct(ctx context.Context, sel ast.SelectionSet, v *model.Product) graphql.Marshaler { if v == nil { if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { @@ -4187,6 +5536,26 @@ func (ec *executionContext) marshalNProduct2ᚖgithubᚗcomᚋwundergraphᚋgrap return ec._Product(ctx, sel, v) } +func (ec *executionContext) marshalNProductInterface2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductInterface(ctx context.Context, sel ast.SelectionSet, v model.ProductInterface) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._ProductInterface(ctx, sel, v) +} + +func (ec *executionContext) marshalNProductUpdate2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋproductsᚋgraphᚋmodelᚐProductUpdate(ctx context.Context, sel ast.SelectionSet, v model.ProductUpdate) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._ProductUpdate(ctx, sel, v) +} + func (ec *executionContext) unmarshalNString2string(ctx context.Context, v any) (string, error) { res, err := graphql.UnmarshalString(v) return res, graphql.ErrorOnPath(ctx, err) @@ -4203,6 +5572,36 @@ func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.S return res } +func (ec *executionContext) unmarshalNString2ᚕstringᚄ(ctx context.Context, v any) ([]string, error) { + var vSlice []any + vSlice = graphql.CoerceList(v) + var err error + res := make([]string, len(vSlice)) + for i := range vSlice { + ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i)) + res[i], err = ec.unmarshalNString2string(ctx, vSlice[i]) + if err != nil { + return nil, err + } + } + return res, nil +} + +func (ec *executionContext) marshalNString2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + for i := range v { + ret[i] = ec.marshalNString2string(ctx, sel, v[i]) + } + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) unmarshalN_Any2map(ctx context.Context, v any) (map[string]any, error) { res, err := graphql.UnmarshalMap(v) return res, graphql.ErrorOnPath(ctx, err) diff --git a/execution/federationtesting/products/graph/handler.go b/execution/federationtesting/products/graph/handler.go index a9333740b1..135f261aca 100644 --- a/execution/federationtesting/products/graph/handler.go +++ b/execution/federationtesting/products/graph/handler.go @@ -16,12 +16,11 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/federationtesting/products/graph/generated" ) -var websocketConnections atomic.Uint32 - type EndpointOptions struct { - EnableDebug bool - EnableRandomness bool - OverrideUpdateInterval time.Duration + EnableDebug bool + EnableRandomness bool + OverrideUpdateInterval time.Duration + EnableManualSubscriptionEvents bool } var TestOptions = EndpointOptions{ @@ -30,23 +29,61 @@ var TestOptions = EndpointOptions{ OverrideUpdateInterval: 50 * time.Millisecond, } -func GraphQLEndpointHandler(opts EndpointOptions) http.Handler { - websocketConnections.Store(0) +// Endpoint holds the GraphQL handler and its per-instance websocket connection counter. +type Endpoint struct { + handler http.Handler + websocketConnections atomic.Uint32 + subscriptionEvents *ManualSubscriptionEventSource +} + +// ServeHTTP delegates to the underlying gqlgen handler. +func (e *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) { + e.handler.ServeHTTP(w, r) +} + +// WebsocketConnectionsHandler returns an HTTP handler that reports the current +// websocket connection count for this endpoint instance. +func (e *Endpoint) WebsocketConnectionsHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + response := map[string]uint32{ + "websocket_connections": e.websocketConnections.Load(), + } + + responseBytes, err := json.Marshal(response) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte("error")) + return + } + + _, _ = w.Write(responseBytes) + } +} +func GraphQLEndpointHandler(opts EndpointOptions) *Endpoint { updateInterval := time.Second if opts.OverrideUpdateInterval > 0 { updateInterval = opts.OverrideUpdateInterval } + var subscriptionEvents *ManualSubscriptionEventSource + if opts.EnableManualSubscriptionEvents { + subscriptionEvents = NewManualSubscriptionEventSource() + } resolver := &Resolver{ - products: newProducts(), - randomnessEnabled: opts.EnableRandomness, - minPrice: 10, - maxPrice: 1499, - currentPrice: 10, - updateInterval: updateInterval, + products: newProducts(), + extraProducts: newExtraProducts(), + digitalProducts: newDigitalProducts(), + randomnessEnabled: opts.EnableRandomness, + minPrice: 10, + maxPrice: 1499, + currentPrice: 10, + updateInterval: updateInterval, + subscriptionEvents: subscriptionEvents, } + endpoint := &Endpoint{subscriptionEvents: subscriptionEvents} + srv := handler.New(generated.NewExecutableSchema(generated.Config{Resolvers: resolver})) srv.AddTransport(transport.POST{}) @@ -58,10 +95,10 @@ func GraphQLEndpointHandler(opts EndpointOptions) http.Handler { }, }, InitFunc: func(ctx context.Context, ip transport.InitPayload) (context.Context, *transport.InitPayload, error) { - websocketConnections.Inc() + endpoint.websocketConnections.Inc() go func(ctx context.Context) { <-ctx.Done() - websocketConnections.Dec() + endpoint.websocketConnections.Dec() }(ctx) return ctx, nil, nil }, @@ -72,20 +109,11 @@ func GraphQLEndpointHandler(opts EndpointOptions) http.Handler { srv.Use(&debug.Tracer{}) } - return srv + endpoint.handler = srv + return endpoint } -func WebsocketConnectionsHandler(w http.ResponseWriter, r *http.Request) { - response := map[string]uint32{ - "websocket_connections": websocketConnections.Load(), - } - - responseBytes, err := json.Marshal(response) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - _, _ = w.Write([]byte("error")) - return - } - - _, _ = w.Write(responseBytes) +// SubscriptionEvents returns the manual event source for active subscriptions. +func (e *Endpoint) SubscriptionEvents() *ManualSubscriptionEventSource { + return e.subscriptionEvents } diff --git a/execution/federationtesting/products/graph/manual_subscription_events.go b/execution/federationtesting/products/graph/manual_subscription_events.go new file mode 100644 index 0000000000..ab2705284e --- /dev/null +++ b/execution/federationtesting/products/graph/manual_subscription_events.go @@ -0,0 +1,47 @@ +package graph + +import ( + "context" +) + +// ManualSubscriptionEventSource registers one explicit emit handle per active +// subscription so tests can control event delivery deterministically. +type ManualSubscriptionEventSource struct { + registered chan *ManualSubscriptionHandle +} + +// ManualSubscriptionHandle is the per-subscription trigger used by tests. +type ManualSubscriptionHandle struct { + events chan struct{} +} + +func NewManualSubscriptionEventSource() *ManualSubscriptionEventSource { + return &ManualSubscriptionEventSource{ + registered: make(chan *ManualSubscriptionHandle, 64), + } +} + +func (s *ManualSubscriptionEventSource) NewSubscription() *ManualSubscriptionHandle { + handle := &ManualSubscriptionHandle{ + events: make(chan struct{}, 16), + } + s.registered <- handle + return handle +} + +func (s *ManualSubscriptionEventSource) NextSubscription(ctx context.Context) (*ManualSubscriptionHandle, error) { + select { + case handle := <-s.registered: + return handle, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (h *ManualSubscriptionHandle) Emit() { + h.events <- struct{}{} +} + +func (h *ManualSubscriptionHandle) Events() <-chan struct{} { + return h.events +} diff --git a/execution/federationtesting/products/graph/manual_subscription_events_test.go b/execution/federationtesting/products/graph/manual_subscription_events_test.go new file mode 100644 index 0000000000..938e3da1b6 --- /dev/null +++ b/execution/federationtesting/products/graph/manual_subscription_events_test.go @@ -0,0 +1,59 @@ +package graph + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestManualSubscriptionEventSource_RegistersIndependentEmitHandles(t *testing.T) { + source := NewManualSubscriptionEventSource() + + first := source.NewSubscription() + second := source.NewSubscription() + + require.NotNil(t, first) + require.NotNil(t, second) + assert.NotSame(t, first, second) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + registeredFirst, err := source.NextSubscription(ctx) + require.NoError(t, err) + registeredSecond, err := source.NextSubscription(ctx) + require.NoError(t, err) + + assert.Same(t, first, registeredFirst) + assert.Same(t, second, registeredSecond) + + select { + case <-first.Events(): + t.Fatal("first subscription emitted before explicit trigger") + default: + } + + select { + case <-second.Events(): + t.Fatal("second subscription emitted before explicit trigger") + default: + } + + first.Emit() + second.Emit() + + select { + case <-first.Events(): + case <-time.After(time.Second): + t.Fatal("expected first subscription event after explicit trigger") + } + + select { + case <-second.Events(): + case <-time.After(time.Second): + t.Fatal("expected second subscription event after explicit trigger") + } +} diff --git a/execution/federationtesting/products/graph/model/models_gen.go b/execution/federationtesting/products/graph/model/models_gen.go index a060b76cb1..54157f36cc 100644 --- a/execution/federationtesting/products/graph/model/models_gen.go +++ b/execution/federationtesting/products/graph/model/models_gen.go @@ -2,6 +2,33 @@ package model +type ProductInterface interface { + IsProductInterface() + GetUpc() string + GetName() string + GetPrice() int +} + +type ProductUpdate interface { + IsProductUpdate() +} + +type DigitalProduct struct { + Upc string `json:"upc"` + Name string `json:"name"` + Price int `json:"price"` + DownloadURL string `json:"downloadUrl"` +} + +func (DigitalProduct) IsProductUpdate() {} + +func (DigitalProduct) IsProductInterface() {} +func (this DigitalProduct) GetUpc() string { return this.Upc } +func (this DigitalProduct) GetName() string { return this.Name } +func (this DigitalProduct) GetPrice() int { return this.Price } + +func (DigitalProduct) IsEntity() {} + type Mutation struct { } @@ -12,6 +39,13 @@ type Product struct { InStock int `json:"inStock"` } +func (Product) IsProductUpdate() {} + +func (Product) IsProductInterface() {} +func (this Product) GetUpc() string { return this.Upc } +func (this Product) GetName() string { return this.Name } +func (this Product) GetPrice() int { return this.Price } + func (Product) IsEntity() {} type Query struct { diff --git a/execution/federationtesting/products/graph/products.go b/execution/federationtesting/products/graph/products.go index f76f51d22e..8534ab884b 100644 --- a/execution/federationtesting/products/graph/products.go +++ b/execution/federationtesting/products/graph/products.go @@ -26,3 +26,26 @@ func newProducts() []*model.Product { }, } } + +// newExtraProducts returns products not listed in TopProducts but findable by UPC. +func newExtraProducts() []*model.Product { + return []*model.Product{ + { + Upc: "top-4", + Name: "Bowler", + Price: 64, + InStock: 12, + }, + } +} + +func newDigitalProducts() []*model.DigitalProduct { + return []*model.DigitalProduct{ + { + Upc: "digital-1", + Name: "eBook: GraphQL in Action", + Price: 29, + DownloadURL: "https://example.com/downloads/graphql-in-action", + }, + } +} diff --git a/execution/federationtesting/products/graph/resolver.go b/execution/federationtesting/products/graph/resolver.go index dcb75a979a..aaaae07e66 100644 --- a/execution/federationtesting/products/graph/resolver.go +++ b/execution/federationtesting/products/graph/resolver.go @@ -11,11 +11,39 @@ import ( ) type Resolver struct { - products []*model.Product - randomnessEnabled bool - minPrice int - maxPrice int - currentPrice int - updateInterval time.Duration - priceMu sync.Mutex + products []*model.Product + extraProducts []*model.Product + digitalProducts []*model.DigitalProduct + randomnessEnabled bool + minPrice int + maxPrice int + currentPrice int + updateInterval time.Duration + priceMu sync.Mutex + subscriptionEvents *ManualSubscriptionEventSource +} + +// findProduct searches both products and extraProducts by UPC. +func (r *Resolver) findProduct(upc string) *model.Product { + for _, p := range r.products { + if p.Upc == upc { + return p + } + } + for _, p := range r.extraProducts { + if p.Upc == upc { + return p + } + } + return nil +} + +// findDigitalProduct searches digitalProducts by UPC. +func (r *Resolver) findDigitalProduct(upc string) *model.DigitalProduct { + for _, d := range r.digitalProducts { + if d.Upc == upc { + return d + } + } + return nil } diff --git a/execution/federationtesting/products/graph/schema.graphqls b/execution/federationtesting/products/graph/schema.graphqls index f201a8c9af..ee5bbf3cf4 100644 --- a/execution/federationtesting/products/graph/schema.graphqls +++ b/execution/federationtesting/products/graph/schema.graphqls @@ -1,5 +1,7 @@ type Query { topProducts(first: Int = 5): [Product] + product(upc: String!): Product + products(upcs: [String!]!): [Product] } type Mutation { @@ -9,11 +11,31 @@ type Mutation { type Subscription { updatedPrice: Product! updateProductPrice(upc: String!): Product! + updatedPrices(first: Int = 3): [Product!]! + updateProductPriceUnion(upc: String!): ProductUpdate! + updateProductPriceInterface(upc: String!): ProductInterface! + updateDigitalProductPriceUnion(upc: String!): ProductUpdate! + updateDigitalProductPriceInterface(upc: String!): ProductInterface! } -type Product @key(fields: "upc") { +union ProductUpdate = Product | DigitalProduct + +interface ProductInterface { + upc: String! + name: String! + price: Int! +} + +type Product implements ProductInterface @key(fields: "upc") { upc: String! name: String! price: Int! inStock: Int! } + +type DigitalProduct implements ProductInterface @key(fields: "upc") { + upc: String! + name: String! + price: Int! + downloadUrl: String! +} diff --git a/execution/federationtesting/products/graph/schema.resolvers.go b/execution/federationtesting/products/graph/schema.resolvers.go index 3a9114c697..d1e51c3fee 100644 --- a/execution/federationtesting/products/graph/schema.resolvers.go +++ b/execution/federationtesting/products/graph/schema.resolvers.go @@ -31,18 +31,74 @@ func (r *queryResolver) TopProducts(ctx context.Context, first *int) ([]*model.P return r.products[:end], nil } +// Product is the resolver for the product field. +func (r *queryResolver) Product(ctx context.Context, upc string) (*model.Product, error) { + return r.findProduct(upc), nil +} + +// Products is the resolver for the products field. +// Returns products in the same order as the input UPC list. +// Unknown UPCs produce null at the corresponding position. +func (r *queryResolver) Products(ctx context.Context, upcs []string) ([]*model.Product, error) { + result := make([]*model.Product, len(upcs)) + for i, upc := range upcs { + result[i] = r.findProduct(upc) + } + return result, nil +} + // UpdatedPrice is the resolver for the updatedPrice field. func (r *subscriptionResolver) UpdatedPrice(ctx context.Context) (<-chan *model.Product, error) { if len(r.products) == 0 { return nil, fmt.Errorf("no products configured") } updatedPrice := make(chan *model.Product) + trigger := r.nextSubscriptionHandle() + if trigger == nil { + go func() { + defer close(updatedPrice) + for { + select { + case <-ctx.Done(): + return + case <-time.After(r.updateInterval): + product := r.products[len(r.products)-1] + if r.randomnessEnabled { + if len(r.products) > 1 { + product = r.products[rand.Intn(len(r.products)-1)] + } + p := *product + p.Price = rand.Intn(r.maxPrice-r.minPrice+1) + r.minPrice + select { + case updatedPrice <- &p: + case <-ctx.Done(): + return + } + continue + } + + r.priceMu.Lock() + p := *product + p.Price = r.currentPrice + r.currentPrice++ + r.priceMu.Unlock() + select { + case updatedPrice <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return updatedPrice, nil + } go func() { + defer close(updatedPrice) for { select { case <-ctx.Done(): return - case <-time.After(r.updateInterval): + case <-trigger.Events(): product := r.products[len(r.products)-1] if r.randomnessEnabled { if len(r.products) > 1 { @@ -50,7 +106,11 @@ func (r *subscriptionResolver) UpdatedPrice(ctx context.Context) (<-chan *model. } p := *product p.Price = rand.Intn(r.maxPrice-r.minPrice+1) + r.minPrice - updatedPrice <- &p + select { + case updatedPrice <- &p: + case <-ctx.Done(): + return + } continue } @@ -59,7 +119,11 @@ func (r *subscriptionResolver) UpdatedPrice(ctx context.Context) (<-chan *model. p.Price = r.currentPrice r.currentPrice++ r.priceMu.Unlock() - updatedPrice <- &p + select { + case updatedPrice <- &p: + case <-ctx.Done(): + return + } } } }() @@ -69,37 +133,342 @@ func (r *subscriptionResolver) UpdatedPrice(ctx context.Context) (<-chan *model. // UpdateProductPrice is the resolver for the updateProductPrice field. func (r *subscriptionResolver) UpdateProductPrice(ctx context.Context, upc string) (<-chan *model.Product, error) { updatedPrice := make(chan *model.Product) - var product *model.Product + product := r.findProduct(upc) + + if product == nil { + return nil, fmt.Errorf("unknown product upc: %s", upc) + } + + trigger := r.nextSubscriptionHandle() + if trigger == nil { + go func() { + defer close(updatedPrice) + var num int + + for { + num++ - for _, hat := range r.products { - if hat.Upc == upc { - product = hat - break + select { + case <-ctx.Done(): + return + case <-time.After(100 * time.Millisecond): + p := *product + p.Price = num + select { + case updatedPrice <- &p: + case <-ctx.Done(): + return + } + } + } + }() + + return updatedPrice, nil + } + go func() { + defer close(updatedPrice) + var num int + + for { + select { + case <-ctx.Done(): + return + case <-trigger.Events(): + num++ + p := *product + p.Price = num + select { + case updatedPrice <- &p: + case <-ctx.Done(): + return + } + } } + }() + + return updatedPrice, nil +} + +// UpdatedPrices is the resolver for the updatedPrices field. +func (r *subscriptionResolver) UpdatedPrices(ctx context.Context, first *int) (<-chan []*model.Product, error) { + limit := 3 + if first != nil && *first >= 0 { + limit = *first + } + if limit > len(r.products) { + limit = len(r.products) } + snapshot := make([]*model.Product, limit) + for i := 0; i < limit; i++ { + h := *r.products[i] + snapshot[i] = &h + } + + ch := make(chan []*model.Product) + trigger := r.nextSubscriptionHandle() + if trigger == nil { + go func() { + defer close(ch) + var num int + for { + num++ + select { + case <-ctx.Done(): + return + case <-time.After(100 * time.Millisecond): + batch := make([]*model.Product, limit) + for i := 0; i < limit; i++ { + p := *snapshot[i] + p.Price = num + i + batch[i] = &p + } + select { + case ch <- batch: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil + } + go func() { + defer close(ch) + var num int + for { + select { + case <-ctx.Done(): + return + case <-trigger.Events(): + num++ + batch := make([]*model.Product, limit) + for i := 0; i < limit; i++ { + p := *snapshot[i] + p.Price = num + i + batch[i] = &p + } + select { + case ch <- batch: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil +} + +// UpdateProductPriceUnion is the resolver for the updateProductPriceUnion field. +func (r *subscriptionResolver) UpdateProductPriceUnion(ctx context.Context, upc string) (<-chan model.ProductUpdate, error) { + product := r.findProduct(upc) if product == nil { return nil, fmt.Errorf("unknown product upc: %s", upc) } + ch := make(chan model.ProductUpdate) + trigger := r.nextSubscriptionHandle() + if trigger == nil { + go func() { + defer close(ch) + var num int + for { + num++ + select { + case <-ctx.Done(): + return + case <-time.After(100 * time.Millisecond): + p := *product + p.Price = num + select { + case ch <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil + } go func() { + defer close(ch) var num int - for { - num++ + select { + case <-ctx.Done(): + return + case <-trigger.Events(): + num++ + p := *product + p.Price = num + select { + case ch <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil +} + +// UpdateProductPriceInterface is the resolver for the updateProductPriceInterface field. +func (r *subscriptionResolver) UpdateProductPriceInterface(ctx context.Context, upc string) (<-chan model.ProductInterface, error) { + product := r.findProduct(upc) + if product == nil { + return nil, fmt.Errorf("unknown product upc: %s", upc) + } + ch := make(chan model.ProductInterface) + trigger := r.nextSubscriptionHandle() + if trigger == nil { + go func() { + defer close(ch) + var num int + for { + num++ + select { + case <-ctx.Done(): + return + case <-time.After(100 * time.Millisecond): + p := *product + p.Price = num + select { + case ch <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil + } + go func() { + defer close(ch) + var num int + for { select { case <-ctx.Done(): return - case <-time.After(100 * time.Millisecond): + case <-trigger.Events(): + num++ p := *product p.Price = num - updatedPrice <- &p + select { + case ch <- &p: + case <-ctx.Done(): + return + } } } }() + return ch, nil +} - return updatedPrice, nil +// UpdateDigitalProductPriceUnion is the resolver for the updateDigitalProductPriceUnion field. +func (r *subscriptionResolver) UpdateDigitalProductPriceUnion(ctx context.Context, upc string) (<-chan model.ProductUpdate, error) { + dp := r.findDigitalProduct(upc) + if dp == nil { + return nil, fmt.Errorf("unknown digital product upc: %s", upc) + } + + ch := make(chan model.ProductUpdate) + trigger := r.nextSubscriptionHandle() + if trigger == nil { + go func() { + defer close(ch) + var num int + for { + num++ + select { + case <-ctx.Done(): + return + case <-time.After(100 * time.Millisecond): + p := *dp + p.Price = num + select { + case ch <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil + } + go func() { + defer close(ch) + var num int + for { + select { + case <-ctx.Done(): + return + case <-trigger.Events(): + num++ + p := *dp + p.Price = num + select { + case ch <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil +} + +// UpdateDigitalProductPriceInterface is the resolver for the updateDigitalProductPriceInterface field. +func (r *subscriptionResolver) UpdateDigitalProductPriceInterface(ctx context.Context, upc string) (<-chan model.ProductInterface, error) { + dp := r.findDigitalProduct(upc) + if dp == nil { + return nil, fmt.Errorf("unknown digital product upc: %s", upc) + } + + ch := make(chan model.ProductInterface) + trigger := r.nextSubscriptionHandle() + if trigger == nil { + go func() { + defer close(ch) + var num int + for { + num++ + select { + case <-ctx.Done(): + return + case <-time.After(100 * time.Millisecond): + p := *dp + p.Price = num + select { + case ch <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil + } + go func() { + defer close(ch) + var num int + for { + select { + case <-ctx.Done(): + return + case <-trigger.Events(): + num++ + p := *dp + p.Price = num + select { + case ch <- &p: + case <-ctx.Done(): + return + } + } + } + }() + return ch, nil } // Mutation returns generated.MutationResolver implementation. @@ -114,3 +483,10 @@ func (r *Resolver) Subscription() generated.SubscriptionResolver { return &subsc type mutationResolver struct{ *Resolver } type queryResolver struct{ *Resolver } type subscriptionResolver struct{ *Resolver } + +func (r *subscriptionResolver) nextSubscriptionHandle() *ManualSubscriptionHandle { + if r.subscriptionEvents == nil { + return nil + } + return r.subscriptionEvents.NewSubscription() +} diff --git a/execution/federationtesting/products/handler.go b/execution/federationtesting/products/handler.go index f9176ab947..d2cb0216ba 100644 --- a/execution/federationtesting/products/handler.go +++ b/execution/federationtesting/products/handler.go @@ -10,8 +10,9 @@ import ( func Handler() http.Handler { mux := http.NewServeMux() - mux.Handle("/", graph.GraphQLEndpointHandler(graph.EndpointOptions{EnableDebug: true})) - mux.HandleFunc("/websocket_connections", graph.WebsocketConnectionsHandler) + endpoint := graph.GraphQLEndpointHandler(graph.EndpointOptions{EnableDebug: true}) + mux.Handle("/", endpoint) + mux.HandleFunc("/websocket_connections", endpoint.WebsocketConnectionsHandler()) return mux } diff --git a/execution/federationtesting/reviews/gqlgen.yml b/execution/federationtesting/reviews/gqlgen.yml index 4d43729803..77569b3775 100644 --- a/execution/federationtesting/reviews/gqlgen.yml +++ b/execution/federationtesting/reviews/gqlgen.yml @@ -53,3 +53,7 @@ models: - github.com/99designs/gqlgen/graphql.Int - github.com/99designs/gqlgen/graphql.Int64 - github.com/99designs/gqlgen/graphql.Int32 + CacheEntity: + fields: + nested: + resolver: true diff --git a/execution/federationtesting/reviews/graph/entity.resolvers.go b/execution/federationtesting/reviews/graph/entity.resolvers.go index d9ecd7d05d..b37e7ba686 100644 --- a/execution/federationtesting/reviews/graph/entity.resolvers.go +++ b/execution/federationtesting/reviews/graph/entity.resolvers.go @@ -11,6 +11,12 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/federationtesting/reviews/graph/model" ) +// FindCacheEntityByID is the resolver for the findCacheEntityByID field. +// Reviews subgraph only needs the ID for the entity reference — accounts owns the data. +func (r *entityResolver) FindCacheEntityByID(ctx context.Context, id string) (*model.CacheEntity, error) { + return &model.CacheEntity{ID: id}, nil +} + // FindProductByUpc is the resolver for the findProductByUpc field. func (r *entityResolver) FindProductByUpc(ctx context.Context, upc string) (*model.Product, error) { return &model.Product{ diff --git a/execution/federationtesting/reviews/graph/generated/federation.go b/execution/federationtesting/reviews/graph/generated/federation.go index 29af1e1c05..56c0a0917c 100644 --- a/execution/federationtesting/reviews/graph/generated/federation.go +++ b/execution/federationtesting/reviews/graph/generated/federation.go @@ -153,6 +153,29 @@ func (ec *executionContext) resolveEntity( }() switch typeName { + case "CacheEntity": + resolverName, err := entityResolverNameForCacheEntity(ctx, rep) + if err != nil { + return nil, fmt.Errorf(`finding resolver for Entity "CacheEntity": %w`, err) + } + switch resolverName { + + case "findCacheEntityByID": + id0, err := ec.unmarshalNID2string(ctx, rep["id"]) + if err != nil { + return nil, fmt.Errorf(`unmarshalling param 0 for findCacheEntityByID(): %w`, err) + } + entity, err := ec.resolvers.Entity().FindCacheEntityByID(ctx, id0) + if err != nil { + return nil, fmt.Errorf(`resolving Entity "CacheEntity": %w`, err) + } + + entity.A, err = ec.unmarshalNString2string(ctx, rep["a"]) + if err != nil { + return nil, err + } + return entity, nil + } case "Product": resolverName, err := entityResolverNameForProduct(ctx, rep) if err != nil { @@ -189,6 +212,14 @@ func (ec *executionContext) resolveEntity( return nil, fmt.Errorf(`resolving Entity "User": %w`, err) } + entity.Username, err = ec.unmarshalNString2string(ctx, rep["username"]) + if err != nil { + return nil, err + } + entity.Username, err = ec.unmarshalNString2string(ctx, rep["username"]) + if err != nil { + return nil, err + } return entity, nil } @@ -217,6 +248,41 @@ func (ec *executionContext) resolveManyEntities( } } +func entityResolverNameForCacheEntity(ctx context.Context, rep EntityRepresentation) (string, error) { + // we collect errors because a later entity resolver may work fine + // when an entity has multiple keys + entityResolverErrs := []error{} + for { + var ( + m EntityRepresentation + val any + ok bool + ) + _ = val + // if all of the KeyFields values for this resolver are null, + // we shouldn't use use it + allNull := true + m = rep + val, ok = m["id"] + if !ok { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to missing Key Field \"id\" for CacheEntity", ErrTypeNotFound)) + break + } + if allNull { + allNull = val == nil + } + if allNull { + entityResolverErrs = append(entityResolverErrs, + fmt.Errorf("%w due to all null value KeyFields for CacheEntity", ErrTypeNotFound)) + break + } + return "findCacheEntityByID", nil + } + return "", fmt.Errorf("%w for CacheEntity due to %v", ErrTypeNotFound, + errors.Join(entityResolverErrs...).Error()) +} + func entityResolverNameForProduct(ctx context.Context, rep EntityRepresentation) (string, error) { // we collect errors because a later entity resolver may work fine // when an entity has multiple keys diff --git a/execution/federationtesting/reviews/graph/generated/generated.go b/execution/federationtesting/reviews/graph/generated/generated.go index 4051570a0d..74cf710824 100644 --- a/execution/federationtesting/reviews/graph/generated/generated.go +++ b/execution/federationtesting/reviews/graph/generated/generated.go @@ -39,6 +39,7 @@ type Config struct { } type ResolverRoot interface { + CacheEntity() CacheEntityResolver Entity() EntityResolver Mutation() MutationResolver Product() ProductResolver @@ -51,6 +52,12 @@ type DirectiveRoot struct { } type ComplexityRoot struct { + CacheEntity struct { + A func(childComplexity int) int + ID func(childComplexity int) int + Nested func(childComplexity int) int + } + Cat struct { Name func(childComplexity int) int } @@ -61,8 +68,9 @@ type ComplexityRoot struct { } Entity struct { - FindProductByUpc func(childComplexity int, upc string) int - FindUserByID func(childComplexity int, id string) int + FindCacheEntityByID func(childComplexity int, id string) int + FindProductByUpc func(childComplexity int, upc string) int + FindUserByID func(childComplexity int, id string) int } Mutation struct { @@ -82,6 +90,8 @@ type ComplexityRoot struct { Query struct { Cat func(childComplexity int) int Me func(childComplexity int) int + ReviewWithError func(childComplexity int) int + TopReviews func(childComplexity int) int __resolve__service func(childComplexity int) int __resolve_entities func(childComplexity int, representations []map[string]any) int } @@ -99,18 +109,21 @@ type ComplexityRoot struct { } Review struct { - Attachments func(childComplexity int) int - Author func(childComplexity int) int - Body func(childComplexity int) int - Comment func(childComplexity int) int - Product func(childComplexity int) int + Attachments func(childComplexity int) int + Author func(childComplexity int) int + AuthorWithoutProvides func(childComplexity int) int + Body func(childComplexity int) int + Comment func(childComplexity int) int + Product func(childComplexity int) int } User struct { - ID func(childComplexity int) int - RealName func(childComplexity int) int - Reviews func(childComplexity int) int - Username func(childComplexity int) int + CoReviewers func(childComplexity int) int + ID func(childComplexity int) int + RealName func(childComplexity int) int + Reviews func(childComplexity int) int + SameUserReviewers func(childComplexity int) int + Username func(childComplexity int) int } Video struct { @@ -124,7 +137,11 @@ type ComplexityRoot struct { } } +type CacheEntityResolver interface { + Nested(ctx context.Context, obj *model.CacheEntity) (*model.CacheEntity, error) +} type EntityResolver interface { + FindCacheEntityByID(ctx context.Context, id string) (*model.CacheEntity, error) FindProductByUpc(ctx context.Context, upc string) (*model.Product, error) FindUserByID(ctx context.Context, id string) (*model.User, error) } @@ -137,15 +154,20 @@ type ProductResolver interface { type QueryResolver interface { Me(ctx context.Context) (*model.User, error) Cat(ctx context.Context) (*model.Cat, error) + ReviewWithError(ctx context.Context) (*model.Review, error) + TopReviews(ctx context.Context) ([]*model.Review, error) } type ReviewResolver interface { + AuthorWithoutProvides(ctx context.Context, obj *model.Review) (*model.User, error) + Attachments(ctx context.Context, obj *model.Review) ([]model.Attachment, error) Comment(ctx context.Context, obj *model.Review) (model.Comment, error) } type UserResolver interface { - Username(ctx context.Context, obj *model.User) (string, error) Reviews(ctx context.Context, obj *model.User) ([]*model.Review, error) RealName(ctx context.Context, obj *model.User) (string, error) + CoReviewers(ctx context.Context, obj *model.User) ([]*model.User, error) + SameUserReviewers(ctx context.Context, obj *model.User) ([]*model.User, error) } type executableSchema struct { @@ -167,6 +189,27 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin _ = ec switch typeName + "." + field { + case "CacheEntity.a": + if e.complexity.CacheEntity.A == nil { + break + } + + return e.complexity.CacheEntity.A(childComplexity), true + + case "CacheEntity.id": + if e.complexity.CacheEntity.ID == nil { + break + } + + return e.complexity.CacheEntity.ID(childComplexity), true + + case "CacheEntity.nested": + if e.complexity.CacheEntity.Nested == nil { + break + } + + return e.complexity.CacheEntity.Nested(childComplexity), true + case "Cat.name": if e.complexity.Cat.Name == nil { break @@ -188,6 +231,18 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.DetatchedQuestion.Upc(childComplexity), true + case "Entity.findCacheEntityByID": + if e.complexity.Entity.FindCacheEntityByID == nil { + break + } + + args, err := ec.field_Entity_findCacheEntityByID_args(ctx, rawArgs) + if err != nil { + return 0, false + } + + return e.complexity.Entity.FindCacheEntityByID(childComplexity, args["id"].(string)), true + case "Entity.findProductByUpc": if e.complexity.Entity.FindProductByUpc == nil { break @@ -266,6 +321,20 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Query.Me(childComplexity), true + case "Query.reviewWithError": + if e.complexity.Query.ReviewWithError == nil { + break + } + + return e.complexity.Query.ReviewWithError(childComplexity), true + + case "Query.topReviews": + if e.complexity.Query.TopReviews == nil { + break + } + + return e.complexity.Query.TopReviews(childComplexity), true + case "Query._service": if e.complexity.Query.__resolve__service == nil { break @@ -341,6 +410,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Review.Author(childComplexity), true + case "Review.authorWithoutProvides": + if e.complexity.Review.AuthorWithoutProvides == nil { + break + } + + return e.complexity.Review.AuthorWithoutProvides(childComplexity), true + case "Review.body": if e.complexity.Review.Body == nil { break @@ -362,6 +438,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.Review.Product(childComplexity), true + case "User.coReviewers": + if e.complexity.User.CoReviewers == nil { + break + } + + return e.complexity.User.CoReviewers(childComplexity), true + case "User.id": if e.complexity.User.ID == nil { break @@ -383,6 +466,13 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return e.complexity.User.Reviews(childComplexity), true + case "User.sameUserReviewers": + if e.complexity.User.SameUserReviewers == nil { + break + } + + return e.complexity.User.SameUserReviewers(childComplexity), true + case "User.username": if e.complexity.User.Username == nil { break @@ -525,6 +615,13 @@ var sources = []*ast.Source{ {Name: "../schema.graphqls", Input: `type Query { me: User cat: Cat + # reviewWithError returns a review whose author (error-user) triggers an error in accounts subgraph. + # Used for testing cache error handling - caches should NOT be populated on errors. + reviewWithError: Review + # topReviews returns all reviews. Review is NOT an entity (no @key), + # but contains entities (author: User, product: Product). + # Used for testing L1 cache with non-entity root fields containing nested entities. + topReviews: [Review] } type Cat { @@ -539,6 +636,10 @@ interface Comment { type Review { body: String! author: User! @provides(fields: "username") + # authorWithoutProvides is the same as author but without @provides + # This forces gateway to fetch User entity from accounts for username + # Used for testing L1/L2 caching scenarios where we want entity resolution + authorWithoutProvides: User! product: Product! attachments: [Attachment] comment: Comment @@ -583,6 +684,16 @@ type User @key(fields: "id") { username: String! @external reviews: [Review] realName: String! + # Returns other users who reviewed the same products as this user. + # This field returns User references that need entity resolution from accounts. + # @requires forces the gateway to first resolve username from accounts + # before calling this resolver, creating sequential execution. + coReviewers: [User!]! @requires(fields: "username") + # Returns a list containing only the same user - used for L1 cache testing. + # The @requires ensures sequential execution: username must be resolved first. + # When queried after the user is already fetched, the entire batch should be L1 hits, + # allowing the HTTP call to be completely skipped. + sameUserReviewers: [User!]! @requires(fields: "username") } type Product @key(fields: "upc") { @@ -590,6 +701,18 @@ type Product @key(fields: "upc") { reviews: [Review] } +# CacheEntity extension: adds a ` + "`" + `nested` + "`" + ` field that always returns the same entity. +# @requires(fields: "a") forces sequential execution — the gateway must resolve +# field "a" from accounts before calling this resolver. +# The resolver always returns {id: obj.ID}, creating a new entity fetch to accounts +# for whatever fields the query selects at the next nesting level. +# This enables arbitrary-depth sequential entity fetch chains for L1 cache testing. +type CacheEntity @key(fields: "id") { + id: ID! @external + a: String! @external + nested: CacheEntity! @requires(fields: "a") +} + type Mutation { addReview(authorID: String! upc: String!, review: String!): Review! } @@ -605,10 +728,11 @@ type Mutation { `, BuiltIn: true}, {Name: "../../federation/entity.graphql", Input: ` # a union of all types that use the @key directive -union _Entity = Product | User +union _Entity = CacheEntity | Product | User # fake type to build resolver interfaces for users to implement type Entity { + findCacheEntityByID(id: ID!,): CacheEntity! findProductByUpc(upc: String!,): Product! findUserByID(id: ID!,): User! } @@ -629,6 +753,34 @@ var parsedSchema = gqlparser.MustLoadSchema(sources...) // region ***************************** args.gotpl ***************************** +func (ec *executionContext) field_Entity_findCacheEntityByID_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { + var err error + args := map[string]any{} + arg0, err := ec.field_Entity_findCacheEntityByID_argsID(ctx, rawArgs) + if err != nil { + return nil, err + } + args["id"] = arg0 + return args, nil +} +func (ec *executionContext) field_Entity_findCacheEntityByID_argsID( + ctx context.Context, + rawArgs map[string]any, +) (string, error) { + if _, ok := rawArgs["id"]; !ok { + var zeroVal string + return zeroVal, nil + } + + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("id")) + if tmp, ok := rawArgs["id"]; ok { + return ec.unmarshalNID2string(ctx, tmp) + } + + var zeroVal string + return zeroVal, nil +} + func (ec *executionContext) field_Entity_findProductByUpc_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} @@ -935,6 +1087,146 @@ func (ec *executionContext) field___Type_fields_argsIncludeDeprecated( // region **************************** field.gotpl ***************************** +func (ec *executionContext) _CacheEntity_id(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_id(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.ID, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNID2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_id(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type ID does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_a(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_a(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return obj.A, nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(string) + fc.Result = res + return ec.marshalNString2string(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_a(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: false, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type String does not have child fields") + }, + } + return fc, nil +} + +func (ec *executionContext) _CacheEntity_nested(ctx context.Context, field graphql.CollectedField, obj *model.CacheEntity) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_CacheEntity_nested(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.CacheEntity().Nested(rctx, obj) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.CacheEntity) + fc.Result = res + return ec.marshalNCacheEntity2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐCacheEntity(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_CacheEntity_nested(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "CacheEntity", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_CacheEntity_id(ctx, field) + case "a": + return ec.fieldContext_CacheEntity_a(ctx, field) + case "nested": + return ec.fieldContext_CacheEntity_nested(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type CacheEntity", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _Cat_name(ctx context.Context, field graphql.CollectedField, obj *model.Cat) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Cat_name(ctx, field) if err != nil { @@ -1067,6 +1359,69 @@ func (ec *executionContext) fieldContext_DetatchedQuestion_body(_ context.Contex return fc, nil } +func (ec *executionContext) _Entity_findCacheEntityByID(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Entity_findCacheEntityByID(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Entity().FindCacheEntityByID(rctx, fc.Args["id"].(string)) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.CacheEntity) + fc.Result = res + return ec.marshalNCacheEntity2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐCacheEntity(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Entity_findCacheEntityByID(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Entity", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_CacheEntity_id(ctx, field) + case "a": + return ec.fieldContext_CacheEntity_a(ctx, field) + case "nested": + return ec.fieldContext_CacheEntity_nested(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type CacheEntity", field.Name) + }, + } + defer func() { + if r := recover(); r != nil { + err = ec.Recover(ctx, r) + ec.Error(ctx, err) + } + }() + ctx = graphql.WithFieldContext(ctx, fc) + if fc.Args, err = ec.field_Entity_findCacheEntityByID_args(ctx, field.ArgumentMap(ec.Variables)); err != nil { + ec.Error(ctx, err) + return fc, err + } + return fc, nil +} + func (ec *executionContext) _Entity_findProductByUpc(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Entity_findProductByUpc(ctx, field) if err != nil { @@ -1175,6 +1530,10 @@ func (ec *executionContext) fieldContext_Entity_findUserByID(ctx context.Context return ec.fieldContext_User_reviews(ctx, field) case "realName": return ec.fieldContext_User_realName(ctx, field) + case "coReviewers": + return ec.fieldContext_User_coReviewers(ctx, field) + case "sameUserReviewers": + return ec.fieldContext_User_sameUserReviewers(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, @@ -1236,6 +1595,8 @@ func (ec *executionContext) fieldContext_Mutation_addReview(ctx context.Context, return ec.fieldContext_Review_body(ctx, field) case "author": return ec.fieldContext_Review_author(ctx, field) + case "authorWithoutProvides": + return ec.fieldContext_Review_authorWithoutProvides(ctx, field) case "product": return ec.fieldContext_Review_product(ctx, field) case "attachments": @@ -1432,6 +1793,8 @@ func (ec *executionContext) fieldContext_Product_reviews(_ context.Context, fiel return ec.fieldContext_Review_body(ctx, field) case "author": return ec.fieldContext_Review_author(ctx, field) + case "authorWithoutProvides": + return ec.fieldContext_Review_authorWithoutProvides(ctx, field) case "product": return ec.fieldContext_Review_product(ctx, field) case "attachments": @@ -1489,6 +1852,10 @@ func (ec *executionContext) fieldContext_Query_me(_ context.Context, field graph return ec.fieldContext_User_reviews(ctx, field) case "realName": return ec.fieldContext_User_realName(ctx, field) + case "coReviewers": + return ec.fieldContext_User_coReviewers(ctx, field) + case "sameUserReviewers": + return ec.fieldContext_User_sameUserReviewers(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, @@ -1541,8 +1908,8 @@ func (ec *executionContext) fieldContext_Query_cat(_ context.Context, field grap return fc, nil } -func (ec *executionContext) _Query__entities(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { - fc, err := ec.fieldContext_Query__entities(ctx, field) +func (ec *executionContext) _Query_reviewWithError(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_reviewWithError(ctx, field) if err != nil { return graphql.Null } @@ -1555,34 +1922,144 @@ func (ec *executionContext) _Query__entities(ctx context.Context, field graphql. }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.__resolve_entities(ctx, fc.Args["representations"].([]map[string]any)), nil + return ec.resolvers.Query().ReviewWithError(rctx) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { - if !graphql.HasFieldError(ctx, fc) { - ec.Errorf(ctx, "must not be null") - } return graphql.Null } - res := resTmp.([]fedruntime.Entity) + res := resTmp.(*model.Review) fc.Result = res - return ec.marshalN_Entity2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋpluginᚋfederationᚋfedruntimeᚐEntity(ctx, field.Selections, res) + return ec.marshalOReview2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐReview(ctx, field.Selections, res) } -func (ec *executionContext) fieldContext_Query__entities(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { +func (ec *executionContext) fieldContext_Query_reviewWithError(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { fc = &graphql.FieldContext{ Object: "Query", Field: field, IsMethod: true, - IsResolver: false, + IsResolver: true, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { - return nil, errors.New("field of type _Entity does not have child fields") - }, - } - defer func() { + switch field.Name { + case "body": + return ec.fieldContext_Review_body(ctx, field) + case "author": + return ec.fieldContext_Review_author(ctx, field) + case "authorWithoutProvides": + return ec.fieldContext_Review_authorWithoutProvides(ctx, field) + case "product": + return ec.fieldContext_Review_product(ctx, field) + case "attachments": + return ec.fieldContext_Review_attachments(ctx, field) + case "comment": + return ec.fieldContext_Review_comment(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Review", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Query_topReviews(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query_topReviews(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Query().TopReviews(rctx) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + return graphql.Null + } + res := resTmp.([]*model.Review) + fc.Result = res + return ec.marshalOReview2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐReview(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query_topReviews(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "body": + return ec.fieldContext_Review_body(ctx, field) + case "author": + return ec.fieldContext_Review_author(ctx, field) + case "authorWithoutProvides": + return ec.fieldContext_Review_authorWithoutProvides(ctx, field) + case "product": + return ec.fieldContext_Review_product(ctx, field) + case "attachments": + return ec.fieldContext_Review_attachments(ctx, field) + case "comment": + return ec.fieldContext_Review_comment(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type Review", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Query__entities(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Query__entities(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.__resolve_entities(ctx, fc.Args["representations"].([]map[string]any)), nil + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]fedruntime.Entity) + fc.Result = res + return ec.marshalN_Entity2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋpluginᚋfederationᚋfedruntimeᚐEntity(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Query__entities(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Query", + Field: field, + IsMethod: true, + IsResolver: false, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + return nil, errors.New("field of type _Entity does not have child fields") + }, + } + defer func() { if r := recover(); r != nil { err = ec.Recover(ctx, r) ec.Error(ctx, err) @@ -2130,6 +2607,68 @@ func (ec *executionContext) fieldContext_Review_author(_ context.Context, field return ec.fieldContext_User_reviews(ctx, field) case "realName": return ec.fieldContext_User_realName(ctx, field) + case "coReviewers": + return ec.fieldContext_User_coReviewers(ctx, field) + case "sameUserReviewers": + return ec.fieldContext_User_sameUserReviewers(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type User", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _Review_authorWithoutProvides(ctx context.Context, field graphql.CollectedField, obj *model.Review) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_Review_authorWithoutProvides(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.Review().AuthorWithoutProvides(rctx, obj) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.(*model.User) + fc.Result = res + return ec.marshalNUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐUser(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_Review_authorWithoutProvides(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "Review", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_User_id(ctx, field) + case "username": + return ec.fieldContext_User_username(ctx, field) + case "reviews": + return ec.fieldContext_User_reviews(ctx, field) + case "realName": + return ec.fieldContext_User_realName(ctx, field) + case "coReviewers": + return ec.fieldContext_User_coReviewers(ctx, field) + case "sameUserReviewers": + return ec.fieldContext_User_sameUserReviewers(ctx, field) } return nil, fmt.Errorf("no field named %q was found under type User", field.Name) }, @@ -2327,7 +2866,7 @@ func (ec *executionContext) _User_username(ctx context.Context, field graphql.Co }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.User().Username(rctx, obj) + return obj.Username, nil }) if err != nil { ec.Error(ctx, err) @@ -2348,8 +2887,8 @@ func (ec *executionContext) fieldContext_User_username(_ context.Context, field fc = &graphql.FieldContext{ Object: "User", Field: field, - IsMethod: true, - IsResolver: true, + IsMethod: false, + IsResolver: false, Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { return nil, errors.New("field of type String does not have child fields") }, @@ -2397,6 +2936,8 @@ func (ec *executionContext) fieldContext_User_reviews(_ context.Context, field g return ec.fieldContext_Review_body(ctx, field) case "author": return ec.fieldContext_Review_author(ctx, field) + case "authorWithoutProvides": + return ec.fieldContext_Review_authorWithoutProvides(ctx, field) case "product": return ec.fieldContext_Review_product(ctx, field) case "attachments": @@ -2454,6 +2995,122 @@ func (ec *executionContext) fieldContext_User_realName(_ context.Context, field return fc, nil } +func (ec *executionContext) _User_coReviewers(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_coReviewers(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.User().CoReviewers(rctx, obj) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.User) + fc.Result = res + return ec.marshalNUser2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐUserᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_User_coReviewers(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "User", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_User_id(ctx, field) + case "username": + return ec.fieldContext_User_username(ctx, field) + case "reviews": + return ec.fieldContext_User_reviews(ctx, field) + case "realName": + return ec.fieldContext_User_realName(ctx, field) + case "coReviewers": + return ec.fieldContext_User_coReviewers(ctx, field) + case "sameUserReviewers": + return ec.fieldContext_User_sameUserReviewers(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type User", field.Name) + }, + } + return fc, nil +} + +func (ec *executionContext) _User_sameUserReviewers(ctx context.Context, field graphql.CollectedField, obj *model.User) (ret graphql.Marshaler) { + fc, err := ec.fieldContext_User_sameUserReviewers(ctx, field) + if err != nil { + return graphql.Null + } + ctx = graphql.WithFieldContext(ctx, fc) + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = graphql.Null + } + }() + resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { + ctx = rctx // use context from middleware stack in children + return ec.resolvers.User().SameUserReviewers(rctx, obj) + }) + if err != nil { + ec.Error(ctx, err) + return graphql.Null + } + if resTmp == nil { + if !graphql.HasFieldError(ctx, fc) { + ec.Errorf(ctx, "must not be null") + } + return graphql.Null + } + res := resTmp.([]*model.User) + fc.Result = res + return ec.marshalNUser2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐUserᚄ(ctx, field.Selections, res) +} + +func (ec *executionContext) fieldContext_User_sameUserReviewers(_ context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) { + fc = &graphql.FieldContext{ + Object: "User", + Field: field, + IsMethod: true, + IsResolver: true, + Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) { + switch field.Name { + case "id": + return ec.fieldContext_User_id(ctx, field) + case "username": + return ec.fieldContext_User_username(ctx, field) + case "reviews": + return ec.fieldContext_User_reviews(ctx, field) + case "realName": + return ec.fieldContext_User_realName(ctx, field) + case "coReviewers": + return ec.fieldContext_User_coReviewers(ctx, field) + case "sameUserReviewers": + return ec.fieldContext_User_sameUserReviewers(ctx, field) + } + return nil, fmt.Errorf("no field named %q was found under type User", field.Name) + }, + } + return fc, nil +} + func (ec *executionContext) _Video_upc(ctx context.Context, field graphql.CollectedField, obj *model.Video) (ret graphql.Marshaler) { fc, err := ec.fieldContext_Video_upc(ctx, field) if err != nil { @@ -4649,55 +5306,142 @@ func (ec *executionContext) _Comment(ctx context.Context, sel ast.SelectionSet, } } -func (ec *executionContext) _Iface(ctx context.Context, sel ast.SelectionSet, obj model.Iface) graphql.Marshaler { - switch obj := (obj).(type) { - case nil: - return graphql.Null - case model.Question: - return ec._Question(ctx, sel, &obj) - case *model.Question: - if obj == nil { - return graphql.Null - } - return ec._Question(ctx, sel, obj) - case model.Video: - return ec._Video(ctx, sel, &obj) - case *model.Video: - if obj == nil { - return graphql.Null +func (ec *executionContext) _Iface(ctx context.Context, sel ast.SelectionSet, obj model.Iface) graphql.Marshaler { + switch obj := (obj).(type) { + case nil: + return graphql.Null + case model.Question: + return ec._Question(ctx, sel, &obj) + case *model.Question: + if obj == nil { + return graphql.Null + } + return ec._Question(ctx, sel, obj) + case model.Video: + return ec._Video(ctx, sel, &obj) + case *model.Video: + if obj == nil { + return graphql.Null + } + return ec._Video(ctx, sel, obj) + default: + panic(fmt.Errorf("unexpected type %T", obj)) + } +} + +func (ec *executionContext) __Entity(ctx context.Context, sel ast.SelectionSet, obj fedruntime.Entity) graphql.Marshaler { + switch obj := (obj).(type) { + case nil: + return graphql.Null + case model.User: + return ec._User(ctx, sel, &obj) + case *model.User: + if obj == nil { + return graphql.Null + } + return ec._User(ctx, sel, obj) + case model.Product: + return ec._Product(ctx, sel, &obj) + case *model.Product: + if obj == nil { + return graphql.Null + } + return ec._Product(ctx, sel, obj) + case model.CacheEntity: + return ec._CacheEntity(ctx, sel, &obj) + case *model.CacheEntity: + if obj == nil { + return graphql.Null + } + return ec._CacheEntity(ctx, sel, obj) + default: + panic(fmt.Errorf("unexpected type %T", obj)) + } +} + +// endregion ************************** interface.gotpl *************************** + +// region **************************** object.gotpl **************************** + +var cacheEntityImplementors = []string{"CacheEntity", "_Entity"} + +func (ec *executionContext) _CacheEntity(ctx context.Context, sel ast.SelectionSet, obj *model.CacheEntity) graphql.Marshaler { + fields := graphql.CollectFields(ec.OperationContext, sel, cacheEntityImplementors) + + out := graphql.NewFieldSet(fields) + deferred := make(map[string]*graphql.FieldSet) + for i, field := range fields { + switch field.Name { + case "__typename": + out.Values[i] = graphql.MarshalString("CacheEntity") + case "id": + out.Values[i] = ec._CacheEntity_id(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "a": + out.Values[i] = ec._CacheEntity_a(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "nested": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._CacheEntity_nested(ctx, field, obj) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + default: + panic("unknown field " + strconv.Quote(field.Name)) } - return ec._Video(ctx, sel, obj) - default: - panic(fmt.Errorf("unexpected type %T", obj)) } -} - -func (ec *executionContext) __Entity(ctx context.Context, sel ast.SelectionSet, obj fedruntime.Entity) graphql.Marshaler { - switch obj := (obj).(type) { - case nil: + out.Dispatch(ctx) + if out.Invalids > 0 { return graphql.Null - case model.User: - return ec._User(ctx, sel, &obj) - case *model.User: - if obj == nil { - return graphql.Null - } - return ec._User(ctx, sel, obj) - case model.Product: - return ec._Product(ctx, sel, &obj) - case *model.Product: - if obj == nil { - return graphql.Null - } - return ec._Product(ctx, sel, obj) - default: - panic(fmt.Errorf("unexpected type %T", obj)) } -} -// endregion ************************** interface.gotpl *************************** + atomic.AddInt32(&ec.deferred, int32(len(deferred))) -// region **************************** object.gotpl **************************** + for label, dfs := range deferred { + ec.processDeferredGroup(graphql.DeferredGroup{ + Label: label, + Path: graphql.GetPath(ctx), + FieldSet: dfs, + Context: ctx, + }) + } + + return out +} var catImplementors = []string{"Cat"} @@ -4801,6 +5545,28 @@ func (ec *executionContext) _Entity(ctx context.Context, sel ast.SelectionSet) g switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Entity") + case "findCacheEntityByID": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Entity_findCacheEntityByID(ctx, field) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "findProductByUpc": field := field @@ -5089,6 +5855,44 @@ func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) gr func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "reviewWithError": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_reviewWithError(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) + case "topReviews": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Query_topReviews(ctx, field) + return res + } + + rrm := func(ctx context.Context) graphql.Marshaler { + return ec.OperationContext.RootResolverMiddleware(ctx, + func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + } + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) }) case "_entities": field := field @@ -5284,6 +6088,42 @@ func (ec *executionContext) _Review(ctx context.Context, sel ast.SelectionSet, o if out.Values[i] == graphql.Null { atomic.AddUint32(&out.Invalids, 1) } + case "authorWithoutProvides": + field := field + + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._Review_authorWithoutProvides(ctx, field, obj) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } + return res + } + + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) case "product": out.Values[i] = ec._Review_product(ctx, field, obj) if out.Values[i] == graphql.Null { @@ -5395,6 +6235,44 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj atomic.AddUint32(&out.Invalids, 1) } case "username": + out.Values[i] = ec._User_username(ctx, field, obj) + if out.Values[i] == graphql.Null { + atomic.AddUint32(&out.Invalids, 1) + } + case "reviews": + field := field + + innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + } + }() + res = ec._User_reviews(ctx, field, obj) + return res + } + + if field.Deferrable != nil { + dfs, ok := deferred[field.Deferrable.Label] + di := 0 + if ok { + dfs.AddField(field) + di = len(dfs.Values) - 1 + } else { + dfs = graphql.NewFieldSet([]graphql.CollectedField{field}) + deferred[field.Deferrable.Label] = dfs + } + dfs.Concurrently(di, func(ctx context.Context) graphql.Marshaler { + return innerFunc(ctx, dfs) + }) + + // don't run the out.Concurrently() call below + out.Values[i] = graphql.Null + continue + } + + out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) + case "realName": field := field innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { @@ -5403,7 +6281,7 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._User_username(ctx, field, obj) + res = ec._User_realName(ctx, field, obj) if res == graphql.Null { atomic.AddUint32(&fs.Invalids, 1) } @@ -5430,16 +6308,19 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) - case "reviews": + case "coReviewers": field := field - innerFunc := func(ctx context.Context, _ *graphql.FieldSet) (res graphql.Marshaler) { + innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { defer func() { if r := recover(); r != nil { ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._User_reviews(ctx, field, obj) + res = ec._User_coReviewers(ctx, field, obj) + if res == graphql.Null { + atomic.AddUint32(&fs.Invalids, 1) + } return res } @@ -5463,7 +6344,7 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj } out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) }) - case "realName": + case "sameUserReviewers": field := field innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) { @@ -5472,7 +6353,7 @@ func (ec *executionContext) _User(ctx context.Context, sel ast.SelectionSet, obj ec.Error(ctx, ec.Recover(ctx, r)) } }() - res = ec._User_realName(ctx, field, obj) + res = ec._User_sameUserReviewers(ctx, field, obj) if res == graphql.Null { atomic.AddUint32(&fs.Invalids, 1) } @@ -5958,6 +6839,20 @@ func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.Se return res } +func (ec *executionContext) marshalNCacheEntity2githubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐCacheEntity(ctx context.Context, sel ast.SelectionSet, v model.CacheEntity) graphql.Marshaler { + return ec._CacheEntity(ctx, sel, &v) +} + +func (ec *executionContext) marshalNCacheEntity2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐCacheEntity(ctx context.Context, sel ast.SelectionSet, v *model.CacheEntity) graphql.Marshaler { + if v == nil { + if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { + ec.Errorf(ctx, "the requested element is null which the schema does not allow") + } + return graphql.Null + } + return ec._CacheEntity(ctx, sel, v) +} + func (ec *executionContext) unmarshalNFloat2float64(ctx context.Context, v any) (float64, error) { res, err := graphql.UnmarshalFloatContext(ctx, v) return res, graphql.ErrorOnPath(ctx, err) @@ -6054,6 +6949,50 @@ func (ec *executionContext) marshalNUser2githubᚗcomᚋwundergraphᚋgraphqlᚑ return ec._User(ctx, sel, &v) } +func (ec *executionContext) marshalNUser2ᚕᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐUserᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.User) graphql.Marshaler { + ret := make(graphql.Array, len(v)) + var wg sync.WaitGroup + isLen1 := len(v) == 1 + if !isLen1 { + wg.Add(len(v)) + } + for i := range v { + i := i + fc := &graphql.FieldContext{ + Index: &i, + Result: &v[i], + } + ctx := graphql.WithFieldContext(ctx, fc) + f := func(i int) { + defer func() { + if r := recover(); r != nil { + ec.Error(ctx, ec.Recover(ctx, r)) + ret = nil + } + }() + if !isLen1 { + defer wg.Done() + } + ret[i] = ec.marshalNUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐUser(ctx, sel, v[i]) + } + if isLen1 { + f(i) + } else { + go f(i) + } + + } + wg.Wait() + + for _, e := range ret { + if e == graphql.Null { + return graphql.Null + } + } + + return ret +} + func (ec *executionContext) marshalNUser2ᚖgithubᚗcomᚋwundergraphᚋgraphqlᚑgoᚑtoolsᚋexecutionᚋfederationtestingᚋreviewsᚋgraphᚋmodelᚐUser(ctx context.Context, sel ast.SelectionSet, v *model.User) graphql.Marshaler { if v == nil { if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) { diff --git a/execution/federationtesting/reviews/graph/model/models.go b/execution/federationtesting/reviews/graph/model/models.go index 5139403523..f2aa200c33 100644 --- a/execution/federationtesting/reviews/graph/model/models.go +++ b/execution/federationtesting/reviews/graph/model/models.go @@ -13,7 +13,8 @@ type Review struct { } type User struct { - ID string `json:"id"` + ID string `json:"id"` + Username string `json:"username"` } func (User) IsEntity() {} diff --git a/execution/federationtesting/reviews/graph/model/models_gen.go b/execution/federationtesting/reviews/graph/model/models_gen.go index 54a18c3fb7..043484d042 100644 --- a/execution/federationtesting/reviews/graph/model/models_gen.go +++ b/execution/federationtesting/reviews/graph/model/models_gen.go @@ -17,6 +17,14 @@ type Iface interface { GetSubject() string } +type CacheEntity struct { + ID string `json:"id"` + A string `json:"a"` + Nested *CacheEntity `json:"nested"` +} + +func (CacheEntity) IsEntity() {} + type Cat struct { Name string `json:"name"` } diff --git a/execution/federationtesting/reviews/graph/reviews.go b/execution/federationtesting/reviews/graph/reviews.go index f6645dafad..16dec17600 100644 --- a/execution/federationtesting/reviews/graph/reviews.go +++ b/execution/federationtesting/reviews/graph/reviews.go @@ -9,17 +9,36 @@ func newReviews() []*model.Review { { Body: "A highly effective form of birth control.", Product: &model.Product{Upc: "top-1"}, - Author: &model.User{ID: "1234"}, + Author: &model.User{ID: "1234", Username: "Me"}, }, { Body: "Fedoras are one of the most fashionable hats around and can look great with a variety of outfits.", Product: &model.Product{Upc: "top-2"}, - Author: &model.User{ID: "1234"}, + Author: &model.User{ID: "1234", Username: "Me"}, }, { Body: "This is the last straw. Hat you will wear. 11/10", Product: &model.Product{Upc: "top-3"}, - Author: &model.User{ID: "7777"}, + Author: &model.User{ID: "7777", Username: "User 7777"}, + }, + { + Body: "Perfect summer hat.", + Product: &model.Product{Upc: "top-4"}, + Author: &model.User{ID: "5678", Username: "User 5678"}, + }, + { + Body: "A bit too fancy for my taste.", + Product: &model.Product{Upc: "top-4"}, + Author: &model.User{ID: "8888", Username: "User 8888"}, }, } } + +// errorReview is a separate review used for cache error testing. +// It has an author ID "error-user" which triggers an error in the accounts subgraph. +// This is accessed via the reviewWithError query, not through normal reviews. +var errorReview = &model.Review{ + Body: "This review triggers an error when resolving the author", + Product: &model.Product{Upc: "error-product"}, + Author: &model.User{ID: "error-user", Username: ""}, +} diff --git a/execution/federationtesting/reviews/graph/schema.graphqls b/execution/federationtesting/reviews/graph/schema.graphqls index 9df5262ed0..7ff2d1ae88 100644 --- a/execution/federationtesting/reviews/graph/schema.graphqls +++ b/execution/federationtesting/reviews/graph/schema.graphqls @@ -1,6 +1,13 @@ type Query { me: User cat: Cat + # reviewWithError returns a review whose author (error-user) triggers an error in accounts subgraph. + # Used for testing cache error handling - caches should NOT be populated on errors. + reviewWithError: Review + # topReviews returns all reviews. Review is NOT an entity (no @key), + # but contains entities (author: User, product: Product). + # Used for testing L1 cache with non-entity root fields containing nested entities. + topReviews: [Review] } type Cat { @@ -15,6 +22,10 @@ interface Comment { type Review { body: String! author: User! @provides(fields: "username") + # authorWithoutProvides is the same as author but without @provides + # This forces gateway to fetch User entity from accounts for username + # Used for testing L1/L2 caching scenarios where we want entity resolution + authorWithoutProvides: User! product: Product! attachments: [Attachment] comment: Comment @@ -59,6 +70,16 @@ type User @key(fields: "id") { username: String! @external reviews: [Review] realName: String! + # Returns other users who reviewed the same products as this user. + # This field returns User references that need entity resolution from accounts. + # @requires forces the gateway to first resolve username from accounts + # before calling this resolver, creating sequential execution. + coReviewers: [User!]! @requires(fields: "username") + # Returns a list containing only the same user - used for L1 cache testing. + # The @requires ensures sequential execution: username must be resolved first. + # When queried after the user is already fetched, the entire batch should be L1 hits, + # allowing the HTTP call to be completely skipped. + sameUserReviewers: [User!]! @requires(fields: "username") } type Product @key(fields: "upc") { @@ -66,6 +87,18 @@ type Product @key(fields: "upc") { reviews: [Review] } +# CacheEntity extension: adds a `nested` field that always returns the same entity. +# @requires(fields: "a") forces sequential execution — the gateway must resolve +# field "a" from accounts before calling this resolver. +# The resolver always returns {id: obj.ID}, creating a new entity fetch to accounts +# for whatever fields the query selects at the next nesting level. +# This enables arbitrary-depth sequential entity fetch chains for L1 cache testing. +type CacheEntity @key(fields: "id") { + id: ID! @external + a: String! @external + nested: CacheEntity! @requires(fields: "a") +} + type Mutation { addReview(authorID: String! upc: String!, review: String!): Review! } diff --git a/execution/federationtesting/reviews/graph/schema.resolvers.go b/execution/federationtesting/reviews/graph/schema.resolvers.go index 862d855f20..bd83fc83a5 100644 --- a/execution/federationtesting/reviews/graph/schema.resolvers.go +++ b/execution/federationtesting/reviews/graph/schema.resolvers.go @@ -12,11 +12,27 @@ import ( "github.com/wundergraph/graphql-go-tools/execution/federationtesting/reviews/graph/model" ) +// Nested is the resolver for the nested field. +// Always returns the same entity (same ID as parent), creating a self-referential +// chain for L1 cache testing. Each nesting level triggers a new entity fetch +// to the accounts subgraph for whatever fields the query selects. +func (r *cacheEntityResolver) Nested(ctx context.Context, obj *model.CacheEntity) (*model.CacheEntity, error) { + return &model.CacheEntity{ID: obj.ID}, nil +} + // AddReview is the resolver for the addReview field. func (r *mutationResolver) AddReview(ctx context.Context, authorID string, upc string, review string) (*model.Review, error) { + // Generate username matching accounts service pattern. + // Required by @provides(fields: "username") on Review.author — reviews promises to supply + // this field, so the gateway uses this value directly instead of re-fetching from accounts. + username := fmt.Sprintf("User %s", authorID) + if authorID == "1234" { + username = "Me" + } + record := &model.Review{ Body: review, - Author: &model.User{ID: authorID}, + Author: &model.User{ID: authorID, Username: username}, Product: &model.Product{Upc: upc}, } @@ -52,6 +68,29 @@ func (r *queryResolver) Cat(ctx context.Context) (*model.Cat, error) { }, nil } +// ReviewWithError is the resolver for the reviewWithError field. +// Returns a review whose author (error-user) triggers an error in the accounts subgraph. +// Used for testing cache error handling - caches should NOT be populated on errors. +func (r *queryResolver) ReviewWithError(ctx context.Context) (*model.Review, error) { + // Return the dedicated error review (separate from normal reviews list) + return errorReview, nil +} + +// TopReviews is the resolver for the topReviews field. +// Returns all reviews. Review is NOT an entity (no @key), but contains +// entities (author: User, product: Product). Used for L1 cache testing +// with non-entity root fields containing nested entities. +func (r *queryResolver) TopReviews(ctx context.Context) ([]*model.Review, error) { + return r.reviews, nil +} + +// AuthorWithoutProvides is the resolver for the authorWithoutProvides field. +// Returns the same Author as the regular author field, but without @provides directive +// in the schema. This forces the gateway to fetch username from accounts subgraph. +func (r *reviewResolver) AuthorWithoutProvides(ctx context.Context, obj *model.Review) (*model.User, error) { + return obj.Author, nil +} + // Attachments is the resolver for the attachments field. func (r *reviewResolver) Attachments(ctx context.Context, obj *model.Review) ([]model.Attachment, error) { var res []model.Attachment @@ -85,15 +124,6 @@ func (r *reviewResolver) Comment(ctx context.Context, obj *model.Review) (model. }, nil } -// Username is the resolver for the username field. -func (r *userResolver) Username(ctx context.Context, obj *model.User) (string, error) { - username := fmt.Sprintf("User %s", obj.ID) - if obj.ID == "1234" { - username = "Me" - } - return username, nil -} - // Reviews is the resolver for the reviews field. func (r *userResolver) Reviews(ctx context.Context, obj *model.User) ([]*model.Review, error) { var res []*model.Review @@ -116,6 +146,53 @@ func (r *userResolver) RealName(ctx context.Context, obj *model.User) (string, e return realName, nil } +// CoReviewers is the resolver for the coReviewers field. +// Returns users who reviewed the same products as this user. +// These are returned as User references (ID only) that need entity resolution from accounts. +// This creates a dependency chain for L1 cache testing: +// 1. First, this User is resolved via entity fetch from accounts +// 2. Then, coReviewers returns User IDs +// 3. Those Users need entity resolution from accounts -> L1 HIT if same user! +func (r *userResolver) CoReviewers(ctx context.Context, obj *model.User) ([]*model.User, error) { + // Return co-reviewers based on the user ID. + // User 1234 reviewed top-1 and top-2, User 7777 reviewed top-3. + // For L1 cache testing, we return users that include the original user (self-reference). + switch obj.ID { + case "1234": + // User 1234's co-reviewers include themselves and User 7777 + return []*model.User{ + {ID: "1234"}, // Self-reference for L1 hit + {ID: "7777"}, + }, nil + case "7777": + // User 7777's co-reviewers include themselves and User 1234 + return []*model.User{ + {ID: "7777"}, // Self-reference for L1 hit + {ID: "1234"}, + }, nil + default: + // Other users have no co-reviewers + return []*model.User{}, nil + } +} + +// SameUserReviewers is the resolver for the sameUserReviewers field. +// Returns a list containing only the same user - used for L1 cache testing. +// The @requires(fields: "username") ensures this runs AFTER the User entity +// is fetched from accounts, populating L1. The returned User references +// should then be complete L1 hits (no HTTP call needed). +func (r *userResolver) SameUserReviewers(ctx context.Context, obj *model.User) ([]*model.User, error) { + // Return a list containing only the same user. + // This ensures the entire batch for entity resolution consists of + // entities already in L1, allowing the HTTP call to be skipped. + return []*model.User{ + {ID: obj.ID}, + }, nil +} + +// CacheEntity returns generated.CacheEntityResolver implementation. +func (r *Resolver) CacheEntity() generated.CacheEntityResolver { return &cacheEntityResolver{r} } + // Mutation returns generated.MutationResolver implementation. func (r *Resolver) Mutation() generated.MutationResolver { return &mutationResolver{r} } @@ -131,6 +208,7 @@ func (r *Resolver) Review() generated.ReviewResolver { return &reviewResolver{r} // User returns generated.UserResolver implementation. func (r *Resolver) User() generated.UserResolver { return &userResolver{r} } +type cacheEntityResolver struct{ *Resolver } type mutationResolver struct{ *Resolver } type productResolver struct{ *Resolver } type queryResolver struct{ *Resolver } diff --git a/execution/federationtesting/skipped_fetch_test.go b/execution/federationtesting/skipped_fetch_test.go index ed3ea8c467..f2dae81eda 100644 --- a/execution/federationtesting/skipped_fetch_test.go +++ b/execution/federationtesting/skipped_fetch_test.go @@ -17,6 +17,7 @@ import ( ) func TestSkippedFetchOnNullParent(t *testing.T) { + t.Parallel() // Users subgraph: returns null for the "user" field. usersServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") diff --git a/execution/federationtesting/testdata/mutations/add_review_without_provides.query b/execution/federationtesting/testdata/mutations/add_review_without_provides.query new file mode 100644 index 0000000000..4e89feed71 --- /dev/null +++ b/execution/federationtesting/testdata/mutations/add_review_without_provides.query @@ -0,0 +1,8 @@ +mutation AddReviewWithoutProvides($authorID: String!, $upc: String!, $review: String!) { + addReview(authorID: $authorID, upc: $upc, review: $review) { + body + authorWithoutProvides { + username + } + } +} diff --git a/execution/federationtesting/testdata/queries/me_reviews_without_provides.query b/execution/federationtesting/testdata/queries/me_reviews_without_provides.query new file mode 100644 index 0000000000..bb17a065c1 --- /dev/null +++ b/execution/federationtesting/testdata/queries/me_reviews_without_provides.query @@ -0,0 +1,10 @@ +query MeReviewsWithoutProvides { + me { + reviews { + body + authorWithoutProvides { + username + } + } + } +} diff --git a/execution/federationtesting/testdata/queries/me_reviews_without_provides_with_nickname.query b/execution/federationtesting/testdata/queries/me_reviews_without_provides_with_nickname.query new file mode 100644 index 0000000000..0a574d3273 --- /dev/null +++ b/execution/federationtesting/testdata/queries/me_reviews_without_provides_with_nickname.query @@ -0,0 +1,11 @@ +query MeReviewsWithoutProvidesWithNickname { + me { + reviews { + body + authorWithoutProvides { + username + nickname + } + } + } +} diff --git a/execution/federationtesting/testdata/queries/multiple_upstream_without_provides.query b/execution/federationtesting/testdata/queries/multiple_upstream_without_provides.query new file mode 100644 index 0000000000..a323953a61 --- /dev/null +++ b/execution/federationtesting/testdata/queries/multiple_upstream_without_provides.query @@ -0,0 +1,11 @@ +query MultipleServersWithoutProvides { + topProducts { + name + reviews { + body + authorWithoutProvides { + username + } + } + } +} diff --git a/execution/federationtesting/testdata/queries/product_by_upc.query b/execution/federationtesting/testdata/queries/product_by_upc.query new file mode 100644 index 0000000000..eb1b4d4d42 --- /dev/null +++ b/execution/federationtesting/testdata/queries/product_by_upc.query @@ -0,0 +1,6 @@ +query ProductByUpc($upc: String!) { + product(upc: $upc) { + upc + name + } +} diff --git a/execution/federationtesting/testdata/queries/user_by_id.query b/execution/federationtesting/testdata/queries/user_by_id.query new file mode 100644 index 0000000000..be9c2280da --- /dev/null +++ b/execution/federationtesting/testdata/queries/user_by_id.query @@ -0,0 +1,6 @@ +query UserById($id: ID!) { + user(id: $id) { + id + username + } +} diff --git a/execution/federationtesting/testdata/queries/user_by_id_and_name.query b/execution/federationtesting/testdata/queries/user_by_id_and_name.query new file mode 100644 index 0000000000..7801dec701 --- /dev/null +++ b/execution/federationtesting/testdata/queries/user_by_id_and_name.query @@ -0,0 +1,6 @@ +query UserByIdAndName($id: ID!, $username: String!) { + userByIdAndName(id: $id, username: $username) { + id + username + } +} diff --git a/execution/federationtesting/testdata/queries/user_by_id_with_reviews.query b/execution/federationtesting/testdata/queries/user_by_id_with_reviews.query new file mode 100644 index 0000000000..dd253ec27a --- /dev/null +++ b/execution/federationtesting/testdata/queries/user_by_id_with_reviews.query @@ -0,0 +1,9 @@ +query UserByIdWithReviews($id: ID!) { + user(id: $id) { + id + username + reviews { + body + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_all_prices_with_reviews.query b/execution/federationtesting/testdata/subscriptions/subscription_all_prices_with_reviews.query new file mode 100644 index 0000000000..d223980fd9 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_all_prices_with_reviews.query @@ -0,0 +1,13 @@ +subscription AllPricesWithReviews { + updatedPrices { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_digital_product_interface.query b/execution/federationtesting/testdata/subscriptions/subscription_digital_product_interface.query new file mode 100644 index 0000000000..d204b356a6 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_digital_product_interface.query @@ -0,0 +1,9 @@ +subscription UpdateDigitalProductPriceInterface($upc: String!) { + updateDigitalProductPriceInterface(upc: $upc) { + ... on DigitalProduct { + upc + name + price + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_digital_product_union.query b/execution/federationtesting/testdata/subscriptions/subscription_digital_product_union.query new file mode 100644 index 0000000000..df5bd0380d --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_digital_product_union.query @@ -0,0 +1,9 @@ +subscription UpdateDigitalProductPriceUnion($upc: String!) { + updateDigitalProductPriceUnion(upc: $upc) { + ... on DigitalProduct { + upc + name + price + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_alias.query b/execution/federationtesting/testdata/subscriptions/subscription_product_alias.query new file mode 100644 index 0000000000..6f794f075d --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_alias.query @@ -0,0 +1,7 @@ +subscription UpdatePriceAlias($upc: String!) { + priceUpdate: updateProductPrice(upc: $upc) { + upc + name + price + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_interface.query b/execution/federationtesting/testdata/subscriptions/subscription_product_interface.query new file mode 100644 index 0000000000..a61811c5c5 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_interface.query @@ -0,0 +1,9 @@ +subscription UpdatePriceInterface($upc: String!) { + updateProductPriceInterface(upc: $upc) { + ... on Product { + upc + name + price + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_key_only.query b/execution/federationtesting/testdata/subscriptions/subscription_product_key_only.query new file mode 100644 index 0000000000..881b8283fa --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_key_only.query @@ -0,0 +1,11 @@ +subscription UpdatePriceKeyOnly($upc: String!) { + updateProductPrice(upc: $upc) { + upc + reviews { + body + authorWithoutProvides { + username + } + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_only.query b/execution/federationtesting/testdata/subscriptions/subscription_product_only.query new file mode 100644 index 0000000000..f44cf9e4e5 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_only.query @@ -0,0 +1,7 @@ +subscription UpdatePrice($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_union.query b/execution/federationtesting/testdata/subscriptions/subscription_product_union.query new file mode 100644 index 0000000000..e1077577b0 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_union.query @@ -0,0 +1,9 @@ +subscription UpdatePriceUnion($upc: String!) { + updateProductPriceUnion(upc: $upc) { + ... on Product { + upc + name + price + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_with_author_nickname.query b/execution/federationtesting/testdata/subscriptions/subscription_product_with_author_nickname.query new file mode 100644 index 0000000000..fe9eb43097 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_with_author_nickname.query @@ -0,0 +1,14 @@ +subscription UpdatePriceWithNickname($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + nickname + } + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_with_provides.query b/execution/federationtesting/testdata/subscriptions/subscription_product_with_provides.query new file mode 100644 index 0000000000..44f3e93214 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_with_provides.query @@ -0,0 +1,13 @@ +subscription UpdatePriceWithProvides($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + author { + username + } + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_product_with_reviews.query b/execution/federationtesting/testdata/subscriptions/subscription_product_with_reviews.query new file mode 100644 index 0000000000..297c185e97 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_product_with_reviews.query @@ -0,0 +1,13 @@ +subscription UpdatePriceWithReviews($upc: String!) { + updateProductPrice(upc: $upc) { + upc + name + price + reviews { + body + authorWithoutProvides { + username + } + } + } +} diff --git a/execution/federationtesting/testdata/subscriptions/subscription_updated_price.query b/execution/federationtesting/testdata/subscriptions/subscription_updated_price.query new file mode 100644 index 0000000000..3df7cb56e6 --- /dev/null +++ b/execution/federationtesting/testdata/subscriptions/subscription_updated_price.query @@ -0,0 +1,7 @@ +subscription UpdatedPrice { + updatedPrice { + upc + name + price + } +} diff --git a/execution/federationtesting/util.go b/execution/federationtesting/util.go index 6fe81b5b75..a467465a6b 100644 --- a/execution/federationtesting/util.go +++ b/execution/federationtesting/util.go @@ -1,10 +1,13 @@ package federationtesting import ( + "context" + "fmt" "net/http/httptest" "os" "path/filepath" "strings" + "time" accounts "github.com/wundergraph/graphql-go-tools/execution/federationtesting/accounts/graph" products "github.com/wundergraph/graphql-go-tools/execution/federationtesting/products/graph" @@ -47,14 +50,26 @@ func LoadTestingSubgraphSDL(upstream Upstream) ([]byte, error) { } func NewFederationSetup(addGateway ...func(s *FederationSetup) *httptest.Server) *FederationSetup { + return newFederationSetup(false, addGateway...) +} + +func NewManualFederationSetup(addGateway ...func(s *FederationSetup) *httptest.Server) *FederationSetup { + return newFederationSetup(true, addGateway...) +} + +func newFederationSetup(enableManualSubscriptionEvents bool, addGateway ...func(s *FederationSetup) *httptest.Server) *FederationSetup { accountUpstreamServer := httptest.NewServer(accounts.GraphQLEndpointHandler(accounts.TestOptions)) - productsUpstreamServer := httptest.NewServer(products.GraphQLEndpointHandler(products.TestOptions)) + productOptions := products.TestOptions + productOptions.EnableManualSubscriptionEvents = enableManualSubscriptionEvents + productsEndpoint := products.GraphQLEndpointHandler(productOptions) + productsUpstreamServer := httptest.NewServer(productsEndpoint) reviewsUpstreamServer := httptest.NewServer(reviews.GraphQLEndpointHandler(reviews.TestOptions)) setup := &FederationSetup{ - AccountsUpstreamServer: accountUpstreamServer, - ProductsUpstreamServer: productsUpstreamServer, - ReviewsUpstreamServer: reviewsUpstreamServer, + AccountsUpstreamServer: accountUpstreamServer, + ProductsUpstreamServer: productsUpstreamServer, + ReviewsUpstreamServer: reviewsUpstreamServer, + productsSubscriptionEvents: productsEndpoint.SubscriptionEvents(), } if len(addGateway) > 0 { @@ -65,10 +80,11 @@ func NewFederationSetup(addGateway ...func(s *FederationSetup) *httptest.Server) } type FederationSetup struct { - AccountsUpstreamServer *httptest.Server - ProductsUpstreamServer *httptest.Server - ReviewsUpstreamServer *httptest.Server - GatewayServer *httptest.Server + AccountsUpstreamServer *httptest.Server + ProductsUpstreamServer *httptest.Server + ReviewsUpstreamServer *httptest.Server + GatewayServer *httptest.Server + productsSubscriptionEvents *products.ManualSubscriptionEventSource } func (f *FederationSetup) Close() { @@ -79,3 +95,14 @@ func (f *FederationSetup) Close() { f.GatewayServer.Close() } } + +func (f *FederationSetup) NextProductSubscription(ctx context.Context) (*products.ManualSubscriptionHandle, error) { + if f.productsSubscriptionEvents == nil { + return nil, fmt.Errorf("manual product subscriptions are not enabled for this setup") + } + + waitCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + return f.productsSubscriptionEvents.NextSubscription(waitCtx) +} diff --git a/execution/go.mod b/execution/go.mod index 8fb7c1fcb4..b1eed7ba08 100644 --- a/execution/go.mod +++ b/execution/go.mod @@ -4,6 +4,7 @@ go 1.25 require ( github.com/99designs/gqlgen v0.17.76 + github.com/cespare/xxhash/v2 v2.3.0 github.com/gobwas/ws v1.4.0 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 @@ -14,12 +15,12 @@ require ( github.com/sebdah/goldie/v2 v2.7.1 github.com/stretchr/testify v1.11.1 github.com/vektah/gqlparser/v2 v2.5.30 - github.com/wundergraph/astjson v0.0.0-20250106123708-be463c97e083 + github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f github.com/wundergraph/cosmo/composition-go v0.0.0-20241020204711-78f240a77c99 github.com/wundergraph/cosmo/router v0.0.0-20251013094319-c611abf26b17 github.com/wundergraph/graphql-go-tools/v2 v2.0.0-rc.231 go.uber.org/atomic v1.11.0 - google.golang.org/grpc v1.68.1 + google.golang.org/grpc v1.71.0 google.golang.org/protobuf v1.36.9 ) @@ -28,7 +29,6 @@ require ( github.com/bitfield/gotestdox v0.2.2 // indirect github.com/bufbuild/protocompile v0.14.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dlclark/regexp2 v1.11.0 // indirect @@ -65,6 +65,7 @@ require ( github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/sjson v1.2.5 // indirect github.com/urfave/cli/v2 v2.27.7 // indirect + github.com/wundergraph/go-arena v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect diff --git a/execution/go.sum b/execution/go.sum index babde00b17..2fe161eb05 100644 --- a/execution/go.sum +++ b/execution/go.sum @@ -44,6 +44,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= @@ -163,18 +165,27 @@ github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= -github.com/wundergraph/astjson v0.0.0-20250106123708-be463c97e083 h1:8/D7f8gKxTBjW+SZK4mhxTTBVpxcqeBgWF1Rfmltbfk= -github.com/wundergraph/astjson v0.0.0-20250106123708-be463c97e083/go.mod h1:eOTL6acwctsN4F3b7YE+eE2t8zcJ/doLm9sZzsxxxrE= +github.com/wundergraph/astjson v1.1.1-0.20260418181506-345133162d36 h1:xf9ZfqdSRYgqf2l2TYFGHXIzagWvFRefvbJW3StWSiM= +github.com/wundergraph/astjson v1.1.1-0.20260418181506-345133162d36/go.mod h1:uHSJv7uowLN/nIPvkTFqUDt1sXk4qQU0KNwHfwfDcQE= +github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f h1:MoVoeMlgY9Ej1aoF3Y/kniBZ8pv+WfIA3YSCnPBh+6M= +github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f/go.mod h1:uHSJv7uowLN/nIPvkTFqUDt1sXk4qQU0KNwHfwfDcQE= github.com/wundergraph/cosmo/composition-go v0.0.0-20241020204711-78f240a77c99 h1:TGXDYfDhwFLFTuNuCwkuqXT5aXGz47zcurXLfTBS9w4= github.com/wundergraph/cosmo/composition-go v0.0.0-20241020204711-78f240a77c99/go.mod h1:fUuOAUAXUFB/mlSkAaImGeE4A841AKR5dTMWhV4ibxI= github.com/wundergraph/cosmo/router v0.0.0-20251013094319-c611abf26b17 h1:GjO2E8LTf3U5JiQJCY4MmlRcAjVt7IvAbWFSgEjQdl8= github.com/wundergraph/cosmo/router v0.0.0-20251013094319-c611abf26b17/go.mod h1:7kt64e0LOLMBqOzrfu9PuLRn9cVT9YN1Bb3EennVtws= -github.com/wundergraph/graphql-go-tools/v2 v2.0.0-rc.231 h1:2C8LNFGs8MtI2yPy2/a2WRf9/X2FoMqXlEJkpTjvsTg= +github.com/wundergraph/go-arena v1.2.0 h1:6MlhEy0NBY3Z+BuK3rj0F9YoT3bM0SlahGkzK0lKRZ4= +github.com/wundergraph/go-arena v1.2.0/go.mod h1:ROOysEHWJjLQ8FSfNxZCziagb7Qw2nXY3/vgKRh7eWw= github.com/wundergraph/graphql-go-tools/v2 v2.0.0-rc.231/go.mod h1:ErOQH1ki2+SZB8JjpTyGVnoBpg5picIyjvuWQJP4abg= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -258,8 +269,8 @@ gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= diff --git a/execution/graphql/normalization_test.go b/execution/graphql/normalization_test.go index 0ed33345f9..5e9fd135ee 100644 --- a/execution/graphql/normalization_test.go +++ b/execution/graphql/normalization_test.go @@ -14,7 +14,9 @@ import ( ) func TestRequest_Normalize(t *testing.T) { + t.Parallel() t.Run("should return error when schema is nil", func(t *testing.T) { + t.Parallel() request := Request{ OperationName: "Hello", Variables: nil, @@ -29,6 +31,7 @@ func TestRequest_Normalize(t *testing.T) { }) t.Run("should successfully normalize request with fragments", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) request := StarwarsRequestForQuery(t, starwars.FileFragmentsQuery) request.OperationName = "Fragments" @@ -77,6 +80,7 @@ func TestRequest_Normalize(t *testing.T) { } t.Run("should successfully normalize single query with arguments", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileDroidWithArgQuery) runNormalization(t, &request, `{"a":"R2D2"}`, `query($a: ID!){ @@ -87,6 +91,7 @@ func TestRequest_Normalize(t *testing.T) { }) t.Run("should successfully normalize query and remove unused variables", func(t *testing.T) { + t.Parallel() request := Request{ OperationName: "MySearch", Variables: stringify(map[string]interface{}{ @@ -106,6 +111,7 @@ func TestRequest_Normalize(t *testing.T) { }) t.Run("should successfully normalize query and remove variables with no value provided", func(t *testing.T) { + t.Parallel() request := Request{ OperationName: "MySearch", Variables: stringify(map[string]interface{}{ @@ -123,6 +129,7 @@ func TestRequest_Normalize(t *testing.T) { }) t.Run("should successfully normalize multiple queries with arguments", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileMultiQueriesWithArguments) request.OperationName = "GetDroid" @@ -135,6 +142,7 @@ func TestRequest_Normalize(t *testing.T) { }) t.Run("input coercion for lists without variables", func(t *testing.T) { + t.Parallel() schema := InputCoercionForListSchema(t) request := Request{ OperationName: "charactersByIds", @@ -149,6 +157,7 @@ func TestRequest_Normalize(t *testing.T) { }) t.Run("input coercion for lists with variable extraction", func(t *testing.T) { + t.Parallel() schema := InputCoercionForListSchema(t) request := Request{ OperationName: "GetCharactersByIds", @@ -163,6 +172,7 @@ func TestRequest_Normalize(t *testing.T) { }) t.Run("input coercion for lists with variables", func(t *testing.T) { + t.Parallel() schema := InputCoercionForListSchema(t) request := Request{ OperationName: "charactersByIds", @@ -180,7 +190,9 @@ func TestRequest_Normalize(t *testing.T) { } func Test_normalizationResultFromReport(t *testing.T) { + t.Parallel() t.Run("should return successful result when report does not have errors", func(t *testing.T) { + t.Parallel() report := operationreport.Report{} result, err := NormalizationResultFromReport(report) @@ -189,6 +201,7 @@ func Test_normalizationResultFromReport(t *testing.T) { }) t.Run("should return graphql errors and internal error when report contains them", func(t *testing.T) { + t.Parallel() internalErr := errors.New("errors occurred") externalErr := operationreport.ExternalError{ Message: "graphql error", diff --git a/execution/graphql/request.go b/execution/graphql/request.go index bccffebd67..425fe19218 100644 --- a/execution/graphql/request.go +++ b/execution/graphql/request.go @@ -38,6 +38,7 @@ type Request struct { OperationName string `json:"operationName"` Variables json.RawMessage `json:"variables,omitempty"` Query string `json:"query"` + Extensions json.RawMessage `json:"extensions,omitempty"` document ast.Document isParsed bool @@ -50,6 +51,31 @@ type Request struct { actualCost int } +// extensionsOnError is used for parsing the onError field from extensions +type extensionsOnError struct { + OnError string `json:"onError"` +} + +// GetOnErrorBehavior extracts the onError value from the extensions field. +// Returns the parsed ErrorBehavior and true if a valid value was found. +// Returns ErrorBehaviorPropagate and false if not found or invalid. +func (r *Request) GetOnErrorBehavior() (resolve.ErrorBehavior, bool) { + if len(r.Extensions) == 0 { + return resolve.ErrorBehaviorPropagate, false + } + + var ext extensionsOnError + if err := json.Unmarshal(r.Extensions, &ext); err != nil { + return resolve.ErrorBehaviorPropagate, false + } + + if ext.OnError == "" { + return resolve.ErrorBehaviorPropagate, false + } + + return resolve.ParseErrorBehavior(ext.OnError) +} + func UnmarshalRequest(reader io.Reader, request *Request) error { requestBytes, err := io.ReadAll(reader) if err != nil { diff --git a/execution/graphql/request_fields_validator_test.go b/execution/graphql/request_fields_validator_test.go index 9c155820fe..ed2002091d 100644 --- a/execution/graphql/request_fields_validator_test.go +++ b/execution/graphql/request_fields_validator_test.go @@ -9,10 +9,13 @@ import ( ) func TestFieldsValidator_Validate(t *testing.T) { - schema := StarwarsSchema(t) - request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) + t.Parallel() t.Run("should invalidate if blocked fields are used", func(t *testing.T) { + t.Parallel() + + schema := StarwarsSchema(t) + request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) blockedFields := []Type{ { @@ -29,6 +32,10 @@ func TestFieldsValidator_Validate(t *testing.T) { }) t.Run("should validate if non-blocked fields are used", func(t *testing.T) { + t.Parallel() + + schema := StarwarsSchema(t) + request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) blockedFields := []Type{ { @@ -46,11 +53,14 @@ func TestFieldsValidator_Validate(t *testing.T) { } func TestFieldsValidator_ValidateByFieldList(t *testing.T) { - schema := StarwarsSchema(t) - request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) + t.Parallel() t.Run("block list", func(t *testing.T) { + t.Parallel() t.Run("should invalidate if blocked fields are used", func(t *testing.T) { + t.Parallel() + schema := StarwarsSchema(t) + request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) blockList := FieldRestrictionList{ Kind: BlockList, Types: []Type{ @@ -69,6 +79,9 @@ func TestFieldsValidator_ValidateByFieldList(t *testing.T) { }) t.Run("should validate if non-blocked fields are used", func(t *testing.T) { + t.Parallel() + schema := StarwarsSchema(t) + request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) blockList := FieldRestrictionList{ Kind: BlockList, Types: []Type{ @@ -88,7 +101,11 @@ func TestFieldsValidator_ValidateByFieldList(t *testing.T) { }) t.Run("allow list", func(t *testing.T) { + t.Parallel() t.Run("should invalidate if a field which is not allowed is used", func(t *testing.T) { + t.Parallel() + schema := StarwarsSchema(t) + request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) allowList := FieldRestrictionList{ Kind: AllowList, Types: []Type{ @@ -111,6 +128,9 @@ func TestFieldsValidator_ValidateByFieldList(t *testing.T) { }) t.Run("should validate if all fields are allowed", func(t *testing.T) { + t.Parallel() + schema := StarwarsSchema(t) + request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) allowList := FieldRestrictionList{ Kind: AllowList, Types: []Type{ diff --git a/execution/graphql/request_onerror_test.go b/execution/graphql/request_onerror_test.go new file mode 100644 index 0000000000..20d7854457 --- /dev/null +++ b/execution/graphql/request_onerror_test.go @@ -0,0 +1,108 @@ +package graphql + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestRequest_GetOnErrorBehavior(t *testing.T) { + t.Parallel() + tests := []struct { + name string + extensions string + expected resolve.ErrorBehavior + ok bool + }{ + { + name: "NULL mode", + extensions: `{"onError":"NULL"}`, + expected: resolve.ErrorBehaviorNull, + ok: true, + }, + { + name: "PROPAGATE mode", + extensions: `{"onError":"PROPAGATE"}`, + expected: resolve.ErrorBehaviorPropagate, + ok: true, + }, + { + name: "HALT mode", + extensions: `{"onError":"HALT"}`, + expected: resolve.ErrorBehaviorHalt, + ok: true, + }, + { + name: "lowercase null", + extensions: `{"onError":"null"}`, + expected: resolve.ErrorBehaviorNull, + ok: true, + }, + { + name: "mixed case", + extensions: `{"onError":"Halt"}`, + expected: resolve.ErrorBehaviorHalt, + ok: true, + }, + { + name: "empty extensions", + extensions: ``, + expected: resolve.ErrorBehaviorPropagate, + ok: false, + }, + { + name: "no onError field", + extensions: `{"other":"value"}`, + expected: resolve.ErrorBehaviorPropagate, + ok: false, + }, + { + name: "empty onError value", + extensions: `{"onError":""}`, + expected: resolve.ErrorBehaviorPropagate, + ok: false, + }, + { + name: "invalid onError value", + extensions: `{"onError":"INVALID"}`, + expected: resolve.ErrorBehaviorPropagate, + ok: false, + }, + { + name: "invalid JSON", + extensions: `{invalid}`, + expected: resolve.ErrorBehaviorPropagate, + ok: false, + }, + { + name: "extensions with other fields", + extensions: `{"tracing":true,"onError":"NULL","persistedQuery":{"hash":"abc"}}`, + expected: resolve.ErrorBehaviorNull, + ok: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + req := &Request{ + Extensions: []byte(tc.extensions), + } + result, ok := req.GetOnErrorBehavior() + assert.Equal(t, tc.expected, result) + assert.Equal(t, tc.ok, ok) + }) + } +} + +func TestRequest_GetOnErrorBehavior_WithNilExtensions(t *testing.T) { + t.Parallel() + req := &Request{ + Query: "{ hello }", + } + result, ok := req.GetOnErrorBehavior() + assert.Equal(t, resolve.ErrorBehaviorPropagate, result) + assert.False(t, ok) +} diff --git a/execution/graphql/request_test.go b/execution/graphql/request_test.go index d59f1d72a5..3de770730a 100644 --- a/execution/graphql/request_test.go +++ b/execution/graphql/request_test.go @@ -12,7 +12,9 @@ import ( ) func TestUnmarshalRequest(t *testing.T) { + t.Parallel() t.Run("should return error when request is empty", func(t *testing.T) { + t.Parallel() requestBytes := []byte("") requestBuffer := bytes.NewBuffer(requestBytes) @@ -24,6 +26,7 @@ func TestUnmarshalRequest(t *testing.T) { }) t.Run("should successfully unmarshal request", func(t *testing.T) { + t.Parallel() requestBytes := []byte(`{"operationName": "Hello", "variables": "", "query": "query Hello { hello }"}`) requestBuffer := bytes.NewBuffer(requestBytes) @@ -37,6 +40,7 @@ func TestUnmarshalRequest(t *testing.T) { } func TestRequest_Print(t *testing.T) { + t.Parallel() query := "query Hello { hello }" request := Request{ OperationName: "Hello", @@ -53,6 +57,7 @@ func TestRequest_Print(t *testing.T) { } func TestRequest_parseQueryOnce(t *testing.T) { + t.Parallel() request := func() *Request { return &Request{ OperationName: "Hello", @@ -62,6 +67,7 @@ func TestRequest_parseQueryOnce(t *testing.T) { } t.Run("valid query", func(t *testing.T) { + t.Parallel() req := request() report := req.parseQueryOnce() assert.False(t, report.HasErrors()) @@ -69,6 +75,7 @@ func TestRequest_parseQueryOnce(t *testing.T) { }) t.Run("should not parse again", func(t *testing.T) { + t.Parallel() req := request() report := req.parseQueryOnce() assert.False(t, report.HasErrors()) @@ -80,6 +87,7 @@ func TestRequest_parseQueryOnce(t *testing.T) { }) t.Run("should not set is parsed for invalid query", func(t *testing.T) { + t.Parallel() req := request() req.Query = "{" report := req.parseQueryOnce() @@ -89,7 +97,9 @@ func TestRequest_parseQueryOnce(t *testing.T) { } func TestRequest_CalculateComplexity(t *testing.T) { + t.Parallel() t.Run("should successfully calculate the complexity of request", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) @@ -118,6 +128,7 @@ func TestRequest_CalculateComplexity(t *testing.T) { }) t.Run("should successfully calculate the complexity of request with multiple query fields", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) request := StarwarsRequestForQuery(t, starwars.FileHeroWithAliasesQuery) @@ -156,8 +167,10 @@ func TestRequest_CalculateComplexity(t *testing.T) { } func TestRequest_IsIntrospectionQuery(t *testing.T) { + t.Parallel() run := func(queryPayload string, expectedIsIntrospection bool) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() t.Helper() var request Request @@ -171,6 +184,7 @@ func TestRequest_IsIntrospectionQuery(t *testing.T) { } t.Run("schema introspection query", func(t *testing.T) { + t.Parallel() t.Run("with operation name IntrospectionQuery", run(namedIntrospectionQuery, true)) t.Run("without operation name IntrospectionQuery but as single query", run(singleNamedIntrospectionQueryWithoutOperationName, true)) t.Run("with empty operation name", run(silentIntrospectionQuery, true)) @@ -182,11 +196,13 @@ func TestRequest_IsIntrospectionQuery(t *testing.T) { }) t.Run("type introspection query", func(t *testing.T) { + t.Parallel() t.Run("as single introspection", run(typeIntrospectionQuery, true)) t.Run("with multiple queries in payload", run(typeIntrospectionQueryWithMultipleQueries, true)) }) t.Run("not introspection query", func(t *testing.T) { + t.Parallel() t.Run("query with operation name IntrospectionQuery", run(nonIntrospectionQueryWithIntrospectionQueryName, false)) t.Run("Foo query", run(nonIntrospectionQuery, false)) t.Run("Foo mutation", run(mutationQuery, false)) @@ -200,69 +216,62 @@ func TestRequest_IsIntrospectionQuery(t *testing.T) { } func TestRequest_OperationType(t *testing.T) { - request := Request{ - OperationName: "", - Variables: nil, - Query: "query HelloQuery { hello: String } mutation HelloMutation { hello: String } subscription HelloSubscription { hello: String }", - } + t.Parallel() + + multiOpQuery := "query HelloQuery { hello: String } mutation HelloMutation { hello: String } subscription HelloSubscription { hello: String }" t.Run("should return operation type 'Query'", func(t *testing.T) { - request.OperationName = "HelloQuery" + t.Parallel() + request := Request{OperationName: "HelloQuery", Query: multiOpQuery} opType, err := request.OperationType() assert.NoError(t, err) assert.Equal(t, OperationTypeQuery, opType) }) t.Run("should return operation type 'Mutation'", func(t *testing.T) { - request.OperationName = "HelloMutation" + t.Parallel() + request := Request{OperationName: "HelloMutation", Query: multiOpQuery} opType, err := request.OperationType() assert.NoError(t, err) assert.Equal(t, OperationTypeMutation, opType) }) t.Run("should return operation type 'Subscription'", func(t *testing.T) { - request.OperationName = "HelloSubscription" + t.Parallel() + request := Request{OperationName: "HelloSubscription", Query: multiOpQuery} opType, err := request.OperationType() assert.NoError(t, err) assert.Equal(t, OperationTypeSubscription, opType) }) t.Run("should return operation type 'Unknown' on error", func(t *testing.T) { - emptyRequest := Request{ - Query: "Broken Query", - } - opType, err := emptyRequest.OperationType() + t.Parallel() + request := Request{Query: "Broken Query"} + opType, err := request.OperationType() assert.Error(t, err) assert.Equal(t, OperationTypeUnknown, opType) }) t.Run("should return operation type 'Unknown' when empty and parsable", func(t *testing.T) { - emptyRequest := Request{} - opType, err := emptyRequest.OperationType() + t.Parallel() + request := Request{} + opType, err := request.OperationType() assert.NoError(t, err) assert.Equal(t, OperationTypeUnknown, opType) }) t.Run("should return operation type 'Query' if no name and a single operation is provided", func(t *testing.T) { - singleOperationQueryRequest := Request{ - OperationName: "", - Variables: nil, - Query: "{ hello: String }", - } - - opType, err := singleOperationQueryRequest.OperationType() + t.Parallel() + request := Request{Query: "{ hello: String }"} + opType, err := request.OperationType() assert.NoError(t, err) assert.Equal(t, OperationTypeQuery, opType) }) t.Run("should return operation type 'Mutation' if mutation is the only operation", func(t *testing.T) { - singleOperationMutationRequest := Request{ - OperationName: "", - Variables: nil, - Query: "mutation HelloMutation { hello: String }", - } - - opType, err := singleOperationMutationRequest.OperationType() + t.Parallel() + request := Request{Query: "mutation HelloMutation { hello: String }"} + opType, err := request.OperationType() assert.NoError(t, err) assert.Equal(t, OperationTypeMutation, opType) }) diff --git a/execution/graphql/schema_test.go b/execution/graphql/schema_test.go index 5e4bc69227..b1ecab469a 100644 --- a/execution/graphql/schema_test.go +++ b/execution/graphql/schema_test.go @@ -14,7 +14,9 @@ import ( ) func TestNewSchemaFromReader(t *testing.T) { + t.Parallel() t.Run("should return error when an error occurs internally", func(t *testing.T) { + t.Parallel() schemaBytes := []byte("query: Query") schemaReader := bytes.NewBuffer(schemaBytes) schema, err := NewSchemaFromReader(schemaReader) @@ -24,6 +26,7 @@ func TestNewSchemaFromReader(t *testing.T) { }) t.Run("should successfully read from io.Reader", func(t *testing.T) { + t.Parallel() schemaBytes := []byte("schema { query: Query } type Query { hello: String }") schemaReader := bytes.NewBuffer(schemaBytes) schema, err := NewSchemaFromReader(schemaReader) @@ -34,7 +37,9 @@ func TestNewSchemaFromReader(t *testing.T) { } func TestNewSchemaFromString(t *testing.T) { + t.Parallel() t.Run("should return error when an error occurs internally", func(t *testing.T) { + t.Parallel() schemaBytes := []byte("query: Query") schema, err := NewSchemaFromString(string(schemaBytes)) @@ -43,6 +48,7 @@ func TestNewSchemaFromString(t *testing.T) { }) t.Run("should successfully read from string", func(t *testing.T) { + t.Parallel() schemaBytes := []byte("schema { query: Query } type Query { hello: String }") schema, err := NewSchemaFromString(string(schemaBytes)) @@ -52,7 +58,9 @@ func TestNewSchemaFromString(t *testing.T) { } func TestSchema_Normalize(t *testing.T) { + t.Parallel() t.Run("should successfully normalize schema", func(t *testing.T) { + t.Parallel() parsedSchema, err := NewSchemaFromString("type Query { me: String } extend type Query { you: String }") require.NoError(t, err) @@ -72,8 +80,10 @@ func TestSchema_Normalize(t *testing.T) { } func TestSchema_HasQueryType(t *testing.T) { + t.Parallel() run := func(schema string, expectation bool) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() parsedSchema, err := createSchema([]byte(schema), false) require.NoError(t, err) @@ -83,6 +93,7 @@ func TestSchema_HasQueryType(t *testing.T) { } t.Run("schema without base definition", func(t *testing.T) { + t.Parallel() t.Run("should return false when there is no query type present", run(` schema { mutation: Mutation @@ -104,8 +115,10 @@ func TestSchema_HasQueryType(t *testing.T) { } func TestSchema_QueryTypeName(t *testing.T) { + t.Parallel() run := func(schema string, expectation string) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() parsedSchema, err := NewSchemaFromString(schema) require.NoError(t, err) @@ -143,8 +156,10 @@ func TestSchema_QueryTypeName(t *testing.T) { } func TestSchema_HasMutationType(t *testing.T) { + t.Parallel() run := func(schema string, expectation bool) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() parsedSchema, err := NewSchemaFromString(schema) require.NoError(t, err) @@ -173,8 +188,10 @@ func TestSchema_HasMutationType(t *testing.T) { } func TestSchema_MutationTypeName(t *testing.T) { + t.Parallel() run := func(schema string, expectation string) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() parsedSchema, err := NewSchemaFromString(schema) require.NoError(t, err) @@ -212,8 +229,10 @@ func TestSchema_MutationTypeName(t *testing.T) { } func TestSchema_HasSubscriptionType(t *testing.T) { + t.Parallel() run := func(schema string, expectation bool) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() parsedSchema, err := NewSchemaFromString(schema) require.NoError(t, err) @@ -242,8 +261,10 @@ func TestSchema_HasSubscriptionType(t *testing.T) { } func TestSchema_SubscriptionTypeName(t *testing.T) { + t.Parallel() run := func(schema string, expectation string) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() parsedSchema, err := NewSchemaFromString(schema) require.NoError(t, err) @@ -281,6 +302,7 @@ func TestSchema_SubscriptionTypeName(t *testing.T) { } func TestSchema_Document(t *testing.T) { + t.Parallel() schemaBytes := []byte("schema { query: Query } type Query { hello: String }") schema, err := NewSchemaFromString(string(schemaBytes)) require.NoError(t, err) @@ -299,8 +321,10 @@ func TestSchema_Document(t *testing.T) { } func TestValidateSchemaString(t *testing.T) { + t.Parallel() run := func(schema string, expectedValid bool, expectedValidationErrorCount int) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() validationResult, err := ValidateSchemaString(schema) assert.NoError(t, err) assert.Equal(t, expectedValid, validationResult.Valid) @@ -346,8 +370,10 @@ func TestValidateSchemaString(t *testing.T) { } func TestSchema_Validate(t *testing.T) { + t.Parallel() run := func(schema string, expectedValid bool, expectedValidationErrorCount int) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() parsedSchema, err := NewSchemaFromString(schema) require.NoError(t, err) @@ -390,10 +416,12 @@ func TestSchema_Validate(t *testing.T) { } func TestSchema_GetAllFieldArguments(t *testing.T) { + t.Parallel() schema, err := NewSchemaFromString(schemaWithChildren) require.NoError(t, err) t.Run("should get all field arguments without skip function", func(t *testing.T) { + t.Parallel() fieldArguments := schema.GetAllFieldArguments() expectedFieldArguments := []TypeFieldArguments{ { @@ -456,6 +484,7 @@ func TestSchema_GetAllFieldArguments(t *testing.T) { }) t.Run("should get all field arguments excluding skipped fields by skip field funcs", func(t *testing.T) { + t.Parallel() fieldArguments := schema.GetAllFieldArguments(NewSkipReservedNamesFunc()) expectedFieldArguments := []TypeFieldArguments{ { @@ -489,15 +518,18 @@ func TestSchema_GetAllFieldArguments(t *testing.T) { } func TestSchema_GetAllNestedFieldChildrenFromTypeField(t *testing.T) { + t.Parallel() schema, err := NewSchemaFromString(schemaWithChildren) require.NoError(t, err) t.Run("should return nil when type or field does not exist", func(t *testing.T) { + t.Parallel() typeFields := schema.GetAllNestedFieldChildrenFromTypeField("Not", "existent") assert.Equal(t, []TypeFields(nil), typeFields) }) t.Run("should get field children without skip function", func(t *testing.T) { + t.Parallel() typeFields := schema.GetAllNestedFieldChildrenFromTypeField("Query", "withChildren") expectedTypeFields := []TypeFields{ { @@ -514,6 +546,7 @@ func TestSchema_GetAllNestedFieldChildrenFromTypeField(t *testing.T) { }) t.Run("should get field children without skip function on field with interface type", func(t *testing.T) { + t.Parallel() typeFields := schema.GetAllNestedFieldChildrenFromTypeField("Query", "idType") expectedTypeFields := []TypeFields{ { @@ -534,6 +567,7 @@ func TestSchema_GetAllNestedFieldChildrenFromTypeField(t *testing.T) { }) t.Run("should get field children with skip function for engine v2 data source config", func(t *testing.T) { + t.Parallel() dsCfg, _ := plan.NewDataSourceConfiguration[any]( "test", nil, @@ -561,6 +595,7 @@ func TestSchema_GetAllNestedFieldChildrenFromTypeField(t *testing.T) { }) t.Run("should get field children from schema with recursive references", func(t *testing.T) { + t.Parallel() schema := CreateCountriesSchema(t) typeFields := schema.GetAllNestedFieldChildrenFromTypeField("Query", "countries") @@ -587,6 +622,7 @@ func TestSchema_GetAllNestedFieldChildrenFromTypeField(t *testing.T) { }) t.Run("should get field children from schema with recursive references on field with interface type", func(t *testing.T) { + t.Parallel() schema := CreateCountriesSchema(t) typeFields := schema.GetAllNestedFieldChildrenFromTypeField("Query", "codeType") diff --git a/execution/graphql/schema_validation_errors_test.go b/execution/graphql/schema_validation_errors_test.go index 1a0d0fed1c..af5d319a83 100644 --- a/execution/graphql/schema_validation_errors_test.go +++ b/execution/graphql/schema_validation_errors_test.go @@ -7,6 +7,7 @@ import ( ) func TestSchemaValidationErrors_Error(t *testing.T) { + t.Parallel() validationErrs := SchemaValidationErrors{ SchemaValidationError{ Message: "there can be only one query type in schema", @@ -17,6 +18,7 @@ func TestSchemaValidationErrors_Error(t *testing.T) { } func TestSchemaValidationErrors_Count(t *testing.T) { + t.Parallel() validationErrs := SchemaValidationErrors{ SchemaValidationError{ Message: "there can be only one query type in schema", @@ -27,6 +29,7 @@ func TestSchemaValidationErrors_Count(t *testing.T) { } func TestSchemaValidationErrors_ErrorByIndex(t *testing.T) { + t.Parallel() existingValidationError := SchemaValidationError{ Message: "there can be only one query type in schema", } @@ -40,6 +43,7 @@ func TestSchemaValidationErrors_ErrorByIndex(t *testing.T) { } func TestSchemaValidationError_Error(t *testing.T) { + t.Parallel() validationError := SchemaValidationError{ Message: "there can be only one query type in schema", } diff --git a/execution/graphql/validation_test.go b/execution/graphql/validation_test.go index b71775846e..a47bc4fd51 100644 --- a/execution/graphql/validation_test.go +++ b/execution/graphql/validation_test.go @@ -14,7 +14,9 @@ import ( ) func TestRequest_ValidateForSchema(t *testing.T) { + t.Parallel() t.Run("should return error when schema is nil", func(t *testing.T) { + t.Parallel() request := Request{ OperationName: "Hello", Variables: nil, @@ -28,6 +30,7 @@ func TestRequest_ValidateForSchema(t *testing.T) { }) t.Run("should return gql errors no valid operation is in the the request", func(t *testing.T) { + t.Parallel() request := Request{} schema, err := NewSchemaFromString("schema { query: Query } type Query { hello: String }") @@ -40,6 +43,7 @@ func TestRequest_ValidateForSchema(t *testing.T) { }) t.Run("should return gql errors when validation fails", func(t *testing.T) { + t.Parallel() request := Request{ OperationName: "Goodbye", Variables: nil, @@ -56,6 +60,7 @@ func TestRequest_ValidateForSchema(t *testing.T) { }) t.Run("should successfully validate even when schema definition is missing", func(t *testing.T) { + t.Parallel() request := Request{ OperationName: "Hello", Variables: nil, @@ -72,6 +77,7 @@ func TestRequest_ValidateForSchema(t *testing.T) { }) t.Run("should return valid result for introspection query after normalization", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) request := StarwarsRequestForQuery(t, starwars.FileIntrospectionQuery) @@ -87,6 +93,7 @@ func TestRequest_ValidateForSchema(t *testing.T) { }) t.Run("should return valid result when validation is successful", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) @@ -98,7 +105,9 @@ func TestRequest_ValidateForSchema(t *testing.T) { } func TestRequest_ValidateRestrictedFields(t *testing.T) { + t.Parallel() t.Run("should return error when schema is nil", func(t *testing.T) { + t.Parallel() request := Request{} result, err := request.ValidateRestrictedFields(nil, nil) assert.Error(t, err) @@ -107,6 +116,7 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { }) t.Run("should allow request when no restrictions set", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) @@ -116,6 +126,7 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { }) t.Run("when restrictions set", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) restrictedFields := []Type{ {Name: "Query", Fields: []string{"droid"}}, @@ -125,7 +136,9 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { } t.Run("should allow request", func(t *testing.T) { + t.Parallel() t.Run("when only allowed fields requested", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) result, err := request.ValidateRestrictedFields(schema, restrictedFields) assert.NoError(t, err) @@ -141,7 +154,9 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { }) t.Run("should disallow request", func(t *testing.T) { + t.Parallel() t.Run("when query is restricted", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileDroidWithArgAndVarQuery) result, err := request.ValidateRestrictedFields(schema, restrictedFields) assert.NoError(t, err) @@ -154,6 +169,7 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { }) t.Run("when mutation is restricted", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileCreateReviewMutation) result, err := request.ValidateRestrictedFields(schema, restrictedFields) assert.NoError(t, err) @@ -162,6 +178,7 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { }) t.Run("when type field is restricted", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileUnionQuery) result, err := request.ValidateRestrictedFields(schema, restrictedFields) assert.NoError(t, err) @@ -170,6 +187,7 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { }) t.Run("when mutation response type has restricted field", func(t *testing.T) { + t.Parallel() restrictedFields := []Type{ {Name: "Review", Fields: []string{"id"}}, } @@ -186,9 +204,11 @@ func TestRequest_ValidateRestrictedFields(t *testing.T) { } func TestRequest_ValidateFieldRestrictions(t *testing.T) { + t.Parallel() validator := DefaultFieldsValidator{} t.Run("should return error when schema is nil", func(t *testing.T) { + t.Parallel() request := Request{} result, err := request.ValidateFieldRestrictions(nil, FieldRestrictionList{}, validator) assert.Error(t, err) @@ -197,6 +217,7 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { }) t.Run("should allow request when no restrictions set", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) @@ -208,6 +229,7 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { }) t.Run("when restrictions set", func(t *testing.T) { + t.Parallel() schema := StarwarsSchema(t) restrictedFields := []Type{ {Name: "Query", Fields: []string{"droid"}}, @@ -217,7 +239,9 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { } t.Run("should allow request", func(t *testing.T) { + t.Parallel() t.Run("when only allowed fields requested", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileSimpleHeroQuery) result, err := request.ValidateFieldRestrictions(schema, FieldRestrictionList{ Kind: BlockList, @@ -236,7 +260,9 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { }) t.Run("should disallow request", func(t *testing.T) { + t.Parallel() t.Run("when query is restricted", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileDroidWithArgAndVarQuery) result, err := request.ValidateFieldRestrictions(schema, FieldRestrictionList{ Kind: BlockList, @@ -252,6 +278,7 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { }) t.Run("when mutation is restricted", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileCreateReviewMutation) result, err := request.ValidateFieldRestrictions(schema, FieldRestrictionList{ Kind: BlockList, @@ -263,6 +290,7 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { }) t.Run("when type field is restricted", func(t *testing.T) { + t.Parallel() request := StarwarsRequestForQuery(t, starwars.FileUnionQuery) result, err := request.ValidateFieldRestrictions(schema, FieldRestrictionList{ Kind: BlockList, @@ -274,6 +302,7 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { }) t.Run("when mutation response type has restricted field", func(t *testing.T) { + t.Parallel() restrictedFields := []Type{ {Name: "Review", Fields: []string{"id"}}, } @@ -293,7 +322,9 @@ func TestRequest_ValidateFieldRestrictions(t *testing.T) { } func Test_operationValidationResultFromReport(t *testing.T) { + t.Parallel() t.Run("should return result for valid when report does not have errors", func(t *testing.T) { + t.Parallel() report := operationreport.Report{} result, err := operationValidationResultFromReport(report) @@ -302,6 +333,7 @@ func Test_operationValidationResultFromReport(t *testing.T) { }) t.Run("should return validation error and internal error when report contain them", func(t *testing.T) { + t.Parallel() internalErr := errors.New("errors occurred") externalErr := operationreport.ExternalError{ Message: "graphql error", diff --git a/execution/subscription/context_test.go b/execution/subscription/context_test.go index ebe72f89ce..cf06a9db7a 100644 --- a/execution/subscription/context_test.go +++ b/execution/subscription/context_test.go @@ -12,6 +12,7 @@ import ( ) func TestNewInitialHttpRequestContext(t *testing.T) { + t.Parallel() ctx, cancelFn := context.WithCancel(context.Background()) defer cancelFn() @@ -23,7 +24,9 @@ func TestNewInitialHttpRequestContext(t *testing.T) { assert.Equal(t, req, initialReqCtx.Request) } +//nolint:tparallel // Subtests intentionally share the same cancellation map and context state. func TestSubscriptionCancellations(t *testing.T) { + t.Parallel() cancellations := subscriptionCancellations{} var ctx context.Context var err error @@ -52,6 +55,7 @@ func TestSubscriptionCancellations(t *testing.T) { } func TestSubscriptionIdsShouldBeUnique(t *testing.T) { + t.Parallel() sc := subscriptionCancellations{} var ctx context.Context var err error diff --git a/execution/subscription/engine_test.go b/execution/subscription/engine_test.go index 5b447ede0f..87e7630c4a 100644 --- a/execution/subscription/engine_test.go +++ b/execution/subscription/engine_test.go @@ -18,8 +18,11 @@ import ( ) func TestExecutorEngine_StartOperation(t *testing.T) { + t.Parallel() t.Run("execute non-subscription operation", func(t *testing.T) { + t.Parallel() t.Run("on execution failure", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(2) @@ -94,6 +97,7 @@ func TestExecutorEngine_StartOperation(t *testing.T) { }) t.Run("on execution success", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(2) @@ -168,7 +172,9 @@ func TestExecutorEngine_StartOperation(t *testing.T) { }) t.Run("execute subscription operation", func(t *testing.T) { + t.Parallel() t.Run("on execution failure", func(t *testing.T) { + t.Parallel() if runtime.GOOS == "windows" { t.Skip("this test fails on Windows due to different timings than unix, consider fixing it at some point") } @@ -230,6 +236,7 @@ func TestExecutorEngine_StartOperation(t *testing.T) { }) t.Run("on execution success", func(t *testing.T) { + t.Parallel() if runtime.GOOS == "windows" { t.Skip("this test fails on Windows due to different timings than unix, consider fixing it at some point") } @@ -295,6 +302,7 @@ func TestExecutorEngine_StartOperation(t *testing.T) { }) t.Run("error on duplicate id", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) @@ -368,6 +376,7 @@ func TestExecutorEngine_StartOperation(t *testing.T) { } func TestExecutorEngine_StopSubscription(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) @@ -436,6 +445,7 @@ func TestExecutorEngine_StopSubscription(t *testing.T) { } func TestExecutorEngine_TerminateAllConnections(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(3) diff --git a/execution/subscription/executor_v2.go b/execution/subscription/executor_v2.go index 22640ce0f8..ae8ba4e38b 100644 --- a/execution/subscription/executor_v2.go +++ b/execution/subscription/executor_v2.go @@ -16,17 +16,19 @@ type ExecutorV2Pool struct { engine *engine.ExecutionEngine executorPool *sync.Pool connectionInitReqCtx context.Context // connectionInitReqCtx - holds original request context used to establish websocket connection + executionOptions []engine.ExecutionOptions } -func NewExecutorV2Pool(engine *engine.ExecutionEngine, connectionInitReqCtx context.Context) *ExecutorV2Pool { +func NewExecutorV2Pool(eng *engine.ExecutionEngine, connectionInitReqCtx context.Context, opts ...engine.ExecutionOptions) *ExecutorV2Pool { return &ExecutorV2Pool{ - engine: engine, + engine: eng, executorPool: &sync.Pool{ New: func() interface{} { return &ExecutorV2{} }, }, connectionInitReqCtx: connectionInitReqCtx, + executionOptions: opts, } } @@ -38,10 +40,11 @@ func (e *ExecutorV2Pool) Get(payload []byte) (Executor, error) { } return &ExecutorV2{ - engine: e.engine, - operation: &operation, - context: context.Background(), - reqCtx: e.connectionInitReqCtx, + engine: e.engine, + operation: &operation, + context: context.Background(), + reqCtx: e.connectionInitReqCtx, + executionOptions: e.executionOptions, }, nil } @@ -52,18 +55,20 @@ func (e *ExecutorV2Pool) Put(executor Executor) error { } type ExecutorV2 struct { - engine *engine.ExecutionEngine - operation *graphql.Request - context context.Context - reqCtx context.Context + engine *engine.ExecutionEngine + operation *graphql.Request + context context.Context + reqCtx context.Context + executionOptions []engine.ExecutionOptions } func (e *ExecutorV2) Execute(writer resolve.SubscriptionResponseWriter) error { - options := make([]engine.ExecutionOptions, 0) + options := make([]engine.ExecutionOptions, 0, len(e.executionOptions)+1) switch ctx := e.reqCtx.(type) { case *InitialHttpRequestContext: options = append(options, engine.WithAdditionalHttpHeaders(ctx.Request.Header)) } + options = append(options, e.executionOptions...) return e.engine.Execute(e.context, e.operation, writer, options...) } @@ -86,4 +91,5 @@ func (e *ExecutorV2) Reset() { e.operation = nil e.context = context.Background() e.reqCtx = context.TODO() + e.executionOptions = nil } diff --git a/execution/subscription/handler_test.go b/execution/subscription/handler_test.go index 125cee5161..a7f7ba8134 100644 --- a/execution/subscription/handler_test.go +++ b/execution/subscription/handler_test.go @@ -14,7 +14,9 @@ import ( ) func TestUniversalProtocolHandler_Handle(t *testing.T) { + t.Parallel() t.Run("should terminate when client is disconnected", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) @@ -62,6 +64,7 @@ func TestUniversalProtocolHandler_Handle(t *testing.T) { }) t.Run("should terminate when reading on closed connection", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) @@ -112,6 +115,7 @@ func TestUniversalProtocolHandler_Handle(t *testing.T) { }) t.Run("should sent event on client read error", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) @@ -164,6 +168,7 @@ func TestUniversalProtocolHandler_Handle(t *testing.T) { }) t.Run("should handover message to protocol handler", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) @@ -217,7 +222,9 @@ func TestUniversalProtocolHandler_Handle(t *testing.T) { }) t.Run("read error time out", func(t *testing.T) { + t.Parallel() t.Run("should stop handler when read error timer runs out", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) @@ -270,6 +277,7 @@ func TestUniversalProtocolHandler_Handle(t *testing.T) { }) t.Run("should continue running handler after intermittent read error", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) diff --git a/execution/subscription/legacy_handler_test.go b/execution/subscription/legacy_handler_test.go index 1ca4933258..bb6365b853 100644 --- a/execution/subscription/legacy_handler_test.go +++ b/execution/subscription/legacy_handler_test.go @@ -41,7 +41,9 @@ func (w *websocketHook) OnBeforeStart(reqCtx context.Context, operation *graphql return nil } +//nolint:tparallel // Subtests share websocket clients, hooks, and test servers; parallel execution is unsafe here. func TestHandler_Handle(t *testing.T) { + t.Parallel() t.Run("engine v2", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/execution/subscription/time_out_test.go b/execution/subscription/time_out_test.go index 72968fb6c5..0accc72fbb 100644 --- a/execution/subscription/time_out_test.go +++ b/execution/subscription/time_out_test.go @@ -11,7 +11,9 @@ import ( ) func TestTimeOutChecker(t *testing.T) { + t.Parallel() t.Run("should stop timer if context is done before", func(t *testing.T) { + t.Parallel() timeOutActionExecuted := false timeOutAction := func() { timeOutActionExecuted = true @@ -33,6 +35,7 @@ func TestTimeOutChecker(t *testing.T) { }) t.Run("should stop process if timer runs out", func(t *testing.T) { + t.Parallel() wg := &sync.WaitGroup{} wg.Add(1) diff --git a/execution/subscription/websocket/client_test.go b/execution/subscription/websocket/client_test.go index aa1c822b3e..b142cbef5b 100644 --- a/execution/subscription/websocket/client_test.go +++ b/execution/subscription/websocket/client_test.go @@ -27,7 +27,9 @@ type testServerWebsocketResponse struct { } func TestClient_WriteToClient(t *testing.T) { + t.Parallel() t.Run("should write successfully to client", func(t *testing.T) { + t.Parallel() connToServer, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) messageToClient := []byte(`{ @@ -50,8 +52,11 @@ func TestClient_WriteToClient(t *testing.T) { }) t.Run("should not write to client when connection is closed", func(t *testing.T) { + t.Parallel() t.Run("when not wrapped", func(t *testing.T) { + t.Parallel() t.Run("io: read/write on closed pipe", func(t *testing.T) { + t.Parallel() connToServer, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) err := connToServer.Close() @@ -64,7 +69,9 @@ func TestClient_WriteToClient(t *testing.T) { }) t.Run("when wrapped", func(t *testing.T) { + t.Parallel() t.Run("io: read/write on closed pipe", func(t *testing.T) { + t.Parallel() connToClient := FakeConn{} wrappedErr := fmt.Errorf("outside wrapper: %w", fmt.Errorf("inner wrapper: %w", @@ -83,7 +90,9 @@ func TestClient_WriteToClient(t *testing.T) { } func TestClient_ReadFromClient(t *testing.T) { + t.Parallel() t.Run("should successfully read from client", func(t *testing.T) { + t.Parallel() connToServer, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) @@ -105,7 +114,9 @@ func TestClient_ReadFromClient(t *testing.T) { assert.Equal(t, messageToServer, messageFromClient) }) t.Run("should detect a closed connection", func(t *testing.T) { + t.Parallel() t.Run("before read", func(t *testing.T) { + t.Parallel() _, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) defer connToClient.Close() @@ -117,7 +128,9 @@ func TestClient_ReadFromClient(t *testing.T) { }, 1*time.Second, 2*time.Millisecond) }) t.Run("when not wrapped", func(t *testing.T) { + t.Parallel() t.Run("io.EOF", func(t *testing.T) { + t.Parallel() connToServer, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) err := connToServer.Close() @@ -128,6 +141,7 @@ func TestClient_ReadFromClient(t *testing.T) { assert.True(t, websocketClient.isClosedConnection) }) t.Run("io: read/write on closed pipe", func(t *testing.T) { + t.Parallel() connToClient := &FakeConn{} connToClient.setReadReturns(0, io.ErrClosedPipe) websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) @@ -137,6 +151,7 @@ func TestClient_ReadFromClient(t *testing.T) { assert.True(t, websocketClient.isClosedConnection) }) t.Run("unexpected EOF", func(t *testing.T) { + t.Parallel() connToClient := &FakeConn{} connToClient.setReadReturns(0, io.ErrUnexpectedEOF) websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) @@ -148,7 +163,9 @@ func TestClient_ReadFromClient(t *testing.T) { }) t.Run("when wrapped", func(t *testing.T) { + t.Parallel() t.Run("io.EOF", func(t *testing.T) { + t.Parallel() connToClient := &FakeConn{} wrappedErr := fmt.Errorf("outside wrapper: %w", fmt.Errorf("inner wrapper: %w", @@ -163,6 +180,7 @@ func TestClient_ReadFromClient(t *testing.T) { assert.True(t, websocketClient.isClosedConnection) }) t.Run("io: read/write on closed pipe", func(t *testing.T) { + t.Parallel() connToClient := &FakeConn{} wrappedErr := fmt.Errorf("outside wrapper: %w", fmt.Errorf("inner wrapper: %w", @@ -177,6 +195,7 @@ func TestClient_ReadFromClient(t *testing.T) { assert.True(t, websocketClient.isClosedConnection) }) t.Run("unexpected EOF", func(t *testing.T) { + t.Parallel() connToClient := &FakeConn{} wrappedErr := fmt.Errorf("outside wrapper: %w", fmt.Errorf("inner wrapper: %w", @@ -196,19 +215,29 @@ func TestClient_ReadFromClient(t *testing.T) { } func TestClient_IsConnected(t *testing.T) { - _, connToClient := net.Pipe() - websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) + t.Parallel() t.Run("should return true when a connection is established", func(t *testing.T) { + t.Parallel() + _, connToClient := net.Pipe() + t.Cleanup(func() { + _ = connToClient.Close() + }) + websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) + isConnected := websocketClient.IsConnected() assert.True(t, isConnected) }) t.Run("should return false when a connection is closed", func(t *testing.T) { + t.Parallel() + _, connToClient := net.Pipe() + websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) + err := connToClient.Close() require.NoError(t, err) - websocketClient.isClosedConnection = true + websocketClient.changeConnectionStateToClosed() isConnected := websocketClient.IsConnected() assert.False(t, isConnected) @@ -216,10 +245,12 @@ func TestClient_IsConnected(t *testing.T) { } func TestClient_Disconnect(t *testing.T) { + t.Parallel() _, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) t.Run("should disconnect and indicate a closed connection", func(t *testing.T) { + t.Parallel() err := websocketClient.Disconnect() assert.NoError(t, err) assert.Equal(t, true, websocketClient.isClosedConnection) @@ -227,7 +258,9 @@ func TestClient_Disconnect(t *testing.T) { } func TestClient_DisconnectWithReason(t *testing.T) { + t.Parallel() t.Run("disconnect with invalid reason", func(t *testing.T) { + t.Parallel() connToServer, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) serverResponseChan := make(chan testServerWebsocketResponse) @@ -255,6 +288,7 @@ func TestClient_DisconnectWithReason(t *testing.T) { }) t.Run("disconnect with reason", func(t *testing.T) { + t.Parallel() connToServer, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) serverResponseChan := make(chan testServerWebsocketResponse) @@ -282,6 +316,7 @@ func TestClient_DisconnectWithReason(t *testing.T) { }) t.Run("disconnect with compiled reason", func(t *testing.T) { + t.Parallel() connToServer, connToClient := net.Pipe() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) serverResponseChan := make(chan testServerWebsocketResponse) @@ -310,9 +345,11 @@ func TestClient_DisconnectWithReason(t *testing.T) { } func TestClient_isClosedConnectionError(t *testing.T) { + t.Parallel() _, connToClient := net.Pipe() t.Run("should not close connection when it is not a closed connection error", func(t *testing.T) { + t.Parallel() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) require.False(t, websocketClient.isClosedConnection) @@ -321,6 +358,7 @@ func TestClient_isClosedConnectionError(t *testing.T) { }) t.Run("should close connection when it is a closed connection error", func(t *testing.T) { + t.Parallel() websocketClient := NewClient(abstractlogger.NoopLogger, connToClient) require.False(t, websocketClient.isClosedConnection) diff --git a/execution/subscription/websocket/handler_test.go b/execution/subscription/websocket/handler_test.go index acfe4d4e24..43af47448c 100644 --- a/execution/subscription/websocket/handler_test.go +++ b/execution/subscription/websocket/handler_test.go @@ -26,8 +26,10 @@ import ( ) func TestHandleWithOptions(t *testing.T) { + t.Parallel() t.Skip("timing not compatible with async rewrite of resolver") t.Run("should handle protocol graphql-ws", func(t *testing.T) { + t.Parallel() if runtime.GOOS == "windows" { t.Skip("this test fails on Windows due to different timings than unix, consider fixing it at some point") } @@ -106,6 +108,7 @@ func TestHandleWithOptions(t *testing.T) { }) t.Run("should handle protocol graphql-transport-ws", func(t *testing.T) { + t.Parallel() chatServer := httptest.NewServer(subscriptiontesting.ChatGraphQLEndpointHandler()) defer chatServer.Close() @@ -181,6 +184,7 @@ func TestHandleWithOptions(t *testing.T) { }) t.Run("should handle on before start error", func(t *testing.T) { + t.Parallel() chatServer := httptest.NewServer(subscriptiontesting.ChatGraphQLEndpointHandler()) defer chatServer.Close() @@ -228,8 +232,10 @@ func TestHandleWithOptions(t *testing.T) { } func TestWithProtocolFromRequestHeaders(t *testing.T) { + t.Parallel() runTest := func(headerKey string, headerValue string, expectedProtocol Protocol) func(t *testing.T) { return func(t *testing.T) { + t.Parallel() request, err := http.NewRequest("", "", nil) require.NoError(t, err) request.Header.Set(headerKey, headerValue) @@ -247,6 +253,7 @@ func TestWithProtocolFromRequestHeaders(t *testing.T) { t.Run("should fallback to default protocol", runTest(HeaderSecWebSocketProtocol, "something-else", DefaultProtocol)) t.Run("should fallback to default protocol when header is missing", runTest("Different-Header-Key", "missing-header", DefaultProtocol)) t.Run("should fallback to default protocol when request is nil", func(t *testing.T) { + t.Parallel() options := &HandleOptions{} optionFunc := WithProtocolFromRequestHeaders(nil) optionFunc(options) diff --git a/execution/subscription/websocket/protocol_graphql_transport_ws_test.go b/execution/subscription/websocket/protocol_graphql_transport_ws_test.go index 867c02ec39..3c6e047445 100644 --- a/execution/subscription/websocket/protocol_graphql_transport_ws_test.go +++ b/execution/subscription/websocket/protocol_graphql_transport_ws_test.go @@ -17,7 +17,9 @@ import ( ) func TestGraphQLTransportWSMessageReader_Read(t *testing.T) { + t.Parallel() t.Run("should read a minimal message", func(t *testing.T) { + t.Parallel() data := []byte(`{ "type": "connection_init" }`) expectedMessage := &GraphQLTransportWSMessage{ Type: "connection_init", @@ -32,6 +34,7 @@ func TestGraphQLTransportWSMessageReader_Read(t *testing.T) { }) t.Run("should message with json payload", func(t *testing.T) { + t.Parallel() data := []byte(`{ "id": "1", "type": "connection_init", "payload": { "Authorization": "Bearer ey123" } }`) expectedMessage := &GraphQLTransportWSMessage{ Id: "1", @@ -48,6 +51,7 @@ func TestGraphQLTransportWSMessageReader_Read(t *testing.T) { }) t.Run("should read and deserialize subscribe message", func(t *testing.T) { + t.Parallel() data := []byte(`{ "id": "1", "type": "subscribe", @@ -89,7 +93,9 @@ func TestGraphQLTransportWSMessageReader_Read(t *testing.T) { } func TestGraphQLTransportWSMessageWriter_WriteConnectionAck(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -100,6 +106,7 @@ func TestGraphQLTransportWSMessageWriter_WriteConnectionAck(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write ack message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -114,7 +121,9 @@ func TestGraphQLTransportWSMessageWriter_WriteConnectionAck(t *testing.T) { } func TestGraphQLTransportWSMessageWriter_WritePing(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -125,6 +134,7 @@ func TestGraphQLTransportWSMessageWriter_WritePing(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write ping message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -137,6 +147,7 @@ func TestGraphQLTransportWSMessageWriter_WritePing(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should successfully write ping message with payload to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -151,7 +162,9 @@ func TestGraphQLTransportWSMessageWriter_WritePing(t *testing.T) { } func TestGraphQLTransportWSMessageWriter_WritePong(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -162,6 +175,7 @@ func TestGraphQLTransportWSMessageWriter_WritePong(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write pong message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -174,6 +188,7 @@ func TestGraphQLTransportWSMessageWriter_WritePong(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should successfully write pong message with payload to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -188,7 +203,9 @@ func TestGraphQLTransportWSMessageWriter_WritePong(t *testing.T) { } func TestGraphQLTransportWSMessageWriter_WriteNext(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -199,6 +216,7 @@ func TestGraphQLTransportWSMessageWriter_WriteNext(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write next message with payload to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -213,7 +231,9 @@ func TestGraphQLTransportWSMessageWriter_WriteNext(t *testing.T) { } func TestGraphQLTransportWSMessageWriter_WriteError(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -224,6 +244,7 @@ func TestGraphQLTransportWSMessageWriter_WriteError(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write error message with payload to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -239,7 +260,9 @@ func TestGraphQLTransportWSMessageWriter_WriteError(t *testing.T) { } func TestGraphQLTransportWSMessageWriter_WriteComplete(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -250,6 +273,7 @@ func TestGraphQLTransportWSMessageWriter_WriteComplete(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write complete message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLTransportWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -264,7 +288,9 @@ func TestGraphQLTransportWSMessageWriter_WriteComplete(t *testing.T) { } func TestGraphQLTransportWSEventHandler_Emit(t *testing.T) { + t.Parallel() t.Run("should write on completed", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) eventHandler := NewTestGraphQLTransportWSEventHandler(testClient) eventHandler.Emit(subscription.EventTypeOnSubscriptionCompleted, "1", nil, nil) @@ -272,6 +298,7 @@ func TestGraphQLTransportWSEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write on data", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) eventHandler := NewTestGraphQLTransportWSEventHandler(testClient) eventHandler.Emit(subscription.EventTypeOnSubscriptionData, "1", []byte(`{ "data": { "hello": "world" } }`), nil) @@ -279,6 +306,7 @@ func TestGraphQLTransportWSEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write on non-subscription execution result", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) eventHandler := NewTestGraphQLTransportWSEventHandler(testClient) go func() { @@ -296,6 +324,7 @@ func TestGraphQLTransportWSEventHandler_Emit(t *testing.T) { }, 1*time.Second, 2*time.Millisecond) }) t.Run("should write on error", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) eventHandler := NewTestGraphQLTransportWSEventHandler(testClient) eventHandler.Emit(subscription.EventTypeOnError, "1", nil, errors.New("error occurred")) @@ -303,6 +332,7 @@ func TestGraphQLTransportWSEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should execute the OnConnectionOpened event function", func(t *testing.T) { + t.Parallel() counter := 0 testClient := NewTestClient(false) eventHandler := NewTestGraphQLTransportWSEventHandler(testClient) @@ -313,6 +343,7 @@ func TestGraphQLTransportWSEventHandler_Emit(t *testing.T) { assert.Equal(t, counter, 1) }) t.Run("should disconnect on duplicated subscriber id", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) eventHandler := NewTestGraphQLTransportWSEventHandler(testClient) eventHandler.Emit(subscription.EventTypeOnDuplicatedSubscriberID, "1", nil, errors.New("subscriber already exists")) @@ -321,7 +352,9 @@ func TestGraphQLTransportWSEventHandler_Emit(t *testing.T) { } func TestGraphQLTransportWSWriteEventHandler_HandleWriteEvent(t *testing.T) { + t.Parallel() t.Run("should write connection_ack", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLTransportWSEventHandler(testClient) writeEventHandler.HandleWriteEvent(GraphQLTransportWSMessageTypeConnectionAck, "", nil, nil) @@ -329,6 +362,7 @@ func TestGraphQLTransportWSWriteEventHandler_HandleWriteEvent(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write ping", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLTransportWSEventHandler(testClient) writeEventHandler.HandleWriteEvent(GraphQLTransportWSMessageTypePing, "", nil, nil) @@ -336,6 +370,7 @@ func TestGraphQLTransportWSWriteEventHandler_HandleWriteEvent(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write pong", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLTransportWSEventHandler(testClient) writeEventHandler.HandleWriteEvent(GraphQLTransportWSMessageTypePong, "", nil, nil) @@ -343,6 +378,7 @@ func TestGraphQLTransportWSWriteEventHandler_HandleWriteEvent(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should close connection on invalid type", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLTransportWSEventHandler(testClient) writeEventHandler.HandleWriteEvent(GraphQLTransportWSMessageType("invalid"), "", nil, nil) @@ -351,7 +387,9 @@ func TestGraphQLTransportWSWriteEventHandler_HandleWriteEvent(t *testing.T) { } func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { + t.Parallel() t.Run("should close connection when an unexpected message type is used", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLTransportWSHandler(testClient) @@ -367,7 +405,9 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("for connection_init", func(t *testing.T) { + t.Parallel() t.Run("should time out if no connection_init message is sent", func(t *testing.T) { + t.Parallel() if runtime.GOOS == "windows" { t.Skip("this test fails on Windows due to different timings than unix, consider fixing it at some point") } @@ -383,6 +423,7 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("should close connection after multiple connection_init messages", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLTransportWSHandler(testClient) protocol.connectionInitTimeOutDuration = 50 * time.Millisecond @@ -411,6 +452,7 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("should not time out if connection_init message is sent before time out", func(t *testing.T) { + t.Parallel() if runtime.GOOS == "windows" { t.Skip("this test fails on Windows due to different timings than unix, consider fixing it at some point") } @@ -447,6 +489,7 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("should return pong on ping", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLTransportWSHandler(testClient) @@ -467,6 +510,7 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("should handle subscribe", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLTransportWSHandler(testClient) @@ -490,6 +534,7 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("should handle complete", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLTransportWSHandler(testClient) @@ -509,6 +554,7 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("should allow pong messages from client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLTransportWSHandler(testClient) @@ -528,6 +574,7 @@ func TestProtocolGraphQLTransportWSHandler_Handle(t *testing.T) { }) t.Run("should not panic on broken input", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLTransportWSHandler(testClient) diff --git a/execution/subscription/websocket/protocol_graphql_ws_test.go b/execution/subscription/websocket/protocol_graphql_ws_test.go index bcc911106e..37c0af5fc0 100644 --- a/execution/subscription/websocket/protocol_graphql_ws_test.go +++ b/execution/subscription/websocket/protocol_graphql_ws_test.go @@ -17,6 +17,7 @@ import ( ) func TestGraphQLWSMessageReader_Read(t *testing.T) { + t.Parallel() data := []byte(`{ "id": "1", "type": "connection_init", "payload": { "headers": { "key": "value" } } }`) expectedMessage := &GraphQLWSMessage{ Id: "1", @@ -33,7 +34,9 @@ func TestGraphQLWSMessageReader_Read(t *testing.T) { } func TestGraphQLWSMessageWriter_WriteData(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -44,6 +47,7 @@ func TestGraphQLWSMessageWriter_WriteData(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write message data to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -58,7 +62,9 @@ func TestGraphQLWSMessageWriter_WriteData(t *testing.T) { } func TestGraphQLWSMessageWriter_WriteComplete(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -69,6 +75,7 @@ func TestGraphQLWSMessageWriter_WriteComplete(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write complete message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -83,7 +90,9 @@ func TestGraphQLWSMessageWriter_WriteComplete(t *testing.T) { } func TestGraphQLWSMessageWriter_WriteKeepAlive(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -94,6 +103,7 @@ func TestGraphQLWSMessageWriter_WriteKeepAlive(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write keep-alive (ka) message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -108,7 +118,9 @@ func TestGraphQLWSMessageWriter_WriteKeepAlive(t *testing.T) { } func TestGraphQLWSMessageWriter_WriteTerminate(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -119,6 +131,7 @@ func TestGraphQLWSMessageWriter_WriteTerminate(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write terminate message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -133,7 +146,9 @@ func TestGraphQLWSMessageWriter_WriteTerminate(t *testing.T) { } func TestGraphQLWSMessageWriter_WriteConnectionError(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -144,6 +159,7 @@ func TestGraphQLWSMessageWriter_WriteConnectionError(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write connection error message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -158,7 +174,9 @@ func TestGraphQLWSMessageWriter_WriteConnectionError(t *testing.T) { } func TestGraphQLWSMessageWriter_WriteError(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -170,6 +188,7 @@ func TestGraphQLWSMessageWriter_WriteError(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write error message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -185,7 +204,9 @@ func TestGraphQLWSMessageWriter_WriteError(t *testing.T) { } func TestGraphQLWSMessageWriter_WriteAck(t *testing.T) { + t.Parallel() t.Run("should return error when error occurs on underlying call", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(true) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -196,6 +217,7 @@ func TestGraphQLWSMessageWriter_WriteAck(t *testing.T) { assert.Error(t, err) }) t.Run("should successfully write ack message to client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writer := GraphQLWSMessageWriter{ logger: abstractlogger.Noop{}, @@ -210,7 +232,9 @@ func TestGraphQLWSMessageWriter_WriteAck(t *testing.T) { } func TestGraphQLWSWriteEventHandler_Emit(t *testing.T) { + t.Parallel() t.Run("should write on completed", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) writeEventHandler.Emit(subscription.EventTypeOnSubscriptionCompleted, "1", nil, nil) @@ -218,6 +242,7 @@ func TestGraphQLWSWriteEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write on data", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) writeEventHandler.Emit(subscription.EventTypeOnSubscriptionData, "1", []byte(`{ "data": { "hello": "world" } }`), nil) @@ -225,6 +250,7 @@ func TestGraphQLWSWriteEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write on error", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) writeEventHandler.Emit(subscription.EventTypeOnError, "1", nil, errors.New("error occurred")) @@ -232,6 +258,7 @@ func TestGraphQLWSWriteEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write on duplicated subscriber id", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) writeEventHandler.Emit(subscription.EventTypeOnDuplicatedSubscriberID, "1", nil, subscription.ErrSubscriberIDAlreadyExists) @@ -239,6 +266,7 @@ func TestGraphQLWSWriteEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write on connection_error", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) writeEventHandler.Emit(subscription.EventTypeOnConnectionError, "", nil, errors.New("connection error occurred")) @@ -246,6 +274,7 @@ func TestGraphQLWSWriteEventHandler_Emit(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write on non-subscription execution result", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) go func() { @@ -265,7 +294,9 @@ func TestGraphQLWSWriteEventHandler_Emit(t *testing.T) { } func TestGraphQLWSWriteEventHandler_HandleWriteEvent(t *testing.T) { + t.Parallel() t.Run("should write keep_alive", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) writeEventHandler.HandleWriteEvent(GraphQLWSMessageTypeConnectionKeepAlive, "", nil, nil) @@ -273,6 +304,7 @@ func TestGraphQLWSWriteEventHandler_HandleWriteEvent(t *testing.T) { assert.Equal(t, expectedMessage, testClient.readMessageToClient()) }) t.Run("should write ack", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) writeEventHandler := NewTestGraphQLWSWriteEventHandler(testClient) writeEventHandler.HandleWriteEvent(GraphQLWSMessageTypeConnectionAck, "", nil, nil) @@ -282,7 +314,9 @@ func TestGraphQLWSWriteEventHandler_HandleWriteEvent(t *testing.T) { } func TestProtocolGraphQLWSHandler_Handle(t *testing.T) { + t.Parallel() t.Run("should return connection_error when an unexpected message type is used", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLWSHandler(testClient) @@ -299,6 +333,7 @@ func TestProtocolGraphQLWSHandler_Handle(t *testing.T) { }) t.Run("should terminate connections on connection_terminate from client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLWSHandler(testClient) @@ -314,6 +349,7 @@ func TestProtocolGraphQLWSHandler_Handle(t *testing.T) { }) t.Run("should init connection and respond with ack and ka", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLWSHandler(testClient) protocol.keepAliveInterval = 5 * time.Millisecond @@ -340,6 +376,7 @@ func TestProtocolGraphQLWSHandler_Handle(t *testing.T) { }) t.Run("should start an operation on start from client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLWSHandler(testClient) @@ -355,6 +392,7 @@ func TestProtocolGraphQLWSHandler_Handle(t *testing.T) { }) t.Run("should stop a subscription on stop from client", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLWSHandler(testClient) @@ -370,6 +408,7 @@ func TestProtocolGraphQLWSHandler_Handle(t *testing.T) { }) t.Run("should not panic on broken input", func(t *testing.T) { + t.Parallel() testClient := NewTestClient(false) protocol := NewTestProtocolGraphQLWSHandler(testClient) diff --git a/go.work b/go.work index 29af43e6d3..abd8c21b23 100644 --- a/go.work +++ b/go.work @@ -7,5 +7,3 @@ use ( ) replace github.com/tidwall/sjson => github.com/tidwall/sjson v1.0.4 - -//replace github.com/wundergraph/astjson => ../wundergraph-projects/astjson diff --git a/go.work.sum b/go.work.sum index 1aecd8d220..7bb0936c00 100644 --- a/go.work.sum +++ b/go.work.sum @@ -116,8 +116,6 @@ github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= @@ -243,12 +241,13 @@ github.com/twmb/franz-go v1.16.1 h1:rpWc7fB9jd7TgmCyfxzenBI+QbgS8ZfJOUQE+tzPtbE= github.com/twmb/franz-go v1.16.1/go.mod h1:/pER254UPPGp/4WfGqRi+SIRGE50RSQzVubQp6+N4FA= github.com/twmb/franz-go/pkg/kmsg v1.7.0 h1:a457IbvezYfA5UkiBvyV3zj0Is3y1i8EJgqjJYoij2E= github.com/twmb/franz-go/pkg/kmsg v1.7.0/go.mod h1:se9Mjdt0Nwzc9lnjJ0HyDtLyBnaBDAd7pCje47OhSyw= +github.com/valyala/fastjson v1.6.10/go.mod h1:e6FubmQouUNP73jtMLmcbxS6ydWIpOfhz34TSfO3JaE= github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/wundergraph/go-arena v0.0.0-20251008210416-55cb97e6f68f h1:5snewyMaIpajTu4wj22L/DgrGimICqXtUVjkZInBH3Y= github.com/wundergraph/go-arena v0.0.0-20251008210416-55cb97e6f68f/go.mod h1:ROOysEHWJjLQ8FSfNxZCziagb7Qw2nXY3/vgKRh7eWw= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= @@ -345,8 +344,6 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= diff --git a/v2/doc.go b/v2/doc.go index d951b83554..12d4fa8fb3 100644 --- a/v2/doc.go +++ b/v2/doc.go @@ -529,7 +529,7 @@ func ExampleExecuteOperation() { switch p := preparedPlan.(type) { case *plan.SynchronousResponsePlan: out := &bytes.Buffer{} - _, err := resolver.ResolveGraphQLResponse(ctx, p.Response, nil, out) + _, err := resolver.ResolveGraphQLResponse(ctx, p.Response, out) if err != nil { panic(err) } diff --git a/v2/go.mod b/v2/go.mod index ad5d096fc1..a034a15a4e 100644 --- a/v2/go.mod +++ b/v2/go.mod @@ -16,7 +16,7 @@ require ( github.com/gorilla/websocket v1.5.1 github.com/hashicorp/go-plugin v1.6.3 github.com/jensneuse/abstractlogger v0.0.4 - github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 + github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56 github.com/jensneuse/diffview v1.0.0 github.com/kingledion/go-tools v0.6.0 github.com/kylelemons/godebug v1.1.0 @@ -26,19 +26,19 @@ require ( github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/sebdah/goldie/v2 v2.7.1 github.com/stretchr/testify v1.11.1 - github.com/tidwall/gjson v1.17.0 - github.com/tidwall/sjson v1.0.4 + github.com/tidwall/gjson v1.18.0 + github.com/tidwall/sjson v1.2.5 github.com/vektah/gqlparser/v2 v2.5.30 - github.com/wundergraph/astjson v1.1.0 - github.com/wundergraph/go-arena v1.1.0 + github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f + github.com/wundergraph/go-arena v1.2.0 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 - go.uber.org/zap v1.26.0 + go.uber.org/zap v1.27.0 golang.org/x/sync v0.17.0 golang.org/x/sys v0.37.0 golang.org/x/text v0.30.0 gonum.org/v1/gonum v0.14.0 - google.golang.org/grpc v1.68.1 + google.golang.org/grpc v1.71.0 google.golang.org/protobuf v1.36.9 gopkg.in/yaml.v2 v2.4.0 ) @@ -50,12 +50,13 @@ require ( github.com/dnephin/pflag v1.0.7 // indirect github.com/fatih/color v1.18.0 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/hashicorp/go-hclog v0.14.1 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/yamux v0.1.1 // indirect github.com/kr/pretty v0.3.1 // indirect @@ -72,6 +73,8 @@ require ( github.com/tidwall/pretty v1.2.1 // indirect github.com/urfave/cli/v2 v2.27.7 // indirect github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.29.0 // indirect golang.org/x/net v0.46.0 // indirect diff --git a/v2/go.sum b/v2/go.sum index 13adfeb881..006d4cd6c7 100644 --- a/v2/go.sum +++ b/v2/go.sum @@ -27,11 +27,14 @@ github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7c github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= @@ -53,8 +56,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-plugin v1.6.3 h1:xgHB+ZUSYeuJi96WtxEjzi23uh7YQpznjGh0U0UUrwg= github.com/hashicorp/go-plugin v1.6.3/go.mod h1:MRobyh+Wc/nYy1V4KAXUiYfzxoYhs7V1mlH1Z7iY2h0= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -63,8 +66,8 @@ github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/jensneuse/abstractlogger v0.0.4 h1:sa4EH8fhWk3zlTDbSncaWKfwxYM8tYSlQ054ETLyyQY= github.com/jensneuse/abstractlogger v0.0.4/go.mod h1:6WuamOHuykJk8zED/R0LNiLhWR6C7FIAo43ocUEB3mo= -github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68 h1:E80wOd3IFQcoBxLkAUpUQ3BoGrZ4DxhQdP21+HH1s6A= -github.com/jensneuse/byte-template v0.0.0-20200214152254-4f3cf06e5c68/go.mod h1:0D5r/VSW6D/o65rKLL9xk7sZxL2+oku2HvFPYeIMFr4= +github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56 h1:wo26fh6a6Za0cOMZIopD2sfH/kq83SJ89ixUWl7pCWc= +github.com/jensneuse/byte-template v0.0.0-20231025215717-69252eb3ed56/go.mod h1:0D5r/VSW6D/o65rKLL9xk7sZxL2+oku2HvFPYeIMFr4= github.com/jensneuse/diffview v1.0.0 h1:4b6FQJ7y3295JUHU3tRko6euyEboL825ZsXeZZM47Z4= github.com/jensneuse/diffview v1.0.0/go.mod h1:i6IacuD8LnEaPuiyzMHA+Wfz5mAuycMOf3R/orUY9y4= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= @@ -83,11 +86,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= @@ -126,28 +130,40 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg= -github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= -github.com/wundergraph/astjson v1.1.0 h1:xORDosrZ87zQFJwNGe/HIHXqzpdHOFmqWgykCLVL040= -github.com/wundergraph/astjson v1.1.0/go.mod h1:h12D/dxxnedtLzsKyBLK7/Oe4TAoGpRVC9nDpDrZSWw= -github.com/wundergraph/go-arena v1.1.0 h1:9+wSRkJAkA2vbYHp6s8tEGhPViRGQNGXqPHT0QzhdIc= -github.com/wundergraph/go-arena v1.1.0/go.mod h1:ROOysEHWJjLQ8FSfNxZCziagb7Qw2nXY3/vgKRh7eWw= +github.com/wundergraph/astjson v1.1.1-0.20260418181506-345133162d36 h1:xf9ZfqdSRYgqf2l2TYFGHXIzagWvFRefvbJW3StWSiM= +github.com/wundergraph/astjson v1.1.1-0.20260418181506-345133162d36/go.mod h1:uHSJv7uowLN/nIPvkTFqUDt1sXk4qQU0KNwHfwfDcQE= +github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f h1:MoVoeMlgY9Ej1aoF3Y/kniBZ8pv+WfIA3YSCnPBh+6M= +github.com/wundergraph/astjson v1.1.1-0.20260419105127-f600d161463f/go.mod h1:uHSJv7uowLN/nIPvkTFqUDt1sXk4qQU0KNwHfwfDcQE= +github.com/wundergraph/go-arena v1.2.0 h1:6MlhEy0NBY3Z+BuK3rj0F9YoT3bM0SlahGkzK0lKRZ4= +github.com/wundergraph/go-arena v1.2.0/go.mod h1:ROOysEHWJjLQ8FSfNxZCziagb7Qw2nXY3/vgKRh7eWw= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg= github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -158,8 +174,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -180,13 +196,16 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= @@ -214,8 +233,8 @@ gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go index f4268d1f6a..912f53a36f 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource.go @@ -83,6 +83,16 @@ type Planner[T Configuration] struct { // to the downstream subgraph fetch. propagatedOperationName string + // caching + + entityCacheKeyTemplate resolve.CacheKeyTemplate + rootFields []resolve.QueryField // tracks root fields and their arguments for cache key generation + // rootFieldEntityCacheKeyTemplates tracks root field types (plural in case of interfaces/unions) + // and their correlating cache keys (excluding @requires) to allow L1 cache population + // for root fields that return an entity + rootFieldEntityCacheKeyTemplates map[string]resolve.CacheKeyTemplate + requestScopedResponseKeys map[string]string // schema field name → response key (alias or name) for @requestScoped entity fields + // federation addedInlineFragments map[onTypeInlineFragment]struct{} @@ -271,6 +281,7 @@ func (p *Planner[T]) DownstreamResponseFieldAlias(downstreamFieldRef int) (alias } func (p *Planner[T]) Register(visitor *plan.Visitor, configuration plan.DataSourceConfiguration[T], dataSourcePlannerConfiguration plan.DataSourcePlannerConfiguration) error { + p.rootFieldEntityCacheKeyTemplates = nil p.visitor = visitor p.visitor.Walker.RegisterDocumentVisitor(p) @@ -377,6 +388,110 @@ func (p *Planner[T]) ConfigureFetch() resolve.FetchConfiguration { } } + // Set cache key template for non-entity calls (root queries) + if !requiresEntityFetch && !requiresEntityBatchFetch { + if len(p.rootFields) > 0 { + rootFieldsCopy := make([]resolve.QueryField, len(p.rootFields)) + copy(rootFieldsCopy, p.rootFields) + entityKeyMappings := make([]resolve.EntityKeyMappingConfig, 0) + // Populate entity key mappings from federation config. + // ArgumentPath in the plan config uses schema argument names (e.g., "upc"), + // but ctx.Variables uses normalized variable names (e.g., "a") after variable + // extraction. We resolve each ArgumentPath through the root field's tracked + // arguments to find the actual ContextVariable path. + fedMeta := p.dataSourceConfig.FederationConfiguration() + for _, rf := range p.rootFields { + rfConfig := fedMeta.RootFieldCacheConfig(rf.Coordinate.TypeName, rf.Coordinate.FieldName) + if rfConfig != nil && len(rfConfig.EntityKeyMappings) > 0 { + for _, ekm := range rfConfig.EntityKeyMappings { + mappingConfig := resolve.EntityKeyMappingConfig{ + EntityTypeName: ekm.EntityTypeName, + } + for _, fm := range ekm.FieldMappings { + mappingConfig.FieldMappings = append(mappingConfig.FieldMappings, resolve.EntityFieldMappingConfig{ + EntityKeyField: fm.EntityKeyField, + ArgumentPath: resolveArgumentPath(fm.ArgumentPath, rf.Args), + ArgumentIsEntityKey: fm.ArgumentIsEntityKey, + }) + } + entityKeyMappings = append(entityKeyMappings, mappingConfig) + } + } + } + p.entityCacheKeyTemplate = resolve.NewRootQueryCacheKeyTemplate(rootFieldsCopy, entityKeyMappings) + } + } + + // Build requestScoped fields from federation metadata. + // + // Symmetric model: every field annotated with @requestScoped in the subgraph + // participates in per-request L1 caching as both reader (inject from L1 and + // skip the fetch) and writer (populate L1 after the fetch). Fields that share + // the same L1Key (i.e., same `key` within the same subgraph) share the same + // L1 entry. + var requestScopedFields []resolve.RequestScopedField + + fedMeta := p.dataSourceConfig.FederationConfiguration() + + addRequestScoped := func(fieldName, responsePath, l1Key string) { + requestScopedFields = append(requestScopedFields, resolve.RequestScopedField{ + FieldName: responsePath, + FieldPath: []string{responsePath}, + L1Key: l1Key, + }) + } + + if !requiresEntityFetch && !requiresEntityBatchFetch { + // Root field fetches: iterate the query's root fields. + for _, rf := range p.rootFields { + l1Keys := fedMeta.RequestScopedExportsForField(rf.Coordinate.TypeName, rf.Coordinate.FieldName) + if len(l1Keys) == 0 { + continue + } + responsePath := rf.ResponseKey + for _, l1Key := range l1Keys { + addRequestScoped(rf.Coordinate.FieldName, responsePath, l1Key) + } + } + } else { + // Entity fetches: iterate fields on the entity type (and any interfaceObject + // types the entity implements — @requestScoped may be declared on the + // interface e.g. Personalized, while the concrete entity is Article). + var entityTypeName string + if len(p.dataSourcePlannerConfig.RequiredFields) > 0 { + entityTypeName = p.dataSourcePlannerConfig.RequiredFields[0].TypeName + } + if entityTypeName != "" { + typesToCheck := []string{entityTypeName} + for _, io := range fedMeta.InterfaceObjects { + for _, concrete := range io.ConcreteTypeNames { + if concrete == entityTypeName { + typesToCheck = append(typesToCheck, io.InterfaceTypeName) + } + } + } + seen := make(map[string]struct{}) + for _, t := range typesToCheck { + for _, rsf := range fedMeta.RequestScopedFieldsForType(t) { + // Dedup by (FieldName, L1Key) in case a field appears on both + // the concrete and interface type lists. + // e.g. FieldName="viewer", L1Key="accounts.userId" + // → dedupKey = "viewer\x00accounts.userId" + dedupKey := rsf.FieldName + "\x00" + rsf.L1Key + if _, dup := seen[dedupKey]; dup { + continue + } + seen[dedupKey] = struct{}{} + responsePath := rsf.FieldName + if rk, ok := p.requestScopedResponseKeys[rsf.FieldName]; ok { + responsePath = rk + } + addRequestScoped(rsf.FieldName, responsePath, rsf.L1Key) + } + } + } + } + return resolve.FetchConfiguration{ Input: string(input), DataSource: dataSource, @@ -387,6 +502,11 @@ func (p *Planner[T]) ConfigureFetch() resolve.FetchConfiguration { SetTemplateOutputToNullOnVariableNull: requiresEntityFetch || requiresEntityBatchFetch, QueryPlan: p.queryPlan, OperationName: p.propagatedOperationName, + Caching: resolve.FetchCacheConfiguration{ + CacheKeyTemplate: p.entityCacheKeyTemplate, + RootFieldL1EntityCacheKeyTemplates: p.rootFieldEntityCacheKeyTemplates, + RequestScopedFields: requestScopedFields, + }, } } @@ -718,6 +838,30 @@ func (p *Planner[T]) EnterField(ref int) { } } + // Track all root fields for cache key generation + if p.isRootField() { + coordinate := resolve.GraphCoordinate{ + TypeName: p.visitor.Walker.EnclosingTypeDefinition.NameString(p.visitor.Definition), + FieldName: fieldName, + } + responseKey := p.visitor.Operation.FieldAliasOrNameString(ref) + p.trackCacheKeyCoordinate(coordinate, responseKey) + p.handlePotentialEntityRootField(ref) + } + + // Track response keys for @requestScoped entity fields so ConfigureFetch + // can emit correct aliases without needing a downstream rewrite. + if !p.isRootField() { + fedMeta := p.dataSourceConfig.FederationConfiguration() + if l1Keys := fedMeta.RequestScopedExportsForField(typeName, fieldName); len(l1Keys) > 0 { + responseKey := p.visitor.Operation.FieldAliasOrNameString(ref) + if p.requestScopedResponseKeys == nil { + p.requestScopedResponseKeys = make(map[string]string) + } + p.requestScopedResponseKeys[fieldName] = responseKey + } + } + // store root field name and ref if p.rootFieldName == "" { p.rootFieldName = fieldName @@ -733,6 +877,125 @@ func (p *Planner[T]) EnterField(ref int) { p.addFieldArguments(p.addField(ref), ref, fieldConfiguration) } +// isRootField returns false if an ancestor ast.Node is of kind field +func (p *Planner[T]) isRootField() bool { + for i := 0; i < len(p.visitor.Walker.Ancestors); i++ { + if p.visitor.Walker.Ancestors[i].Kind == ast.NodeKindField { + return false + } + } + return true +} + +func (p *Planner[T]) handlePotentialEntityRootField(ref int) { + fieldDefinition, ok := p.visitor.Walker.FieldDefinition(ref) + if !ok { + return + } + typeName := p.visitor.Definition.FieldDefinitionTypeNameString(fieldDefinition) + fieldName := p.visitor.Operation.FieldAliasOrNameString(ref) + + // Get all entity type names that could be returned by this field + // This handles object types directly, as well as interface/union types + entityTypeNames := p.resolveEntityTypeNames(typeName) + if len(entityTypeNames) == 0 { + return + } + + meta := p.dataSourceConfig.FederationConfiguration() + + // Initialize map if needed + if p.rootFieldEntityCacheKeyTemplates == nil { + p.rootFieldEntityCacheKeyTemplates = make(map[string]resolve.CacheKeyTemplate) + } + + // Build cache key templates for each entity type + for _, entityTypeName := range entityTypeNames { + p.buildAndStoreEntityCacheKeyTemplate(entityTypeName, fieldName, meta) + } +} + +// resolveEntityTypeNames returns all entity type names that could be returned by a field. +// For object types: returns the type name if it's an entity. +// For interface types: returns all implementing object types that are entities. +// For union types: returns all member types that are entities. +func (p *Planner[T]) resolveEntityTypeNames(typeName string) []string { + // First, check if the type itself is an entity (object type) + if p.dataSourceConfig.HasEntity(typeName) { + return []string{typeName} + } + + // Check if it's an interface type + typeNode, ok := p.visitor.Definition.Index.FirstNodeByNameStr(typeName) + if !ok { + return nil + } + + var candidateTypes []string + + switch typeNode.Kind { + case ast.NodeKindInterfaceTypeDefinition: + // Get all object types that implement this interface + implementors, ok := p.visitor.Definition.InterfaceTypeDefinitionImplementedByObjectWithNames(typeNode.Ref) + if ok { + candidateTypes = implementors + } + case ast.NodeKindUnionTypeDefinition: + // Get all member types of this union + members, ok := p.visitor.Definition.UnionTypeDefinitionMemberTypeNames(typeNode.Ref) + if ok { + candidateTypes = members + } + default: + return nil + } + + // Filter to only include entity types + var entityTypes []string + for _, candidate := range candidateTypes { + if p.dataSourceConfig.HasEntity(candidate) { + entityTypes = append(entityTypes, candidate) + } + } + + return entityTypes +} + +// buildAndStoreEntityCacheKeyTemplate builds a cache key template for the given entity type +// and stores it in the rootFieldEntityCacheKeyTemplates map. +func (p *Planner[T]) buildAndStoreEntityCacheKeyTemplate(entityTypeName, fieldName string, meta plan.FederationMetaData) { + // Get all @key configurations for this entity type (excludes @requires) + entityKeys := meta.Keys.FilterByTypeAndResolvability(entityTypeName, true) + if len(entityKeys) == 0 { + return + } + + // Build representation variable nodes from the entity keys + var objects []*resolve.Object + for _, key := range entityKeys { + node, err := buildRepresentationVariableNode(p.visitor.Definition, key, meta) + if err != nil { + continue + } + objects = append(objects, node) + } + + if len(objects) == 0 { + return + } + + // Merge all key objects into a single representation + mergedObject := mergeRepresentationVariableNodes(objects) + + // Set the path to the root field name so the cache key template + // knows where to find the entity data in the response + mergedObject.Path = []string{fieldName} + + // Create cache key template with only @key fields (no @requires fields) + keys := resolve.NewResolvableObjectVariable(mergedObject) + p.rootFieldEntityCacheKeyTemplates[fieldName+":"+entityTypeName] = &resolve.EntityQueryCacheKeyTemplate{Keys: keys, TypeName: entityTypeName} +} + func (p *Planner[T]) addFieldArguments(upstreamFieldRef int, fieldRef int, fieldConfiguration *plan.FieldConfiguration) { if fieldConfiguration != nil { for i := range fieldConfiguration.Arguments { @@ -742,6 +1005,76 @@ func (p *Planner[T]) addFieldArguments(upstreamFieldRef int, fieldRef int, field } } +// resolveArgumentPath translates a schema-level argument name to the actual variable +// path used in ctx.Variables. After variable extraction, inline literals become +// variables with sequential names (a, b, c, ...) that differ from the original +// argument names. The root field's tracked Args contain the resolved ContextVariable +// paths, so we look up by argument name to find the real path. +// +// When the argument name doesn't match any root field argument, the original path +// is returned unchanged. This is intentional: some EntityKeyMappings reference entity +// fields that aren't root field arguments (e.g., "username" on a root field that only +// takes "id"). These "derived keys" are populated from entity response data on the +// write path via renderDerivedEntityKeyFromValue — the read path will naturally skip them. +func resolveArgumentPath(argumentPath []string, args []resolve.FieldArgument) []string { + if len(argumentPath) == 0 { + return argumentPath + } + for _, arg := range args { + if arg.Name == argumentPath[0] { + if cv, ok := arg.Variable.(*resolve.ContextVariable); ok { + if len(argumentPath) == 1 { + return cv.Path + } + // For nested argument paths (e.g., ["key", "sellerId"]), + // resolve the root argument to its variable path and append + // the remaining nested field path. + resolved := make([]string, len(cv.Path)+len(argumentPath)-1) + copy(resolved, cv.Path) + copy(resolved[len(cv.Path):], argumentPath[1:]) + return resolved + } + return argumentPath + } + } + return argumentPath +} + +// trackCacheKeyCoordinate ensures a root field is tracked for cache key generation, +// initializing an empty args slice if it doesn't exist yet. +// responseKey is the alias if present, else the field name — used by requestScoped +// export to read the field value from the response JSON. +func (p *Planner[T]) trackCacheKeyCoordinate(coordinate resolve.GraphCoordinate, responseKey string) { + p.rootFields = append(p.rootFields, resolve.QueryField{ + Coordinate: coordinate, + ResponseKey: responseKey, + }) +} + +// trackFieldWithArgument adds an argument (name + variable) to the field's tracking for cache key generation +func (p *Planner[T]) trackFieldWithArgument(coordinate resolve.GraphCoordinate, argName string, variable resolve.Variable) { + if coordinate.FieldName == "" { + return + } + // Find the last entry with this coordinate (most recently created by EnterField) + idx := -1 + for i := len(p.rootFields) - 1; i >= 0; i-- { + if p.rootFields[i].Coordinate.TypeName == coordinate.TypeName && + p.rootFields[i].Coordinate.FieldName == coordinate.FieldName { + idx = i + break + } + } + if idx == -1 { + // Should not happen — EnterField always runs before arg tracking + return + } + p.rootFields[idx].Args = append(p.rootFields[idx].Args, resolve.FieldArgument{ + Name: argName, + Variable: variable, + }) +} + func (p *Planner[T]) addCustomField(ref int) (upstreamFieldRef int) { fieldName, alias := p.handleFieldAlias(ref) fieldNode := p.upstreamOperation.AddField(ast.Field{ @@ -823,6 +1156,13 @@ func (p *Planner[T]) EnterDocument(_, _ *ast.Document) { p.addDirectivesToVariableDefinitions = map[int][]int{} p.addedInlineFragments = map[onTypeInlineFragment]struct{}{} + + // reset root fields tracking for cache key generation + for i := 0; i < len(p.rootFields); i++ { + p.rootFields[i].Args = nil + } + p.rootFields = p.rootFields[:0] + clear(p.requestScopedResponseKeys) } func (p *Planner[T]) LeaveDocument(_, _ *ast.Document) { @@ -838,12 +1178,41 @@ func (p *Planner[T]) addRepresentationsVariable() { return } - variable, _ := p.variables.AddVariable(p.buildRepresentationsVariable()) + representationsVariable := resolve.NewResolvableObjectVariable(p.buildRepresentationsVariable()) + + // Build cache key template from only @key fields (no @requires fields) + // This ensures stable entity identity for both L1 and L2 cache + cacheKeysObject := p.buildCacheKeyVariable() + var cacheKeysVar *resolve.ResolvableObjectVariable + if cacheKeysObject != nil { + cacheKeysVar = resolve.NewResolvableObjectVariable(cacheKeysObject) + } else { + // Fallback to full representations if no @key-only fields found. + // This can happen when all RequiredFields are @requires/@provides (no pure @key entries). + // In practice this is rare since entity resolution typically requires at least one @key field. + cacheKeysVar = representationsVariable + } + + // Extract entity type name for cache key fallback when __typename is missing from response. + // All RequiredFields entries share the same entity type, so use the first one. + var entityTypeName string + if len(p.dataSourcePlannerConfig.RequiredFields) > 0 { + entityTypeName = p.dataSourcePlannerConfig.RequiredFields[0].TypeName + } + + entityCacheKeyTemplate := &resolve.EntityQueryCacheKeyTemplate{ + Keys: cacheKeysVar, + TypeName: entityTypeName, + } + + p.entityCacheKeyTemplate = entityCacheKeyTemplate + + variable, _ := p.variables.AddVariable(representationsVariable) p.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, "representations", []byte(fmt.Sprintf("[%s]", variable))) } -func (p *Planner[T]) buildRepresentationsVariable() resolve.Variable { +func (p *Planner[T]) buildRepresentationsVariable() *resolve.Object { objects := make([]*resolve.Object, 0, len(p.dataSourcePlannerConfig.RequiredFields)) for _, cfg := range p.dataSourcePlannerConfig.RequiredFields { node, err := buildRepresentationVariableNode(p.visitor.Definition, cfg, p.dataSourceConfig.FederationConfiguration()) @@ -855,9 +1224,37 @@ func (p *Planner[T]) buildRepresentationsVariable() resolve.Variable { objects = append(objects, node) } - return resolve.NewResolvableObjectVariable( - mergeRepresentationVariableNodes(objects), - ) + return mergeRepresentationVariableNodes(objects) +} + +// buildCacheKeyVariable builds a representation variable containing ONLY @key fields. +// This is used for cache keys (both L1 and L2) to ensure stable entity identity. +// @requires fields are excluded because they vary between fetches but don't affect entity identity. +// Returns nil if no @key configurations are found. +func (p *Planner[T]) buildCacheKeyVariable() *resolve.Object { + var objects []*resolve.Object + for _, cfg := range p.dataSourcePlannerConfig.RequiredFields { + // Only include @key configurations (FieldName is empty for keys) + // @requires/@provides have FieldName set to the field they apply to + if cfg.FieldName != "" { + continue + } + + node, err := buildRepresentationVariableNode(p.visitor.Definition, cfg, p.dataSourceConfig.FederationConfiguration()) + if err != nil { + // Don't fail the whole request, just skip this key configuration for cache keys. + // This may cause cache misses for this entity type. + continue + } + + objects = append(objects, node) + } + + if len(objects) == 0 { + return nil + } + + return mergeRepresentationVariableNodes(objects) } func (p *Planner[T]) addRepresentationsQuery() { @@ -1093,7 +1490,7 @@ func (p *Planner[T]) configureArgument(upstreamFieldRef, downstreamFieldRef int, switch argumentConfiguration.SourceType { case plan.FieldArgumentSource: - p.configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef, argumentConfiguration) + p.configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef, fieldConfig, argumentConfiguration) case plan.ObjectFieldSource: p.configureObjectFieldSource(upstreamFieldRef, downstreamFieldRef, fieldConfig, argumentConfiguration) } @@ -1102,7 +1499,7 @@ func (p *Planner[T]) configureArgument(upstreamFieldRef, downstreamFieldRef int, } // configureFieldArgumentSource - creates variables for a plain argument types, in case object or list types goes deep and calls applyInlineFieldArgument -func (p *Planner[T]) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, argumentConfiguration plan.ArgumentConfiguration) { +func (p *Planner[T]) configureFieldArgumentSource(upstreamFieldRef, downstreamFieldRef int, fieldConfig plan.FieldConfiguration, argumentConfiguration plan.ArgumentConfiguration) { fieldArgument, ok := p.visitor.Operation.FieldArgument(downstreamFieldRef, []byte(argumentConfiguration.Name)) if !ok { return @@ -1124,6 +1521,16 @@ func (p *Planner[T]) configureFieldArgumentSource(upstreamFieldRef, downstreamFi variableValueRef, argRef := p.upstreamOperation.AddVariableValueArgument([]byte(argumentConfiguration.Name), variableName) // add the argument to the field, but don't redefine it p.upstreamOperation.AddArgumentToField(upstreamFieldRef, argRef) + // Only track arguments for root fields — nested entity fields use @key-based + // cache keys (EntityQueryCacheKeyTemplate) which don't include field arguments. + if p.isRootField() { + coordinate := resolve.GraphCoordinate{ + TypeName: fieldConfig.TypeName, + FieldName: fieldConfig.FieldName, + } + p.trackFieldWithArgument(coordinate, argumentConfiguration.Name, contextVariable) + } + if exists { // if the variable exists we don't have to put it onto the variables declaration again, skip return } @@ -1275,6 +1682,16 @@ func (p *Planner[T]) configureObjectFieldSource(upstreamFieldRef, downstreamFiel Renderer: resolve.NewJSONVariableRenderer(), } + // Only track arguments for root fields — nested entity fields use @key-based + // cache keys (EntityQueryCacheKeyTemplate) which don't include field arguments. + if p.isRootField() { + coordinate := resolve.GraphCoordinate{ + TypeName: fieldConfiguration.TypeName, + FieldName: fieldConfiguration.FieldName, + } + p.trackFieldWithArgument(coordinate, argumentConfiguration.Name, variable) + } + objectVariableName, exists := p.variables.AddVariable(variable) if !exists { p.upstreamVariables, _ = sjson.SetRawBytes(p.upstreamVariables, string(variableName), []byte(objectVariableName)) @@ -1653,6 +2070,11 @@ func (p *Planner[T]) handleFieldAlias(ref int) (newFieldName string, alias ast.A break } } + + if syntheticAlias, ok := p.visitor.RequestScopedFetchAlias(ref); ok { + alias.IsDefined = true + alias.Name = p.upstreamOperation.Input.AppendInputString(syntheticAlias) + } return fieldName, alias } diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_entity_key_mapping_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_entity_key_mapping_test.go new file mode 100644 index 0000000000..b68dde8baf --- /dev/null +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_entity_key_mapping_test.go @@ -0,0 +1,1181 @@ +package graphql_datasource + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/astnormalization" + "github.com/wundergraph/graphql-go-tools/v2/pkg/asttransform" + "github.com/wundergraph/graphql-go-tools/v2/pkg/astvalidation" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/postprocess" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" + "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafeparser" + "github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport" +) + +// planAndExtractCacheConfig runs the planner on the given schema/query/config and returns +// the FetchCacheConfiguration for each SingleFetch in the plan, keyed by fetch index. +func planAndExtractCacheConfig(t *testing.T, definition, operation, operationName string, config plan.Configuration) []resolve.FetchCacheConfiguration { + t.Helper() + + def := unsafeparser.ParseGraphqlDocumentString(definition) + op := unsafeparser.ParseGraphqlDocumentString(operation) + err := asttransform.MergeDefinitionWithBaseSchema(&def) + require.NoError(t, err) + norm := astnormalization.NewWithOpts( + astnormalization.WithExtractVariables(), + astnormalization.WithInlineFragmentSpreads(), + astnormalization.WithRemoveFragmentDefinitions(), + astnormalization.WithRemoveUnusedVariables(), + ) + var report operationreport.Report + norm.NormalizeOperation(&op, &def, &report) + require.False(t, report.HasErrors(), report.Error()) + + valid := astvalidation.DefaultOperationValidator() + valid.Validate(&op, &def, &report) + require.False(t, report.HasErrors(), report.Error()) + + p, err := plan.NewPlanner(config) + require.NoError(t, err) + + actualPlan := p.Plan(&op, &def, operationName, &report) + require.False(t, report.HasErrors(), report.Error()) + + processor := postprocess.NewProcessor( + postprocess.DisableResolveInputTemplates(), + postprocess.DisableCreateConcreteSingleFetchTypes(), + postprocess.DisableCreateParallelNodes(), + postprocess.DisableMergeFields(), + ) + processor.Process(actualPlan) + + syncPlan, ok := actualPlan.(*plan.SynchronousResponsePlan) + require.True(t, ok, "expected SynchronousResponsePlan") + require.NotNil(t, syncPlan.Response) + require.NotNil(t, syncPlan.Response.Fetches) + + var configs []resolve.FetchCacheConfiguration + collectCacheConfigs(syncPlan.Response.Fetches, &configs) + return configs +} + +func collectCacheConfigs(node *resolve.FetchTreeNode, out *[]resolve.FetchCacheConfiguration) { + if node == nil { + return + } + if node.Item != nil && node.Item.Fetch != nil { + if sf, ok := node.Item.Fetch.(*resolve.SingleFetch); ok { + *out = append(*out, sf.FetchConfiguration.Caching) + } + } + if node.Trigger != nil { + collectCacheConfigs(node.Trigger, out) + } + for _, child := range node.ChildNodes { + collectCacheConfigs(child, out) + } +} + +func newExpectedRootQueryCacheKeyTemplate(rootFields []resolve.QueryField, entityKeyMappings []resolve.EntityKeyMappingConfig) *resolve.RootQueryCacheKeyTemplate { + return resolve.NewRootQueryCacheKeyTemplate(rootFields, entityKeyMappings) +} + +// newEntityKeyMappingTestConfig creates a plan.Configuration for entity key mapping tests +// with a single "accounts" subgraph that has a User entity. +func newEntityKeyMappingTestConfig(t *testing.T, rootFieldCaching plan.RootFieldCacheConfigurations, entityCaching plan.EntityCacheConfigurations, sdl string, keys plan.FederationFieldConfigurations) plan.Configuration { + t.Helper() + + ds := mustDataSourceConfiguration(t, + "accounts", + &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"user", "userByIdAndName"}}, + {TypeName: "User", FieldNames: []string{"id", "username"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: keys, + RootFieldCaching: rootFieldCaching, + EntityCaching: entityCaching, + }, + }, + mustCustomConfiguration(t, + ConfigurationInput{ + Fetch: &FetchConfiguration{URL: "http://accounts.service"}, + SchemaConfiguration: mustSchema(t, + &FederationConfiguration{Enabled: true, ServiceSDL: sdl}, + sdl, + ), + }, + ), + ) + + return plan.Configuration{ + DataSources: []plan.DataSource{ds}, + DisableIncludeInfo: false, + DisableIncludeFieldDependencies: false, + DisableEntityCaching: false, + DisableFetchProvidesData: false, + Fields: plan.FieldConfigurations{ + {TypeName: "Query", FieldName: "user", Arguments: plan.ArgumentsConfigurations{ + {Name: "id", SourceType: plan.FieldArgumentSource, SourcePath: []string{"id"}}, + }}, + {TypeName: "Query", FieldName: "userByIdAndName", Arguments: plan.ArgumentsConfigurations{ + {Name: "id", SourceType: plan.FieldArgumentSource, SourcePath: []string{"id"}}, + {Name: "username", SourceType: plan.FieldArgumentSource, SourcePath: []string{"username"}}, + }}, + }, + } +} + +func TestEntityKeyMappingPlanning(t *testing.T) { + definition := ` + type User { + id: ID! + username: String! + } + type Query { + user(id: ID!): User + userByIdAndName(id: ID!, username: String!): User + } + ` + + sdl := ` + type Query { + user(id: ID!): User + userByIdAndName(id: ID!, username: String!): User + } + type User @key(fields: "id") { + id: ID! + username: String! + } + ` + + keys := plan.FederationFieldConfigurations{ + {TypeName: "User", SelectionSet: "id"}, + } + + t.Run("simple scalar key", func(t *testing.T) { + // Root field user(id) with single EntityKeyMapping for @key(fields: "id") + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!) { user(id: $id) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs), "should have 1 fetch") + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "user:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"user"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("composite scalar keys", func(t *testing.T) { + // Root field userByIdAndName(id, username) with single EntityKeyMapping + // that has 2 FieldMappings (composite key: id + username) + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs), "should have 1 fetch") + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "userByIdAndName"}, + ResponseKey: "userByIdAndName", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + {Name: "username", Variable: &resolve.ContextVariable{Path: []string{"username"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "userByIdAndName:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"userByIdAndName"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("cross-lookup setup", func(t *testing.T) { + // Both root field entity key mapping AND entity caching for same type + // Verifies the planner produces both templates for cross-lookup + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + entityCaching := plan.EntityCacheConfigurations{ + {TypeName: "User", CacheName: "default", TTL: 30 * time.Second}, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, entityCaching, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!) { user(id: $id) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs), "should have 1 fetch (root field only, no entity fetch for same subgraph)") + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "user:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"user"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("with header prefix", func(t *testing.T) { + // Same as simple scalar key but with IncludeSubgraphHeaderPrefix + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!) { user(id: $id) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs)) + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "user:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"user"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("without entity key mapping regression", func(t *testing.T) { + // Root field caching WITHOUT EntityKeyMappings → should use root field format + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + // No EntityKeyMappings + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!) { user(id: $id) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs)) + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{}), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "user:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"user"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("caching globally disabled", func(t *testing.T) { + // DisableEntityCaching: true → CacheKeyTemplate preserved for L1 but Enabled: false + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + config.DisableEntityCaching = true + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!) { user(id: $id) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs)) + assert.Equal(t, resolve.FetchCacheConfiguration{ + // When entity caching is globally disabled, Enabled is false but CacheKeyTemplate + // is preserved for L1 cache (which is controlled separately) + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "user:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"user"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("multiple keys single mapping", func(t *testing.T) { + // Entity with @key(fields: "id") @key(fields: "username"), but root field user(id) + // maps only to the "id" key. The config only has 1 EntityKeyMapping. + sdlMultiKey := ` + type Query { + user(id: ID!): User + userByIdAndName(id: ID!, username: String!): User + } + type User @key(fields: "id") @key(fields: "username") { + id: ID! + username: String! + } + ` + keysMulti := plan.FederationFieldConfigurations{ + {TypeName: "User", SelectionSet: "id"}, + {TypeName: "User", SelectionSet: "username"}, + } + + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdlMultiKey, keysMulti) + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!) { user(id: $id) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs)) + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "user:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"user"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("username"), + Value: &resolve.String{Path: []string{"username"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("multiple keys multiple mappings", func(t *testing.T) { + // Entity with @key(fields: "id") @key(fields: "username"), + // root field userByIdAndName(id, username) maps to BOTH keys. + // Config has 2 EntityKeyMappings. + sdlMultiKey := ` + type Query { + user(id: ID!): User + userByIdAndName(id: ID!, username: String!): User + } + type User @key(fields: "id") @key(fields: "username") { + id: ID! + username: String! + } + ` + keysMulti := plan.FederationFieldConfigurations{ + {TypeName: "User", SelectionSet: "id"}, + {TypeName: "User", SelectionSet: "username"}, + } + + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdlMultiKey, keysMulti) + cacheConfigs := planAndExtractCacheConfig(t, definition, `query Q($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs)) + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "userByIdAndName"}, + ResponseKey: "userByIdAndName", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + {Name: "username", Variable: &resolve.ContextVariable{Path: []string{"username"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "userByIdAndName:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"userByIdAndName"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("username"), + Value: &resolve.String{Path: []string{"username"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("aliased root fields get separate cache tracking", func(t *testing.T) { + // When query has `a: user(id: $id1) { ... } b: user(id: $id2) { ... }`, + // each aliased root field produces a separate fetch with its own RootFields entry and Args. + // The planner creates separate fetches because the aliases have different variables. + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, + `query Q($id1: ID!, $id2: ID!) { a: user(id: $id1) { id username } b: user(id: $id2) { id username } }`, "Q", config) + + // Each alias gets its own fetch because they have different variables, + // so the planner creates 2 separate fetches with 1 root field entry each. + require.Equal(t, 2, len(cacheConfigs), "should have 2 fetches (one per alias)") + + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "a", // aliased as `a: user(...)` + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id1"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id1"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "a:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"a"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "b", // aliased as `b: user(...)` + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id2"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id2"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "b:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"b"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[1]) + }) + + t.Run("aliased root fields use alias in entity cache key path", func(t *testing.T) { + // When a query uses aliases like `a: user(id: $id1) { ... }`, the + // RootFieldL1EntityCacheKeyTemplates must use the alias ("a") as the + // response path, not the schema field name ("user"). The response JSON + // is keyed by alias, so the template path must match. + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, + `query Q($id: ID!) { myUser: user(id: $id) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs), "should have 1 fetch") + + // The entity cache key template path must use the alias "myUser", not "user" + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "myUser", // aliased as `myUser: user(...)` + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "myUser:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"myUser"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) + + t.Run("multi-arg root field keeps args together", func(t *testing.T) { + // Regression: a root field with multiple arguments (e.g., userByIdAndName(id, username)) + // must produce exactly 1 RootFields entry with both args, not split them into separate entries. + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "userByIdAndName", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "User", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + }, + } + + config := newEntityKeyMappingTestConfig(t, rootFieldCaching, nil, sdl, keys) + cacheConfigs := planAndExtractCacheConfig(t, definition, + `query Q($id: ID!, $username: String!) { userByIdAndName(id: $id, username: $username) { id username } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs), "should have 1 fetch") + cc := cacheConfigs[0] + + // Exactly 1 root field entry (not split by args) + require.Equal(t, 1, len(cc.CacheKeyTemplate.(*resolve.RootQueryCacheKeyTemplate).RootFields), + "multi-arg field must produce exactly 1 RootFields entry, not split by args") + + // The entry has both args + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "userByIdAndName"}, + ResponseKey: "userByIdAndName", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + {Name: "username", Variable: &resolve.ContextVariable{Path: []string{"username"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "userByIdAndName:User": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "User", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"userByIdAndName"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }), + }, + }, + }, cc) + }) + + t.Run("nested object key", func(t *testing.T) { + // Entity with @key(fields: "id info {a b}"), root field provides + // arguments that map to the nested key structure + definitionNested := ` + type Info { + a: ID! + b: ID! + } + type Account { + id: ID! + info: Info + name: String! + } + type Query { + account(id: ID!, a: ID!, b: ID!): Account + } + ` + sdlNested := ` + type Query { + account(id: ID!, a: ID!, b: ID!): Account + } + type Account @key(fields: "id info {a b}") { + id: ID! + info: Info + name: String! + } + type Info { + a: ID! + b: ID! + } + ` + keysNested := plan.FederationFieldConfigurations{ + {TypeName: "Account", SelectionSet: "id info {a b}"}, + } + + rootFieldCaching := plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "account", + CacheName: "default", + TTL: 30 * time.Second, + EntityKeyMappings: []plan.EntityKeyMapping{ + { + EntityTypeName: "Account", + FieldMappings: []plan.FieldMapping{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "a", ArgumentPath: []string{"a"}}, + {EntityKeyField: "b", ArgumentPath: []string{"b"}}, + }, + }, + }, + }, + } + + ds := mustDataSourceConfiguration(t, + "accounts", + &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"account"}}, + {TypeName: "Account", FieldNames: []string{"id", "info", "name"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "Info", FieldNames: []string{"a", "b"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: keysNested, + RootFieldCaching: rootFieldCaching, + }, + }, + mustCustomConfiguration(t, + ConfigurationInput{ + Fetch: &FetchConfiguration{URL: "http://accounts.service"}, + SchemaConfiguration: mustSchema(t, + &FederationConfiguration{Enabled: true, ServiceSDL: sdlNested}, + sdlNested, + ), + }, + ), + ) + + config := plan.Configuration{ + DataSources: []plan.DataSource{ds}, + DisableIncludeInfo: false, + DisableIncludeFieldDependencies: false, + DisableEntityCaching: false, + DisableFetchProvidesData: false, + Fields: plan.FieldConfigurations{ + {TypeName: "Query", FieldName: "account", Arguments: plan.ArgumentsConfigurations{ + {Name: "id", SourceType: plan.FieldArgumentSource, SourcePath: []string{"id"}}, + {Name: "a", SourceType: plan.FieldArgumentSource, SourcePath: []string{"a"}}, + {Name: "b", SourceType: plan.FieldArgumentSource, SourcePath: []string{"b"}}, + }}, + }, + } + + cacheConfigs := planAndExtractCacheConfig(t, definitionNested, `query Q($id: ID!, $a: ID!, $b: ID!) { account(id: $id, a: $a, b: $b) { id name } }`, "Q", config) + + require.Equal(t, 1, len(cacheConfigs)) + assert.Equal(t, resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newExpectedRootQueryCacheKeyTemplate([]resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{TypeName: "Query", FieldName: "account"}, + ResponseKey: "account", + Args: []resolve.FieldArgument{ + {Name: "id", Variable: &resolve.ContextVariable{Path: []string{"id"}, Renderer: resolve.NewJSONVariableRenderer()}}, + {Name: "a", Variable: &resolve.ContextVariable{Path: []string{"a"}, Renderer: resolve.NewJSONVariableRenderer()}}, + {Name: "b", Variable: &resolve.ContextVariable{Path: []string{"b"}, Renderer: resolve.NewJSONVariableRenderer()}}, + }, + }, + }, []resolve.EntityKeyMappingConfig{ + { + EntityTypeName: "Account", + FieldMappings: []resolve.EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + {EntityKeyField: "a", ArgumentPath: []string{"a"}}, + {EntityKeyField: "b", ArgumentPath: []string{"b"}}, + }, + }, + }), + RootFieldL1EntityCacheKeyTemplates: map[string]resolve.CacheKeyTemplate{ + "account:Account": &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "Account", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Path: []string{"account"}, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{Path: []string{"__typename"}}, + OnTypeNames: [][]byte{[]byte("Account")}, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + OnTypeNames: [][]byte{[]byte("Account")}, + }, + { + Name: []byte("info"), + Value: &resolve.Object{ + Nullable: true, + Path: []string{"info"}, + Fields: []*resolve.Field{ + { + Name: []byte("a"), + Value: &resolve.Scalar{Path: []string{"a"}}, + }, + { + Name: []byte("b"), + Value: &resolve.Scalar{Path: []string{"b"}}, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("Account")}, + }, + }, + }), + }, + }, + }, cacheConfigs[0]) + }) +} diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go index 7f115f1e83..da0c06954a 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_federation_test.go @@ -2,6 +2,7 @@ package graphql_datasource import ( "testing" + "time" "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" . "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasourcetesting" @@ -1122,6 +1123,15 @@ func TestGraphQLDataSourceFederation(t *testing.T) { SelectionSet: "shippingInfo {zip}", }, }, + RootFieldCaching: plan.RootFieldCacheConfigurations{ + { + TypeName: "Query", + FieldName: "user", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + }, + }, }, }, mustCustomConfiguration(t, @@ -1226,6 +1236,14 @@ func TestGraphQLDataSourceFederation(t *testing.T) { SelectionSet: "zip", }, }, + EntityCaching: plan.EntityCacheConfigurations{ + { + TypeName: "Account", + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + }, + }, Provides: plan.FederationFieldConfigurations{ { TypeName: "Account", @@ -1538,9 +1556,10 @@ func TestGraphQLDataSourceFederation(t *testing.T) { query CompositeKeys { user { account { + __typename name shippingInfo { - zip + z: zip } } } @@ -1558,6 +1577,25 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Input: `{"method":"POST","url":"http://user.service","body":{"query":"{user {account {__typename id info {a b}}}}"}}`, DataSource: &Source{}, PostProcessing: DefaultPostProcessingConfiguration, + Caching: resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + // UseL1Cache defaults to false - root query fetches with RootQueryCacheKeyTemplate don't populate entity L1 cache + CacheKeyTemplate: &resolve.RootQueryCacheKeyTemplate{ + RootFields: []resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{ + TypeName: "Query", + FieldName: "user", + }, + Args: []resolve.FieldArgument{}, + ResponseKey: "user", + }, + }, + }, + }, }, Info: &resolve.FetchInfo{ DataSourceID: "user.service", @@ -1569,6 +1607,61 @@ func TestGraphQLDataSourceFederation(t *testing.T) { FieldName: "user", }, }, + ProvidesData: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("user"), + Value: &resolve.Object{ + Path: []string{"user"}, + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("account"), + Value: &resolve.Object{ + Path: []string{"account"}, + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.Scalar{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &resolve.Scalar{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("info"), + Value: &resolve.Object{ + Path: []string{"info"}, + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("a"), + Value: &resolve.Scalar{ + Path: []string{"a"}, + }, + }, + { + Name: []byte("b"), + Value: &resolve.Scalar{ + Path: []string{"b"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, }), resolve.SingleWithPath(&resolve.SingleFetch{ @@ -1590,11 +1683,141 @@ func TestGraphQLDataSourceFederation(t *testing.T) { HasAuthorizationRule: true, }, }, + CoordinateDependencies: []resolve.FetchDependency{ + { + Coordinate: resolve.GraphCoordinate{ + TypeName: "Account", + FieldName: "name", + }, + IsUserRequested: true, + DependsOn: []resolve.FetchDependencyOrigin{ + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Account", + FieldName: "id", + }, + IsKey: true, + IsRequires: false, + }, + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Account", + FieldName: "info", + }, + IsKey: true, + IsRequires: false, + }, + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Info", + FieldName: "a", + }, + IsKey: true, + IsRequires: false, + }, + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Info", + FieldName: "b", + }, + IsKey: true, + IsRequires: false, + }, + }, + }, + { + Coordinate: resolve.GraphCoordinate{ + TypeName: "Account", + FieldName: "shippingInfo", + }, + IsUserRequested: true, + DependsOn: []resolve.FetchDependencyOrigin{ + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Account", + FieldName: "id", + }, + IsKey: true, + IsRequires: false, + }, + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Account", + FieldName: "info", + }, + IsKey: true, + IsRequires: false, + }, + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Info", + FieldName: "a", + }, + IsKey: true, + IsRequires: false, + }, + { + FetchID: 0, + Subgraph: "user.service", + Coordinate: resolve.GraphCoordinate{ + TypeName: "Info", + FieldName: "b", + }, + IsKey: true, + IsRequires: false, + }, + }, + }, + }, OperationType: ast.OperationTypeQuery, + ProvidesData: &resolve.Object{ + HasAliases: true, + Fields: []*resolve.Field{ + { + Name: []byte("name"), + OnTypeNames: [][]byte{[]byte("Account")}, + Value: &resolve.Scalar{ + Path: []string{"name"}, + }, + }, + { + Name: []byte("shippingInfo"), + OnTypeNames: [][]byte{[]byte("Account")}, + Value: &resolve.Object{ + Path: []string{"shippingInfo"}, + Nullable: true, + HasAliases: true, + Fields: []*resolve.Field{ + { + Name: []byte("z"), + OriginalName: []byte("zip"), + Value: &resolve.Scalar{ + Path: []string{"z"}, + }, + }, + }, + }, + }, + }, + }, }, DataSourceIdentifier: []byte("graphql_datasource.Source"), FetchConfiguration: resolve.FetchConfiguration{ - Input: `{"method":"POST","url":"http://account.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Account {__typename name shippingInfo {zip}}}}","variables":{"representations":[$$0$$]}}}`, + Input: `{"method":"POST","url":"http://account.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Account {__typename name shippingInfo {z: zip}}}}","variables":{"representations":[$$0$$]}}}`, DataSource: &Source{}, SetTemplateOutputToNullOnVariableNull: true, RequiresEntityFetch: true, @@ -1644,6 +1867,67 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, PostProcessing: SingleEntityPostProcessingConfiguration, + Caching: resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: time.Second * 30, + IncludeSubgraphHeaderPrefix: true, + UseL1Cache: false, // Set to false by postprocessor (no L1 benefit for this fetch) + KeyFields: []resolve.KeyField{ + {Name: "id"}, + { + Name: "info", + Children: []resolve.KeyField{ + {Name: "a"}, + {Name: "b"}, + }, + }, + }, + CacheKeyTemplate: &resolve.EntityQueryCacheKeyTemplate{ + TypeName: "Account", + Keys: resolve.NewResolvableObjectVariable(&resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + OnTypeNames: [][]byte{[]byte("Account")}, + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + OnTypeNames: [][]byte{[]byte("Account")}, + Value: &resolve.Scalar{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("info"), + OnTypeNames: [][]byte{[]byte("Account")}, + Value: &resolve.Object{ + Path: []string{"info"}, + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("a"), + Value: &resolve.Scalar{ + Path: []string{"a"}, + }, + }, + { + Name: []byte("b"), + Value: &resolve.Scalar{ + Path: []string{"b"}, + }, + }, + }, + }, + }, + }, + }), + }, + }, }, }, "user.account", resolve.ObjectPath("user"), resolve.ObjectPath("account")), ), @@ -1672,6 +1956,11 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, TypeName: "User", SourceName: "user.service", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{ + {Name: "id"}, + }, + }, Fields: []*resolve.Field{ { Name: []byte("account"), @@ -1684,6 +1973,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Names: []string{"user.service"}, }, ExactParentTypeName: "User", + CacheAnalyticsHash: true, }, Value: &resolve.Object{ Path: []string{"account"}, @@ -1693,7 +1983,33 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, TypeName: "Account", SourceName: "user.service", + CacheAnalytics: &resolve.ObjectCacheAnalytics{ + KeyFields: []resolve.KeyField{ + {Name: "id"}, + {Name: "info"}, + {Name: "{a"}, + {Name: "b}"}, + }, + }, Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Info: &resolve.FieldInfo{ + Name: "__typename", + NamedType: "String", + ParentTypeNames: []string{"Account"}, + Source: resolve.TypeFieldSource{ + IDs: []string{"user.service"}, + Names: []string{"user.service"}, + }, + ExactParentTypeName: "Account", + CacheAnalyticsHash: true, + }, + Value: &resolve.String{ + Path: []string{"__typename"}, + IsTypeName: true, + }, + }, { Name: []byte("name"), Info: &resolve.FieldInfo{ @@ -1705,6 +2021,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Names: []string{"account.service"}, }, ExactParentTypeName: "Account", + CacheAnalyticsHash: true, }, Value: &resolve.String{ Path: []string{"name"}, @@ -1722,6 +2039,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, ExactParentTypeName: "Account", HasAuthorizationRule: true, + CacheAnalyticsHash: true, }, Value: &resolve.Object{ Path: []string{"shippingInfo"}, @@ -1733,7 +2051,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { SourceName: "account.service", Fields: []*resolve.Field{ { - Name: []byte("zip"), + Name: []byte("z"), Info: &resolve.FieldInfo{ Name: "zip", NamedType: "String", @@ -1745,7 +2063,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { ExactParentTypeName: "ShippingInfo", }, Value: &resolve.String{ - Path: []string{"zip"}, + Path: []string{"z"}, }, }, }, @@ -1761,7 +2079,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { }, }, }, - planConfiguration, WithFieldInfo(), WithDefaultPostProcessor())) + planConfiguration, WithFieldInfo(), WithDefaultPostProcessor(), WithFieldDependencies(), WithEntityCaching(), WithFetchProvidesData(), WithCacheKeyTemplates())) }) t.Run("composite keys variant", func(t *testing.T) { @@ -3622,6 +3940,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Info: &resolve.FieldInfo{ Name: "account", ExactParentTypeName: "User", + CacheAnalyticsHash: true, ParentTypeNames: []string{"User"}, NamedType: "Account", Source: resolve.TypeFieldSource{ @@ -3643,6 +3962,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Info: &resolve.FieldInfo{ Name: "address", ExactParentTypeName: "Account", + CacheAnalyticsHash: true, ParentTypeNames: []string{"Account"}, NamedType: "Address", Source: resolve.TypeFieldSource{ @@ -3667,6 +3987,7 @@ func TestGraphQLDataSourceFederation(t *testing.T) { Info: &resolve.FieldInfo{ Name: "fullAddress", ExactParentTypeName: "Address", + CacheAnalyticsHash: true, ParentTypeNames: []string{"Address"}, NamedType: "String", Source: resolve.TypeFieldSource{ diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go index 05b07df9e1..1f23ffe759 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_datasource_test.go @@ -394,6 +394,49 @@ func TestGraphQLDataSource(t *testing.T) { }, ), PostProcessing: DefaultPostProcessingConfiguration, + Caching: resolve.FetchCacheConfiguration{ + CacheKeyTemplate: &resolve.RootQueryCacheKeyTemplate{ + RootFields: []resolve.QueryField{ + { + Coordinate: resolve.GraphCoordinate{ + TypeName: "Query", + FieldName: "droid", + }, + Args: []resolve.FieldArgument{ + { + Name: "id", + Variable: &resolve.ContextVariable{ + Path: []string{"id"}, + Renderer: resolve.NewJSONVariableRenderer(), + }, + }, + }, + ResponseKey: "droid", + }, + { + Coordinate: resolve.GraphCoordinate{ + TypeName: "Query", + FieldName: "hero", + }, + ResponseKey: "hero", + }, + { + Coordinate: resolve.GraphCoordinate{ + TypeName: "Query", + FieldName: "stringList", + }, + ResponseKey: "stringList", + }, + { + Coordinate: resolve.GraphCoordinate{ + TypeName: "Query", + FieldName: "nestedStringList", + }, + ResponseKey: "nestedStringList", + }, + }, + }, + }, }, Info: &resolve.FetchInfo{ OperationType: ast.OperationTypeQuery, @@ -417,6 +460,103 @@ func TestGraphQLDataSource(t *testing.T) { FieldName: "nestedStringList", }, }, + ProvidesData: &resolve.Object{ + Nullable: false, + Path: []string{}, + HasAliases: true, + Fields: []*resolve.Field{ + { + Name: []byte("droid"), + Value: &resolve.Object{ + Nullable: true, + Path: []string{"droid"}, + HasAliases: true, + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + { + Name: []byte("aliased"), + OriginalName: []byte("name"), + Value: &resolve.Scalar{ + Path: []string{"aliased"}, + Nullable: false, + }, + }, + { + Name: []byte("friends"), + Value: &resolve.Array{ + Path: []string{"friends"}, + Nullable: true, + Item: &resolve.Object{ + Nullable: true, + Path: []string{}, + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + { + Name: []byte("primaryFunction"), + Value: &resolve.Scalar{ + Path: []string{"primaryFunction"}, + Nullable: false, + }, + }, + }, + }, + }, + { + Name: []byte("hero"), + Value: &resolve.Object{ + Nullable: true, + Path: []string{"hero"}, + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + { + Name: []byte("stringList"), + Value: &resolve.Array{ + Path: []string{"stringList"}, + Nullable: true, + Item: &resolve.Scalar{ + Path: []string{}, + Nullable: true, + }, + }, + }, + { + Name: []byte("nestedStringList"), + Value: &resolve.Array{ + Path: []string{"nestedStringList"}, + Nullable: true, + Item: &resolve.Scalar{ + Path: []string{}, + Nullable: true, + }, + }, + }, + }, + }, }, })), Info: &resolve.GraphQLResponseInfo{ @@ -681,7 +821,7 @@ func TestGraphQLDataSource(t *testing.T) { }, }, DisableResolveFieldPositions: true, - }, WithFieldInfo(), WithDefaultPostProcessor())) + }, WithFieldInfo(), WithDefaultPostProcessor(), WithFetchProvidesData(), WithEntityCaching(), WithCacheKeyTemplates())) t.Run("selections on interface type", RunTest(interfaceSelectionSchema, ` query MyQuery { @@ -8478,42 +8618,36 @@ type testSubscriptionUpdater struct { func (t *testSubscriptionUpdater) AwaitUpdates(tt *testing.T, timeout time.Duration, count int) { tt.Helper() - ticker := time.NewTicker(timeout) - defer ticker.Stop() + deadline := time.Now().Add(timeout) for { - time.Sleep(10 * time.Millisecond) - select { - case <-ticker.C: - tt.Fatalf("timed out waiting for updates") - default: - t.mux.Lock() - if len(t.updates) == count { - t.mux.Unlock() - return - } - t.mux.Unlock() + t.mux.Lock() + got := len(t.updates) + t.mux.Unlock() + if got == count { + return + } + if time.Now().After(deadline) { + tt.Fatalf("timed out waiting for updates: got %d, want %d", got, count) } + time.Sleep(10 * time.Millisecond) } } func (t *testSubscriptionUpdater) AwaitDone(tt *testing.T, timeout time.Duration) { tt.Helper() - ticker := time.NewTicker(timeout) - defer ticker.Stop() + deadline := time.Now().Add(timeout) for { - time.Sleep(10 * time.Millisecond) - select { - case <-ticker.C: + t.mux.Lock() + isDone := t.done + t.mux.Unlock() + if isDone { + return + } + if time.Now().After(deadline) { tt.Fatalf("timed out waiting for done") - default: - t.mux.Lock() - if t.done { - t.mux.Unlock() - return - } - t.mux.Unlock() } + time.Sleep(10 * time.Millisecond) } } diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_sse_handler_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_sse_handler_test.go index 4c50f19176..c5c8cdbeef 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_sse_handler_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_sse_handler_test.go @@ -51,7 +51,7 @@ func TestGraphQLSubscriptionClientSubscribe_SSE(t *testing.T) { ctx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) @@ -91,7 +91,7 @@ func TestGraphQLSubscriptionClientSubscribe_SSE_RequestAbort(t *testing.T) { clientCancel() client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, t.Context(), - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) @@ -157,7 +157,7 @@ func TestGraphQLSubscriptionClientSubscribe_SSE_POST(t *testing.T) { ctx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) @@ -228,7 +228,7 @@ func TestGraphQLSubscriptionClientSubscribe_SSE_WithEvents(t *testing.T) { ctx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) @@ -294,7 +294,7 @@ func TestGraphQLSubscriptionClientSubscribe_SSE_Error(t *testing.T) { ctx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) @@ -397,7 +397,7 @@ func TestGraphQLSubscriptionClientSubscribe_SSE_Error_Without_Header(t *testing. ctx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) @@ -466,7 +466,7 @@ func TestGraphQLSubscriptionClientSubscribe_QueryParams(t *testing.T) { ctx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) @@ -607,7 +607,7 @@ func TestGraphQLSubscriptionClientSubscribe_SSE_Upstream_Dies(t *testing.T) { ctx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_tws_handler_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_tws_handler_test.go index cc69902198..638b6a739d 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_tws_handler_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_tws_handler_test.go @@ -61,7 +61,7 @@ func TestWebsocketSubscriptionClient_GQLTWS(t *testing.T) { serverCtx, serverCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ).(*subscriptionClient) @@ -139,7 +139,7 @@ func TestWebsocketSubscriptionClientPing_GQLTWS(t *testing.T) { serverCtx, serverCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ).(*subscriptionClient) @@ -206,7 +206,7 @@ func TestWebsocketSubscriptionClientError_GQLTWS(t *testing.T) { clientCtx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) diff --git a/v2/pkg/engine/datasource/graphql_datasource/graphql_ws_handler_test.go b/v2/pkg/engine/datasource/graphql_datasource/graphql_ws_handler_test.go index eddc47253c..7d3a843286 100644 --- a/v2/pkg/engine/datasource/graphql_datasource/graphql_ws_handler_test.go +++ b/v2/pkg/engine/datasource/graphql_datasource/graphql_ws_handler_test.go @@ -72,7 +72,7 @@ func TestWebSocketSubscriptionClientInitIncludeKA_GQLWS(t *testing.T) { defer serverCancel() client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ).(*subscriptionClient) updater := &testSubscriptionUpdater{} @@ -136,7 +136,7 @@ func TestWebsocketSubscriptionClient_GQLWS(t *testing.T) { defer serverCancel() client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ).(*subscriptionClient) updater := &testSubscriptionUpdater{} @@ -197,7 +197,7 @@ func TestWebsocketSubscriptionClientErrorArray(t *testing.T) { defer serverCancel() clientCtx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) updater := &testSubscriptionUpdater{} @@ -254,7 +254,7 @@ func TestWebsocketSubscriptionClientErrorObject(t *testing.T) { defer serverCancel() clientCtx, clientCancel := context.WithCancel(context.Background()) client := NewGraphQLSubscriptionClient(http.DefaultClient, http.DefaultClient, serverCtx, - WithReadTimeout(time.Millisecond), + WithReadTimeout(100*time.Millisecond), WithLogger(logger()), ) updater := &testSubscriptionUpdater{} diff --git a/v2/pkg/engine/datasource/graphql_datasource/request_scoped_widening_test.go b/v2/pkg/engine/datasource/graphql_datasource/request_scoped_widening_test.go new file mode 100644 index 0000000000..129125c54d --- /dev/null +++ b/v2/pkg/engine/datasource/graphql_datasource/request_scoped_widening_test.go @@ -0,0 +1,1879 @@ +package graphql_datasource + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/astnormalization" + "github.com/wundergraph/graphql-go-tools/v2/pkg/asttransform" + "github.com/wundergraph/graphql-go-tools/v2/pkg/astvalidation" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/postprocess" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" + "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafeparser" + "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafeprinter" + "github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport" +) + +func TestRequestScopedWidening_ViewerSubgraphPlanning(t *testing.T) { + t.Parallel() + + t.Run("without requestScoped the root fetch stays narrow and the child fetch stays wide", func(t *testing.T) { + t.Parallel() + + actual := planViewerScenario(t, requestScopedScenario{ + enableRequestScoped: false, + operationSDL: ` + query Widening { + currentViewer { + id + name + } + article { + id + title + currentViewer { + id + name + email + } + } + } + `, + }) + + expected := expectedViewerScenario( + resolve.Sequence( + rootFetch(0, "http://viewer.service", ` + { + currentViewer { + id + name + } + } + `), + rootFetch(1, "http://articles.service", ` + { + article { + id + title + __typename + } + } + `), + entityFetch(2, 1, "article", "http://viewer.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Article { + __typename + currentViewer { + id + name + email + } + } + } + } + `), + ), + rootObject( + field("currentViewer", viewerObject( + scalarField("id"), + stringField("name"), + )), + field("article", articleObject( + scalarField("id"), + stringField("title"), + field("currentViewer", viewerObject( + scalarField("id"), + stringField("name"), + stringField("email"), + )), + )), + ), + nil, + nil, + ) + + assert.Equal(t, expected, actual) + }) + + t.Run("with requestScoped the root fetch widens and both fetches share the same loader mapping", func(t *testing.T) { + t.Parallel() + + actual := planViewerScenario(t, requestScopedScenario{ + enableRequestScoped: true, + operationSDL: ` + query Widening { + currentViewer { + id + name + } + article { + id + title + currentViewer { + id + name + email + } + } + } + `, + }) + + expectedProvides := viewerProvides( + providesScalarField("id"), + providesScalarField("name"), + providesScalarField("email"), + ) + expected := expectedViewerScenario( + resolve.Sequence( + rootFetch(0, "http://viewer.service", ` + { + currentViewer { + id + name + email + } + } + `, requestScopedField("currentViewer", expectedProvides)), + rootFetch(1, "http://articles.service", ` + { + article { + id + title + __typename + } + } + `), + entityFetch(2, 1, "article", "http://viewer.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Article { + __typename + currentViewer { + id + name + email + } + } + } + } + `, requestScopedField("currentViewer", expectedProvides)), + ), + rootObject( + field("currentViewer", viewerObject( + scalarField("id"), + stringField("name"), + )), + field("article", articleObject( + scalarField("id"), + stringField("title"), + field("currentViewer", viewerObject( + scalarField("id"), + stringField("name"), + stringField("email"), + )), + )), + ), + []plannedRequestScopedContract{ + contract(0, "currentViewer", "viewer.currentViewer", "id", "id"), + contract(0, "currentViewer", "viewer.currentViewer", "name", "name"), + contract(0, "currentViewer", "viewer.currentViewer", "email", "email"), + contract(2, "article.currentViewer", "viewer.currentViewer", "id", "id"), + contract(2, "article.currentViewer", "viewer.currentViewer", "name", "name"), + contract(2, "article.currentViewer", "viewer.currentViewer", "email", "email"), + }, + []plannedResponseBinding{ + binding("currentViewer.id", "viewer.currentViewer", "id"), + binding("currentViewer.name", "viewer.currentViewer", "name"), + binding("article.currentViewer.id", "viewer.currentViewer", "id"), + binding("article.currentViewer.name", "viewer.currentViewer", "name"), + binding("article.currentViewer.email", "viewer.currentViewer", "email"), + }, + ) + + assert.Equal(t, expected, actual) + }) + + t.Run("field conflicts use synthetic aliases in the subgraph fetches while the response tree stays user-shaped", func(t *testing.T) { + t.Parallel() + + actual := planViewerScenario(t, requestScopedScenario{ + enableRequestScoped: true, + operationSDL: ` + query Widening { + currentViewer { + id + name + } + article { + id + title + currentViewer { + id + name: email + } + } + } + `, + }) + + expected := expectedViewerScenario( + resolve.Sequence( + rootFetch(0, "http://viewer.service", ` + { + currentViewer { + id + __request_scoped__name_1: name + __request_scoped__name_0: email + } + } + `, + requestScopedField("currentViewer", viewerProvides( + providesScalarField("id"), + providesAliasedScalarField("__request_scoped__name_1", "name"), + providesAliasedScalarField("__request_scoped__name_0", "email"), + )), + ), + rootFetch(1, "http://articles.service", ` + { + article { + id + title + __typename + } + } + `), + entityFetch(2, 1, "article", "http://viewer.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Article { + __typename + currentViewer { + id + __request_scoped__name_0: email + __request_scoped__name_1: name + } + } + } + } + `, + requestScopedField("currentViewer", viewerProvides( + providesScalarField("id"), + providesAliasedScalarField("__request_scoped__name_0", "email"), + providesAliasedScalarField("__request_scoped__name_1", "name"), + )), + ), + ), + rootObject( + field("currentViewer", viewerObject( + scalarField("id"), + stringFieldAt("name", "__request_scoped__name_1"), + )), + field("article", articleObject( + scalarField("id"), + stringField("title"), + field("currentViewer", viewerObject( + scalarField("id"), + aliasedStringFieldAt("name", "email", "__request_scoped__name_0"), + )), + )), + ), + []plannedRequestScopedContract{ + contract(0, "currentViewer", "viewer.currentViewer", "id", "id"), + contract(0, "currentViewer", "viewer.currentViewer", "__request_scoped__name_1", "name"), + contract(0, "currentViewer", "viewer.currentViewer", "__request_scoped__name_0", "email"), + contract(2, "article.currentViewer", "viewer.currentViewer", "id", "id"), + contract(2, "article.currentViewer", "viewer.currentViewer", "__request_scoped__name_0", "email"), + contract(2, "article.currentViewer", "viewer.currentViewer", "__request_scoped__name_1", "name"), + }, + []plannedResponseBinding{ + binding("currentViewer.id", "viewer.currentViewer", "id"), + binding("currentViewer.name", "viewer.currentViewer", "__request_scoped__name_1"), + binding("article.currentViewer.id", "viewer.currentViewer", "id"), + binding("article.currentViewer.name", "viewer.currentViewer", "__request_scoped__name_0"), + }, + ) + + assert.Equal(t, expected, actual) + }) + + t.Run("argument conflicts use synthetic aliases in fetches and cache-arg mappings in requestScoped provides data", func(t *testing.T) { + t.Parallel() + + actual := planViewerScenario(t, requestScopedScenario{ + enableRequestScoped: true, + operationSDL: ` + query Widening { + currentViewer { + id + posts(first: 1) { + id + } + } + article { + id + title + currentViewer { + id + posts(first: 2) { + id + title + } + } + } + } + `, + }) + + expected := expectedViewerScenario( + resolve.Sequence( + rootFetch(0, "http://viewer.service", ` + query($a: Int!, $b: Int!) { + currentViewer { + id + __request_scoped__posts_0: posts(first: $a) { + id + } + __request_scoped__posts_1: posts(first: $b) { + id + title + } + } + } + `, + requestScopedField("currentViewer", viewerProvides( + providesScalarField("id"), + providesArrayField("__request_scoped__posts_0", "posts", "a", + postItemProvides( + providesScalarField("id"), + ), + ), + providesArrayField("__request_scoped__posts_1", "posts", "b", + postItemProvides( + providesScalarField("id"), + providesScalarField("title"), + ), + ), + )), + ), + rootFetch(1, "http://articles.service", ` + { + article { + id + title + __typename + } + } + `), + entityFetch(2, 1, "article", "http://viewer.service", ` + query($representations: [_Any!]!, $b: Int!, $a: Int!) { + _entities(representations: $representations) { + ... on Article { + __typename + currentViewer { + id + __request_scoped__posts_1: posts(first: $b) { + id + title + } + __request_scoped__posts_0: posts(first: $a) { + id + } + } + } + } + } + `, + requestScopedField("currentViewer", viewerProvides( + providesScalarField("id"), + providesArrayField("__request_scoped__posts_1", "posts", "b", + postItemProvides( + providesScalarField("id"), + providesScalarField("title"), + ), + ), + providesArrayField("__request_scoped__posts_0", "posts", "a", + postItemProvides( + providesScalarField("id"), + ), + ), + )), + ), + ), + rootObject( + field("currentViewer", viewerObject( + scalarField("id"), + postsDataFieldAt("__request_scoped__posts_0", + postItem( + scalarField("id"), + ), + ), + )), + field("article", articleObject( + scalarField("id"), + stringField("title"), + field("currentViewer", viewerObject( + scalarField("id"), + postsDataFieldAt("__request_scoped__posts_1", + postItem( + scalarField("id"), + stringField("title"), + ), + ), + )), + )), + ), + []plannedRequestScopedContract{ + contract(0, "currentViewer", "viewer.currentViewer", "id", "id"), + contract(0, "currentViewer", "viewer.currentViewer", "__request_scoped__posts_0", "posts", "first:a"), + contract(0, "currentViewer", "viewer.currentViewer", "__request_scoped__posts_1", "posts", "first:b"), + contract(2, "article.currentViewer", "viewer.currentViewer", "id", "id"), + contract(2, "article.currentViewer", "viewer.currentViewer", "__request_scoped__posts_1", "posts", "first:b"), + contract(2, "article.currentViewer", "viewer.currentViewer", "__request_scoped__posts_0", "posts", "first:a"), + }, + []plannedResponseBinding{ + binding("currentViewer.id", "viewer.currentViewer", "id"), + binding("currentViewer.posts", "viewer.currentViewer", "__request_scoped__posts_0"), + binding("article.currentViewer.id", "viewer.currentViewer", "id"), + binding("article.currentViewer.posts", "viewer.currentViewer", "__request_scoped__posts_1"), + }, + ) + + assert.Equal(t, expected, actual) + }) + + t.Run("requires-decorated fields widen through an aliased dependency without changing the user response", func(t *testing.T) { + t.Parallel() + + // The root participant exposes name through a user alias, while a downstream + // handle field on another subgraph requires the schema field name `name`. + // Widening must preserve the user alias at the root while still planning the + // hidden dependency fields needed for the later entity fetch. + actual := planRequestScopedRequiresChainViewerScenario(t, true, ` + query Widening { + currentViewer { + viewerName: name + } + article { + id + title + currentViewer { + handle + } + } + } + `) + rootExpectedProvides := viewerProvides( + providesAliasedScalarField("viewerName", "name"), + providesScalarField("__typename"), + providesScalarField("id"), + ) + entityExpectedProvides := viewerProvides( + providesScalarField("name"), + providesScalarField("__typename"), + providesScalarField("id"), + ) + expected := expectedViewerScenario( + // The root viewer fetch is widened with the hidden fields that the later + // handle entity fetch will need, but the response object still keeps only + // the user-visible alias at the root. + resolve.Sequence( + rootFetch(0, "http://viewer.service", ` + { + currentViewer { + viewerName: name + __typename + id + } + } + `, requestScopedField("currentViewer", rootExpectedProvides)), + rootFetch(1, "http://articles.service", ` + { + article { + id + title + __typename + } + } + `), + entityFetch(2, 1, "article", "http://viewer.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Article { + __typename + currentViewer { + name + __typename + id + } + } + } + } + `, requestScopedField("currentViewer", entityExpectedProvides)), + entityFetch(3, 2, "article.currentViewer", "http://handles.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Viewer { + __typename + handle + } + } + } + `), + ), + rootObject( + field("currentViewer", viewerObject( + stringFieldAt("viewerName", "viewerName"), + )), + field("article", articleObject( + scalarField("id"), + stringField("title"), + field("currentViewer", viewerObject( + stringField("handle"), + )), + )), + ), + []plannedRequestScopedContract{ + contract(0, "currentViewer", "viewer.currentViewer", "__typename", "__typename"), + contract(0, "currentViewer", "viewer.currentViewer", "viewerName", "name"), + contract(0, "currentViewer", "viewer.currentViewer", "id", "id"), + contract(2, "article.currentViewer", "viewer.currentViewer", "__typename", "__typename"), + contract(2, "article.currentViewer", "viewer.currentViewer", "id", "id"), + contract(2, "article.currentViewer", "viewer.currentViewer", "name", "name"), + }, + []plannedResponseBinding{ + binding("currentViewer.viewerName", "viewer.currentViewer", "viewerName"), + binding("article.currentViewer.handle", "viewer.currentViewer", "handle"), + }, + ) + + assert.Equal(t, expected, actual) + }) + + t.Run("requires-decorated field rewrites the first participant to include the hidden dependency", func(t *testing.T) { + t.Parallel() + + // The first participant only asks for id, but the second participant asks for + // handle on another subgraph, which requires `name` as an external field. + // Widening therefore has to rewrite the first fetch to include the hidden + // dependency field `name` even though the user did not ask for it there. + actual := planRequestScopedRequiresChainViewerScenario(t, true, ` + query Widening { + currentViewer { + id + } + article { + id + title + currentViewer { + handle + } + } + } + `) + + rootExpectedProvides := viewerProvides( + providesScalarField("id"), + providesScalarField("__typename"), + providesScalarField("name"), + ) + entityExpectedProvides := viewerProvides( + providesScalarField("name"), + providesScalarField("__typename"), + providesScalarField("id"), + ) + expected := expectedViewerScenario( + // The widened root fetch now carries id, __typename, and the hidden name + // dependency so the later handles subgraph can be fed without a viewer hop. + resolve.Sequence( + rootFetch(0, "http://viewer.service", ` + { + currentViewer { + id + __typename + name + } + } + `, requestScopedField("currentViewer", rootExpectedProvides)), + rootFetch(1, "http://articles.service", ` + { + article { + id + title + __typename + } + } + `), + entityFetch(2, 1, "article", "http://viewer.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Article { + __typename + currentViewer { + name + __typename + id + } + } + } + } + `, requestScopedField("currentViewer", entityExpectedProvides)), + entityFetch(3, 2, "article.currentViewer", "http://handles.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Viewer { + __typename + handle + } + } + } + `), + ), + rootObject( + field("currentViewer", viewerObject( + scalarField("id"), + )), + field("article", articleObject( + scalarField("id"), + stringField("title"), + field("currentViewer", viewerObject( + stringField("handle"), + )), + )), + ), + []plannedRequestScopedContract{ + contract(0, "currentViewer", "viewer.currentViewer", "__typename", "__typename"), + contract(0, "currentViewer", "viewer.currentViewer", "id", "id"), + contract(0, "currentViewer", "viewer.currentViewer", "name", "name"), + contract(2, "article.currentViewer", "viewer.currentViewer", "__typename", "__typename"), + contract(2, "article.currentViewer", "viewer.currentViewer", "id", "id"), + contract(2, "article.currentViewer", "viewer.currentViewer", "name", "name"), + }, + []plannedResponseBinding{ + binding("currentViewer.id", "viewer.currentViewer", "id"), + binding("article.currentViewer.handle", "viewer.currentViewer", "handle"), + }, + ) + + assert.Equal(t, expected, actual) + }) + + t.Run("three requestScoped participants widen to a common superset while keeping the user response unchanged", func(t *testing.T) { + t.Parallel() + + actual := planViewerScenario(t, requestScopedScenario{ + enableRequestScoped: true, + operationSDL: ` + query Widening { + currentViewer { + id + } + article { + id + title + currentViewer { + id + name + } + } + review { + id + body + currentViewer { + id + name + email + } + } + } + `, + }) + + expectedProvides := viewerProvides( + providesScalarField("id"), + providesScalarField("email"), + providesScalarField("name"), + ) + expected := expectedViewerScenario( + resolve.Sequence( + rootFetch(0, "http://viewer.service", ` + { + currentViewer { + id + email + name + } + } + `, requestScopedField("currentViewer", expectedProvides)), + rootFetch(1, "http://articles.service", ` + { + article { + id + title + __typename + } + } + `), + rootFetch(3, "http://reviews.service", ` + { + review { + id + body + __typename + } + } + `), + entityFetch(2, 1, "article", "http://viewer.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Article { + __typename + currentViewer { + id + name + email + } + } + } + } + `, requestScopedField("currentViewer", viewerProvides( + providesScalarField("id"), + providesScalarField("name"), + providesScalarField("email"), + ))), + entityFetch(4, 3, "review", "http://viewer.service", ` + query($representations: [_Any!]!) { + _entities(representations: $representations) { + ... on Review { + __typename + currentViewer { + id + name + email + } + } + } + } + `, requestScopedField("currentViewer", viewerProvides( + providesScalarField("id"), + providesScalarField("name"), + providesScalarField("email"), + ))), + ), + rootObject( + field("currentViewer", viewerObject( + scalarField("id"), + )), + field("article", articleObject( + scalarField("id"), + stringField("title"), + field("currentViewer", viewerObject( + scalarField("id"), + stringField("name"), + )), + )), + field("review", reviewObject( + scalarField("id"), + stringField("body"), + field("currentViewer", viewerObject( + scalarField("id"), + stringField("name"), + stringField("email"), + )), + )), + ), + []plannedRequestScopedContract{ + contract(0, "currentViewer", "viewer.currentViewer", "id", "id"), + contract(0, "currentViewer", "viewer.currentViewer", "email", "email"), + contract(0, "currentViewer", "viewer.currentViewer", "name", "name"), + contract(2, "article.currentViewer", "viewer.currentViewer", "id", "id"), + contract(2, "article.currentViewer", "viewer.currentViewer", "name", "name"), + contract(2, "article.currentViewer", "viewer.currentViewer", "email", "email"), + contract(4, "review.currentViewer", "viewer.currentViewer", "id", "id"), + contract(4, "review.currentViewer", "viewer.currentViewer", "name", "name"), + contract(4, "review.currentViewer", "viewer.currentViewer", "email", "email"), + }, + []plannedResponseBinding{ + binding("currentViewer.id", "viewer.currentViewer", "id"), + binding("article.currentViewer.id", "viewer.currentViewer", "id"), + binding("article.currentViewer.name", "viewer.currentViewer", "name"), + binding("review.currentViewer.id", "viewer.currentViewer", "id"), + binding("review.currentViewer.name", "viewer.currentViewer", "name"), + binding("review.currentViewer.email", "viewer.currentViewer", "email"), + }, + ) + + assert.Equal(t, expected, actual) + }) +} + +type requestScopedScenario struct { + enableRequestScoped bool + operationSDL string +} + +type plannedViewerScenario struct { + Plan *plan.SynchronousResponsePlan + RequestScoped []plannedRequestScopedContract + ResponseBindings []plannedResponseBinding +} + +type plannedRequestScopedContract struct { + FetchID int + ResponsePath string + L1Key string + RequestScopedKey string + SchemaField string + CacheArgs []string +} + +type plannedResponseBinding struct { + ResponsePath string + L1Key string + CacheKey string +} + +func planViewerScenario(t *testing.T, scenario requestScopedScenario) plannedViewerScenario { + t.Helper() + + planned := planRequestScopedWideningScenario(t, scenario.enableRequestScoped, scenario.operationSDL) + return postprocessViewerScenario(t, planned) +} + +func planRequestScopedRequiresChainViewerScenario(t *testing.T, enableRequestScoped bool, operationSDL string) plannedViewerScenario { + t.Helper() + + planned := planRequestScopedRequiresChainScenario(t, enableRequestScoped, operationSDL) + return postprocessViewerScenario(t, planned) +} + +func postprocessViewerScenario(t *testing.T, planned plan.Plan) plannedViewerScenario { + t.Helper() + + processor := postprocess.NewProcessor( + postprocess.DisableResolveInputTemplates(), + postprocess.DisableCreateConcreteSingleFetchTypes(), + postprocess.DisableCreateParallelNodes(), + postprocess.DisableMergeFields(), + ) + processor.Process(planned) + + syncPlan, ok := planned.(*plan.SynchronousResponsePlan) + require.True(t, ok) + require.NotNil(t, syncPlan.Response) + require.NotNil(t, syncPlan.Response.Fetches) + require.NotNil(t, syncPlan.Response.Data) + + return projectViewerScenario(t, syncPlan) +} + +func expectedViewerPlan(fetches *resolve.FetchTreeNode, data *resolve.Object) *plan.SynchronousResponsePlan { + return &plan.SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + Fetches: fetches, + Data: data, + }, + } +} + +func expectedViewerScenario(fetches *resolve.FetchTreeNode, data *resolve.Object, requestScoped []plannedRequestScopedContract, responseBindings []plannedResponseBinding) plannedViewerScenario { + sortRequestScopedContracts(requestScoped) + sortResponseBindings(responseBindings) + return plannedViewerScenario{ + Plan: expectedViewerPlan(fetches, data), + RequestScoped: requestScoped, + ResponseBindings: responseBindings, + } +} + +func projectViewerScenario(t *testing.T, syncPlan *plan.SynchronousResponsePlan) plannedViewerScenario { + t.Helper() + + plan := &plan.SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + Fetches: normalizeFetchTree(t, syncPlan.Response.Fetches), + Data: normalizeObject(syncPlan.Response.Data), + }, + } + requestScoped := collectRequestScopedContracts(plan.Response.Fetches) + responseBindings := collectResponseBindings(plan.Response.Fetches, plan.Response.Data) + sortRequestScopedContracts(requestScoped) + sortResponseBindings(responseBindings) + return plannedViewerScenario{ + Plan: plan, + RequestScoped: requestScoped, + ResponseBindings: responseBindings, + } +} + +func objectAtPath(obj *resolve.Object, path []string) *resolve.Object { + current := obj + for _, segment := range path { + if current == nil { + return nil + } + + var next resolve.Node + for _, field := range current.Fields { + if string(field.Name) == segment { + next = field.Value + break + } + } + if next == nil { + return nil + } + + switch typed := next.(type) { + case *resolve.Object: + current = typed + default: + return nil + } + } + return current +} + +func collectRequestScopedContracts(fetchTree *resolve.FetchTreeNode) []plannedRequestScopedContract { + var out []plannedRequestScopedContract + walkFetchTree(fetchTree, func(fetch *resolve.SingleFetch, responsePath string) { + for _, field := range fetch.Caching.RequestScopedFields { + objectPath := joinPath(responsePath, strings.Join(field.FieldPath, ".")) + for _, providedField := range field.ProvidesData.Fields { + out = append(out, plannedRequestScopedContract{ + FetchID: fetch.FetchID, + ResponsePath: objectPath, + L1Key: field.L1Key, + RequestScopedKey: string(providedField.Name), + SchemaField: providedField.SchemaFieldName(), + CacheArgs: cacheArgsStrings(providedField.CacheArgs), + }) + } + } + }) + return out +} + +func collectResponseBindings(fetchTree *resolve.FetchTreeNode, data *resolve.Object) []plannedResponseBinding { + var out []plannedResponseBinding + walkFetchTree(fetchTree, func(fetch *resolve.SingleFetch, responsePath string) { + for _, field := range fetch.Caching.RequestScopedFields { + objectPath := joinPath(responsePath, strings.Join(field.FieldPath, ".")) + responseObj := objectAtPath(data, strings.Split(objectPath, ".")) + if responseObj == nil { + continue + } + for _, responseField := range responseObj.Fields { + nodePath := responseField.Value.NodePath() + if len(nodePath) == 0 { + continue + } + out = append(out, plannedResponseBinding{ + ResponsePath: joinPath(objectPath, string(responseField.Name)), + L1Key: field.L1Key, + CacheKey: nodePath[0], + }) + } + } + }) + return out +} + +func walkFetchTree(node *resolve.FetchTreeNode, visit func(fetch *resolve.SingleFetch, responsePath string)) { + if node == nil { + return + } + if node.Item != nil { + if fetch, ok := node.Item.Fetch.(*resolve.SingleFetch); ok { + visit(fetch, node.Item.ResponsePath) + } + } + for _, child := range node.ChildNodes { + walkFetchTree(child, visit) + } +} + +func joinPath(parts ...string) string { + out := make([]string, 0, len(parts)) + for _, part := range parts { + if part == "" { + continue + } + out = append(out, part) + } + return strings.Join(out, ".") +} + +func cacheArgsStrings(args []resolve.CacheFieldArg) []string { + if len(args) == 0 { + return nil + } + out := make([]string, 0, len(args)) + for _, arg := range args { + out = append(out, fmt.Sprintf("%s:%s", arg.ArgName, arg.VariableName)) + } + return out +} + +func sortRequestScopedContracts(contracts []plannedRequestScopedContract) { + sort.Slice(contracts, func(i, j int) bool { + if contracts[i].FetchID != contracts[j].FetchID { + return contracts[i].FetchID < contracts[j].FetchID + } + if contracts[i].ResponsePath != contracts[j].ResponsePath { + return contracts[i].ResponsePath < contracts[j].ResponsePath + } + return contracts[i].RequestScopedKey < contracts[j].RequestScopedKey + }) +} + +func sortResponseBindings(bindings []plannedResponseBinding) { + sort.Slice(bindings, func(i, j int) bool { + return bindings[i].ResponsePath < bindings[j].ResponsePath + }) +} + +func normalizeFetchTree(t *testing.T, node *resolve.FetchTreeNode) *resolve.FetchTreeNode { + t.Helper() + + if node == nil { + return nil + } + + out := &resolve.FetchTreeNode{ + Kind: node.Kind, + } + if node.Item != nil { + singleFetch, ok := node.Item.Fetch.(*resolve.SingleFetch) + require.True(t, ok, "expected *resolve.SingleFetch, got %T", node.Item.Fetch) + item := &resolve.FetchItem{ + Fetch: normalizeSingleFetch(t, singleFetch), + ResponsePath: node.Item.ResponsePath, + ResponsePathElements: append([]string(nil), node.Item.ResponsePathElements...), + } + if len(node.Item.FetchPath) > 0 { + item.FetchPath = append([]resolve.FetchItemPathElement(nil), node.Item.FetchPath...) + } + out.Item = item + } + if len(node.ChildNodes) > 0 { + out.ChildNodes = make([]*resolve.FetchTreeNode, 0, len(node.ChildNodes)) + for _, child := range node.ChildNodes { + out.ChildNodes = append(out.ChildNodes, normalizeFetchTree(t, child)) + } + } + return out +} + +func normalizeSingleFetch(t *testing.T, fetch *resolve.SingleFetch) *resolve.SingleFetch { + t.Helper() + + return &resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: fetch.FetchID, + DependsOnFetchIDs: append([]int(nil), fetch.DependsOnFetchIDs...), + }, + DataSourceIdentifier: append([]byte(nil), fetch.DataSourceIdentifier...), + FetchConfiguration: resolve.FetchConfiguration{ + Input: normalizeFetchInput(t, fetch.Input), + DataSource: &Source{}, + RequiresEntityFetch: fetch.RequiresEntityFetch, + RequiresEntityBatchFetch: fetch.RequiresEntityBatchFetch, + PostProcessing: fetch.PostProcessing, + SetTemplateOutputToNullOnVariableNull: fetch.SetTemplateOutputToNullOnVariableNull, + Caching: resolve.FetchCacheConfiguration{ + RequestScopedFields: normalizeRequestScopedFields(fetch.Caching.RequestScopedFields), + }, + }, + } +} + +func normalizeRequestScopedFields(fields []resolve.RequestScopedField) []resolve.RequestScopedField { + if len(fields) == 0 { + return nil + } + out := make([]resolve.RequestScopedField, 0, len(fields)) + for _, field := range fields { + out = append(out, resolve.RequestScopedField{ + FieldName: field.FieldName, + FieldPath: append([]string(nil), field.FieldPath...), + L1Key: field.L1Key, + ProvidesData: normalizeObject(field.ProvidesData), + }) + } + return out +} + +func normalizeObject(obj *resolve.Object) *resolve.Object { + if obj == nil { + return nil + } + fields := make([]*resolve.Field, 0, len(obj.Fields)) + for _, field := range obj.Fields { + fields = append(fields, normalizeField(field)) + } + return &resolve.Object{ + Nullable: obj.Nullable, + Path: append([]string(nil), obj.Path...), + Fields: fields, + HasAliases: obj.HasAliases, + } +} + +func normalizeField(field *resolve.Field) *resolve.Field { + if field == nil { + return nil + } + out := &resolve.Field{ + Name: append([]byte(nil), field.Name...), + Value: normalizeNode(field.Value), + CacheArgs: append([]resolve.CacheFieldArg(nil), field.CacheArgs...), + } + if field.OriginalName != nil { + out.OriginalName = append([]byte(nil), field.OriginalName...) + } + return out +} + +func normalizeNode(node resolve.Node) resolve.Node { + switch n := node.(type) { + case *resolve.Object: + return normalizeObject(n) + case *resolve.Array: + return &resolve.Array{ + Path: append([]string(nil), n.Path...), + Nullable: n.Nullable, + Item: normalizeNode(n.Item), + } + case *resolve.String: + return &resolve.String{ + Path: append([]string(nil), n.Path...), + Nullable: n.Nullable, + } + case *resolve.Scalar: + return &resolve.Scalar{ + Path: append([]string(nil), n.Path...), + Nullable: n.Nullable, + } + case *resolve.Integer: + return &resolve.Integer{ + Path: append([]string(nil), n.Path...), + Nullable: n.Nullable, + } + case *resolve.Float: + return &resolve.Float{ + Path: append([]string(nil), n.Path...), + Nullable: n.Nullable, + } + case *resolve.Boolean: + return &resolve.Boolean{ + Path: append([]string(nil), n.Path...), + Nullable: n.Nullable, + } + case *resolve.BigInt: + return &resolve.BigInt{ + Path: append([]string(nil), n.Path...), + Nullable: n.Nullable, + } + case *resolve.StaticString: + return &resolve.StaticString{ + Path: n.Path, + } + default: + panic(fmt.Sprintf("unsupported resolve node type %T", node)) + } +} + +func normalizeFetchInput(t *testing.T, input string) string { + t.Helper() + + url := extractFetchInputField(t, input, "url") + query := extractQueryFromFetchInput(t, input) + + return graphqlInput(url, query) +} + +func extractFetchInputField(t *testing.T, input, key string) string { + t.Helper() + + match := regexp.MustCompile(`"` + regexp.QuoteMeta(key) + `":"((?:\\.|[^"])*)"`).FindStringSubmatch(input) + require.Len(t, match, 2, input) + + value, err := strconv.Unquote(`"` + match[1] + `"`) + require.NoError(t, err) + + return value +} + +func extractQueryFromFetchInput(t *testing.T, input string) string { + t.Helper() + + match := regexp.MustCompile(`"query":"((?:\\.|[^"])*)"`).FindStringSubmatch(input) + require.Len(t, match, 2, input) + + query, err := strconv.Unquote(`"` + match[1] + `"`) + require.NoError(t, err) + require.NotEmpty(t, query) + + return query +} + +func graphqlInput(url, query string) string { + return fmt.Sprintf( + `{"method":"POST","url":%s,"body":{"query":%s}}`, + strconv.Quote(url), + strconv.Quote(unsafeprinter.Prettify(query)), + ) +} + +func rootFetch(fetchID int, url, query string, requestScopedFields ...resolve.RequestScopedField) *resolve.FetchTreeNode { + return resolve.Single(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: fetchID, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + FetchConfiguration: resolve.FetchConfiguration{ + Input: graphqlInput(url, query), + DataSource: &Source{}, + PostProcessing: DefaultPostProcessingConfiguration, + Caching: resolve.FetchCacheConfiguration{ + RequestScopedFields: requestScopedFields, + }, + }, + }) +} + +func entityFetch(fetchID int, dependsOnFetchID int, responsePath, url, query string, requestScopedFields ...resolve.RequestScopedField) *resolve.FetchTreeNode { + return resolve.SingleWithPath(&resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: fetchID, + DependsOnFetchIDs: []int{dependsOnFetchID}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + FetchConfiguration: resolve.FetchConfiguration{ + Input: graphqlInput(url, query), + DataSource: &Source{}, + RequiresEntityFetch: true, + PostProcessing: SingleEntityPostProcessingConfiguration, + SetTemplateOutputToNullOnVariableNull: true, + Caching: resolve.FetchCacheConfiguration{ + RequestScopedFields: requestScopedFields, + }, + }, + }, responsePath, entityFetchPath(responsePath)...) +} + +func entityFetchPath(responsePath string) []resolve.FetchItemPathElement { + if responsePath == "" { + return nil + } + + segments := strings.Split(responsePath, ".") + path := make([]resolve.FetchItemPathElement, 0, len(segments)) + for _, segment := range segments { + path = append(path, resolve.ObjectPath(segment)) + } + return path +} + +func requestScopedField(fieldName string, providesData *resolve.Object) resolve.RequestScopedField { + return resolve.RequestScopedField{ + FieldName: fieldName, + FieldPath: []string{fieldName}, + L1Key: "viewer.currentViewer", + ProvidesData: providesData, + } +} + +func rootObject(fields ...*resolve.Field) *resolve.Object { + return &resolve.Object{ + Fields: fields, + } +} + +func viewerObject(fields ...*resolve.Field) *resolve.Object { + return &resolve.Object{ + Nullable: true, + Path: []string{"currentViewer"}, + Fields: fields, + } +} + +func articleObject(fields ...*resolve.Field) *resolve.Object { + return &resolve.Object{ + Path: []string{"article"}, + Fields: fields, + } +} + +func reviewObject(fields ...*resolve.Field) *resolve.Object { + return &resolve.Object{ + Path: []string{"review"}, + Fields: fields, + } +} + +func field(name string, value resolve.Node) *resolve.Field { + return &resolve.Field{ + Name: []byte(name), + Value: value, + } +} + +func scalarField(name string) *resolve.Field { + return scalarFieldAt(name, name) +} + +func scalarFieldAt(name, path string) *resolve.Field { + return &resolve.Field{ + Name: []byte(name), + Value: &resolve.Scalar{ + Path: []string{path}, + }, + } +} + +func stringField(name string) *resolve.Field { + return stringFieldAt(name, name) +} + +func stringFieldAt(name, path string) *resolve.Field { + return &resolve.Field{ + Name: []byte(name), + Value: &resolve.String{ + Path: []string{path}, + }, + } +} + +func aliasedStringFieldAt(name, originalName, path string) *resolve.Field { + return &resolve.Field{ + Name: []byte(name), + OriginalName: []byte(originalName), + Value: &resolve.String{ + Path: []string{path}, + }, + } +} + +func postsDataField(item *resolve.Object) *resolve.Field { + return postsDataFieldAt("posts", item) +} + +func postsDataFieldAt(path string, item *resolve.Object) *resolve.Field { + return &resolve.Field{ + Name: []byte("posts"), + Value: &resolve.Array{ + Path: []string{path}, + Item: item, + }, + } +} + +func postItem(fields ...*resolve.Field) *resolve.Object { + return &resolve.Object{ + Fields: fields, + } +} + +func viewerProvides(fields ...*resolve.Field) *resolve.Object { + obj := &resolve.Object{ + Nullable: true, + Path: []string{"currentViewer"}, + Fields: fields, + } + resolve.ComputeHasAliases(obj) + return obj +} + +func postItemProvides(fields ...*resolve.Field) *resolve.Object { + obj := &resolve.Object{ + Fields: fields, + } + resolve.ComputeHasAliases(obj) + return obj +} + +func providesScalarField(name string) *resolve.Field { + return &resolve.Field{ + Name: []byte(name), + Value: &resolve.Scalar{ + Path: []string{name}, + }, + } +} + +func providesAliasedScalarField(name, originalName string) *resolve.Field { + return &resolve.Field{ + Name: []byte(name), + OriginalName: []byte(originalName), + Value: &resolve.Scalar{ + Path: []string{name}, + }, + } +} + +func providesArrayField(name, originalName, variableName string, item *resolve.Object) *resolve.Field { + field := &resolve.Field{ + Name: []byte(name), + Value: &resolve.Array{ + Path: []string{name}, + Item: item, + }, + CacheArgs: []resolve.CacheFieldArg{ + { + ArgName: "first", + VariableName: variableName, + }, + }, + } + if originalName != "" { + field.OriginalName = []byte(originalName) + } + return field +} + +func contract(fetchID int, responsePath, l1Key, requestScopedKey, schemaField string, cacheArgs ...string) plannedRequestScopedContract { + return plannedRequestScopedContract{ + FetchID: fetchID, + ResponsePath: responsePath, + L1Key: l1Key, + RequestScopedKey: requestScopedKey, + SchemaField: schemaField, + CacheArgs: cacheArgs, + } +} + +func binding(responsePath, l1Key, cacheKey string) plannedResponseBinding { + return plannedResponseBinding{ + ResponsePath: responsePath, + L1Key: l1Key, + CacheKey: cacheKey, + } +} + +func planRequestScopedWideningScenario(t *testing.T, enableRequestScoped bool, operationSDL string) plan.Plan { + t.Helper() + + const definitionSDL = ` + directive @tag(label: String!) on FIELD + + schema { query: Query } + + type Query { + currentViewer: Viewer + article: Article! + review: Review! + } + + type Viewer { + id: ID! + name: String! + email: String! + handle: String! + posts(first: Int!): [Post!]! + } + + type Post { + id: ID! + title: String! + } + + type Article { + id: ID! + title: String! + currentViewer: Viewer + } + + type Review { + id: ID! + body: String! + currentViewer: Viewer + } + ` + + def := unsafeparser.ParseGraphqlDocumentString(definitionSDL) + require.NoError(t, asttransform.MergeDefinitionWithBaseSchema(&def)) + + op := unsafeparser.ParseGraphqlDocumentString(operationSDL) + report := &operationreport.Report{} + + normalizer := astnormalization.NewWithOpts( + astnormalization.WithExtractVariables(), + astnormalization.WithInlineFragmentSpreads(), + astnormalization.WithRemoveFragmentDefinitions(), + astnormalization.WithRemoveUnusedVariables(), + ) + normalizer.NormalizeOperation(&op, &def, report) + require.False(t, report.HasErrors(), report.Error()) + + validator := astvalidation.DefaultOperationValidator() + validator.Validate(&op, &def, report) + require.False(t, report.HasErrors(), report.Error()) + + plannerInstance, err := plan.NewPlanner(plan.Configuration{ + DataSources: buildRequestScopedWideningDataSources(t, enableRequestScoped), + DisableResolveFieldPositions: true, + DisableEntityCaching: true, + Fields: plan.FieldConfigurations{ + { + TypeName: "Viewer", + FieldName: "posts", + Arguments: plan.ArgumentsConfigurations{ + { + Name: "first", + SourceType: plan.FieldArgumentSource, + SourcePath: []string{"first"}, + }, + }, + }, + }, + }) + require.NoError(t, err) + + result := plannerInstance.Plan(&op, &def, "Widening", report) + require.False(t, report.HasErrors(), report.Error()) + + return result +} + +func buildRequestScopedWideningDataSources(t *testing.T, enableRequestScoped bool) []plan.DataSource { + t.Helper() + + const viewerSDL = ` + directive @tag(label: String!) on FIELD + + type Query { + currentViewer: Viewer + } + + type Article @key(fields: "id") { + id: ID! + currentViewer: Viewer + } + + type Review @key(fields: "id") { + id: ID! + currentViewer: Viewer + } + + type Viewer @key(fields: "id") { + id: ID! + name: String! + email: String! + handle: String! + posts(first: Int!): [Post!]! + } + + type Post { + id: ID! + title: String! + } + ` + + const articlesSDL = ` + type Query { + article: Article! + } + + type Article @key(fields: "id") { + id: ID! + title: String! + } + ` + + const reviewsSDL = ` + type Query { + review: Review! + } + + type Review @key(fields: "id") { + id: ID! + body: String! + } + ` + + viewerMetadata := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"currentViewer"}}, + {TypeName: "Article", FieldNames: []string{"id", "currentViewer"}}, + {TypeName: "Review", FieldNames: []string{"id", "currentViewer"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "Viewer", FieldNames: []string{"id", "name", "email", "handle", "posts"}}, + {TypeName: "Post", FieldNames: []string{"id", "title"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Viewer", SelectionSet: "id"}, + {TypeName: "Article", SelectionSet: "id"}, + {TypeName: "Review", SelectionSet: "id"}, + }, + }, + } + if enableRequestScoped { + viewerMetadata.FederationMetaData.RequestScopedFields = []plan.RequestScopedField{ + {TypeName: "Query", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + {TypeName: "Article", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + {TypeName: "Review", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + } + } + + articlesMetadata := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"article"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "Article", FieldNames: []string{"id", "title"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Article", SelectionSet: "id"}, + }, + }, + } + + reviewsMetadata := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"review"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "Review", FieldNames: []string{"id", "body"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Review", SelectionSet: "id"}, + }, + }, + } + + viewerConfiguration := mustCustomConfiguration(t, ConfigurationInput{ + Fetch: &FetchConfiguration{ + URL: "http://viewer.service", + }, + SchemaConfiguration: mustSchema(t, &FederationConfiguration{ + Enabled: true, + ServiceSDL: viewerSDL, + }, viewerSDL), + }) + + articlesConfiguration := mustCustomConfiguration(t, ConfigurationInput{ + Fetch: &FetchConfiguration{ + URL: "http://articles.service", + }, + SchemaConfiguration: mustSchema(t, &FederationConfiguration{ + Enabled: true, + ServiceSDL: articlesSDL, + }, articlesSDL), + }) + + reviewsConfiguration := mustCustomConfiguration(t, ConfigurationInput{ + Fetch: &FetchConfiguration{ + URL: "http://reviews.service", + }, + SchemaConfiguration: mustSchema(t, &FederationConfiguration{ + Enabled: true, + ServiceSDL: reviewsSDL, + }, reviewsSDL), + }) + + return []plan.DataSource{ + mustDataSourceConfiguration(t, "viewer", viewerMetadata, viewerConfiguration), + mustDataSourceConfiguration(t, "articles", articlesMetadata, articlesConfiguration), + mustDataSourceConfiguration(t, "reviews", reviewsMetadata, reviewsConfiguration), + } +} + +func planRequestScopedRequiresChainScenario(t *testing.T, enableRequestScoped bool, operationSDL string) plan.Plan { + t.Helper() + + const definitionSDL = ` + directive @tag(label: String!) on FIELD + + schema { query: Query } + + type Query { + currentViewer: Viewer + article: Article! + } + + type Viewer { + id: ID! + name: String! + handle: String! + } + + type Article { + id: ID! + title: String! + currentViewer: Viewer + } + ` + + def := unsafeparser.ParseGraphqlDocumentString(definitionSDL) + require.NoError(t, asttransform.MergeDefinitionWithBaseSchema(&def)) + + op := unsafeparser.ParseGraphqlDocumentString(operationSDL) + report := &operationreport.Report{} + + normalizer := astnormalization.NewWithOpts( + astnormalization.WithExtractVariables(), + astnormalization.WithInlineFragmentSpreads(), + astnormalization.WithRemoveFragmentDefinitions(), + astnormalization.WithRemoveUnusedVariables(), + ) + normalizer.NormalizeOperation(&op, &def, report) + require.False(t, report.HasErrors(), report.Error()) + + validator := astvalidation.DefaultOperationValidator() + validator.Validate(&op, &def, report) + require.False(t, report.HasErrors(), report.Error()) + + plannerInstance, err := plan.NewPlanner(plan.Configuration{ + DataSources: buildRequestScopedRequiresChainDataSources(t, enableRequestScoped), + DisableResolveFieldPositions: true, + DisableEntityCaching: true, + }) + require.NoError(t, err) + + result := plannerInstance.Plan(&op, &def, "Widening", report) + require.False(t, report.HasErrors(), report.Error()) + + return result +} + +func buildRequestScopedRequiresChainDataSources(t *testing.T, enableRequestScoped bool) []plan.DataSource { + t.Helper() + + const viewerSDL = ` + type Query { + currentViewer: Viewer + } + + type Article @key(fields: "id") { + id: ID! + currentViewer: Viewer + } + + type Viewer @key(fields: "id") { + id: ID! + name: String! + } + ` + + const articlesSDL = ` + type Query { + article: Article! + } + + type Article @key(fields: "id") { + id: ID! + title: String! + } + ` + + const handlesSDL = ` + directive @external on FIELD_DEFINITION + directive @requires(fields: String!) on FIELD_DEFINITION + + type Viewer @key(fields: "id") { + id: ID! @external + name: String! @external + handle: String! @requires(fields: "name") + } + ` + + viewerMetadata := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"currentViewer"}}, + {TypeName: "Article", FieldNames: []string{"id", "currentViewer"}}, + {TypeName: "Viewer", FieldNames: []string{"id", "name"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "Viewer", FieldNames: []string{"id", "name"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Viewer", SelectionSet: "id"}, + {TypeName: "Article", SelectionSet: "id"}, + }, + }, + } + if enableRequestScoped { + viewerMetadata.FederationMetaData.RequestScopedFields = []plan.RequestScopedField{ + {TypeName: "Query", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + {TypeName: "Article", FieldName: "currentViewer", L1Key: "viewer.currentViewer"}, + } + } + + articlesMetadata := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Query", FieldNames: []string{"article"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "Article", FieldNames: []string{"id", "title"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Article", SelectionSet: "id"}, + }, + }, + } + + handlesMetadata := &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + {TypeName: "Viewer", FieldNames: []string{"id", "handle"}, ExternalFieldNames: []string{"name"}}, + }, + ChildNodes: []plan.TypeField{ + {TypeName: "Viewer", FieldNames: []string{"id", "handle"}, ExternalFieldNames: []string{"name"}}, + }, + FederationMetaData: plan.FederationMetaData{ + Keys: plan.FederationFieldConfigurations{ + {TypeName: "Viewer", SelectionSet: "id"}, + }, + Requires: plan.FederationFieldConfigurations{ + {TypeName: "Viewer", FieldName: "handle", SelectionSet: "name"}, + }, + }, + } + + viewerConfiguration := mustCustomConfiguration(t, ConfigurationInput{ + Fetch: &FetchConfiguration{URL: "http://viewer.service"}, + SchemaConfiguration: mustSchema(t, &FederationConfiguration{ + Enabled: true, + ServiceSDL: viewerSDL, + }, viewerSDL), + }) + + articlesConfiguration := mustCustomConfiguration(t, ConfigurationInput{ + Fetch: &FetchConfiguration{URL: "http://articles.service"}, + SchemaConfiguration: mustSchema(t, &FederationConfiguration{ + Enabled: true, + ServiceSDL: articlesSDL, + }, articlesSDL), + }) + + handlesConfiguration := mustCustomConfiguration(t, ConfigurationInput{ + Fetch: &FetchConfiguration{URL: "http://handles.service"}, + SchemaConfiguration: mustSchema(t, &FederationConfiguration{ + Enabled: true, + ServiceSDL: handlesSDL, + }, handlesSDL), + }) + + return []plan.DataSource{ + mustDataSourceConfiguration(t, "viewer", viewerMetadata, viewerConfiguration), + mustDataSourceConfiguration(t, "articles", articlesMetadata, articlesConfiguration), + mustDataSourceConfiguration(t, "handles", handlesMetadata, handlesConfiguration), + } +} diff --git a/v2/pkg/engine/datasource/graphql_datasource/resolve_argument_path_test.go b/v2/pkg/engine/datasource/graphql_datasource/resolve_argument_path_test.go new file mode 100644 index 0000000000..6de3801604 --- /dev/null +++ b/v2/pkg/engine/datasource/graphql_datasource/resolve_argument_path_test.go @@ -0,0 +1,63 @@ +package graphql_datasource + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +func TestResolveArgumentPath(t *testing.T) { + args := []resolve.FieldArgument{ + { + Name: "id", + Variable: &resolve.ContextVariable{Path: []string{"a"}}, + }, + { + Name: "key", + Variable: &resolve.ContextVariable{Path: []string{"b"}}, + }, + } + + t.Run("empty path returns unchanged", func(t *testing.T) { + result := resolveArgumentPath(nil, args) + assert.Nil(t, result) + }) + + t.Run("single element resolves to variable path", func(t *testing.T) { + result := resolveArgumentPath([]string{"id"}, args) + assert.Equal(t, []string{"a"}, result) + }) + + t.Run("unknown argument returns unchanged", func(t *testing.T) { + result := resolveArgumentPath([]string{"unknown"}, args) + assert.Equal(t, []string{"unknown"}, result) + }) + + t.Run("nested path resolves root and appends rest", func(t *testing.T) { + result := resolveArgumentPath([]string{"key", "sellerId"}, args) + assert.Equal(t, []string{"b", "sellerId"}, result) + }) + + t.Run("deeply nested path resolves root and appends rest", func(t *testing.T) { + result := resolveArgumentPath([]string{"key", "address", "id"}, args) + assert.Equal(t, []string{"b", "address", "id"}, result) + }) + + t.Run("nested path with unknown root returns unchanged", func(t *testing.T) { + result := resolveArgumentPath([]string{"missing", "field"}, args) + assert.Equal(t, []string{"missing", "field"}, result) + }) + + t.Run("non-context-variable returns original path", func(t *testing.T) { + argsWithObjectVar := []resolve.FieldArgument{ + { + Name: "obj", + Variable: &resolve.ObjectVariable{Path: []string{"x"}}, + }, + } + result := resolveArgumentPath([]string{"obj", "field"}, argsWithObjectVar) + assert.Equal(t, []string{"obj", "field"}, result) + }) +} diff --git a/v2/pkg/engine/datasource/grpc_datasource/json_builder.go b/v2/pkg/engine/datasource/grpc_datasource/json_builder.go index 556371d511..531439817c 100644 --- a/v2/pkg/engine/datasource/grpc_datasource/json_builder.go +++ b/v2/pkg/engine/datasource/grpc_datasource/json_builder.go @@ -54,7 +54,7 @@ func (j *jsonBuilder) mergeValues(left *astjson.Value, right resultData) (*astjs if right.kind != CallKindEntity { // No federation index map available - use simple merge // This path is taken for non-federated queries - root, _, err := astjson.MergeValues(j.jsonArena, left, right.response) + root, err := astjson.MergeValues(j.jsonArena, left, right.response) if err != nil { return nil, err } @@ -331,7 +331,7 @@ func (j *jsonBuilder) marshalResponseJSON(message *RPCMessage, data protoref.Mes if field.JSONPath == "" { // Field should be merged into parent object (flattened) - root, _, err = astjson.MergeValues(j.jsonArena, root, value) + root, err = astjson.MergeValues(j.jsonArena, root, value) if err != nil { return nil, err } diff --git a/v2/pkg/engine/datasource/introspection_datasource/planner_test.go b/v2/pkg/engine/datasource/introspection_datasource/planner_test.go index 96f57d1e1e..b0a8dd91c9 100644 --- a/v2/pkg/engine/datasource/introspection_datasource/planner_test.go +++ b/v2/pkg/engine/datasource/introspection_datasource/planner_test.go @@ -143,6 +143,7 @@ func TestIntrospectionDataSourcePlanning(t *testing.T) { PostProcessing: resolve.PostProcessingConfiguration{ MergePath: []string{"__type"}, }, + // Note: UseL1Cache is cleared to false by the test framework when WithCacheKeyTemplates() is not used }, }, }, @@ -218,6 +219,7 @@ func TestIntrospectionDataSourcePlanning(t *testing.T) { PostProcessing: resolve.PostProcessingConfiguration{ MergePath: []string{"__schema"}, }, + // Note: UseL1Cache is cleared to false by the test framework when WithCacheKeyTemplates() is not used }, }, }, @@ -286,6 +288,7 @@ func TestIntrospectionDataSourcePlanning(t *testing.T) { PostProcessing: resolve.PostProcessingConfiguration{ MergePath: []string{"__schema"}, }, + // Note: UseL1Cache is cleared to false by the test framework when WithCacheKeyTemplates() is not used }, }, }, @@ -416,6 +419,7 @@ func TestIntrospectionDataSourcePlanning(t *testing.T) { PostProcessing: resolve.PostProcessingConfiguration{ MergePath: []string{"__type"}, }, + // Note: UseL1Cache is cleared to false by the test framework when WithCacheKeyTemplates() is not used }, }, }, diff --git a/v2/pkg/engine/datasource/service_datasource/config_factory.go b/v2/pkg/engine/datasource/service_datasource/config_factory.go new file mode 100644 index 0000000000..7a3a2bab94 --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/config_factory.go @@ -0,0 +1,104 @@ +package service_datasource + +import ( + "fmt" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" +) + +const ( + // ServiceDataSourceID is the unique identifier for the service datasource. + ServiceDataSourceID = "service_datasource" +) + +// ServiceConfigFactory creates the datasource configuration for the __service field. +type ServiceConfigFactory struct { + service *Service +} + +// NewServiceConfigFactory creates a new ServiceConfigFactory with the given options. +func NewServiceConfigFactory(opts ServiceOptions) *ServiceConfigFactory { + return &ServiceConfigFactory{ + service: NewService(opts), + } +} + +// NewServiceConfigFactoryWithSchema creates a factory that also extends +// the provided schema with service capability types (_Service, _Capability) +// and the __service field on the Query type. +// +// This is the recommended method for Cosmo router integration where the schema +// is built programmatically and needs to include service capability types. +// +// Usage: +// +// // Parse user schema +// schema, _ := astparser.ParseGraphqlDocumentString(userSchemaSDL) +// +// // Merge with base schema (adds introspection types) +// asttransform.MergeDefinitionWithBaseSchema(&schema) +// +// // Extend with service types (adds _Service, _Capability, __service) +// factory, err := service_datasource.NewServiceConfigFactoryWithSchema(&schema, opts) +// +// // Add datasource configurations +// planConfig.DataSources = append(planConfig.DataSources, factory.BuildDataSourceConfigurations()...) +// planConfig.Fields = append(planConfig.Fields, factory.BuildFieldConfigurations()...) +func NewServiceConfigFactoryWithSchema(schema *ast.Document, opts ServiceOptions) (*ServiceConfigFactory, error) { + // Extend schema with _Service, _Capability types and __service field + if err := ExtendSchemaWithServiceTypes(schema); err != nil { + return nil, fmt.Errorf("failed to extend schema with service types: %w", err) + } + + return &ServiceConfigFactory{ + service: NewService(opts), + }, nil +} + +// BuildFieldConfigurations returns the field configurations for the __service field. +func (f *ServiceConfigFactory) BuildFieldConfigurations() plan.FieldConfigurations { + return plan.FieldConfigurations{ + { + TypeName: "Query", + FieldName: "__service", + }, + } +} + +// BuildDataSourceConfigurations returns the datasource configurations for the __service field. +func (f *ServiceConfigFactory) BuildDataSourceConfigurations() []plan.DataSource { + ds, _ := f.buildDataSourceConfiguration() + return []plan.DataSource{ds} +} + +func (f *ServiceConfigFactory) buildDataSourceConfiguration() (plan.DataSourceConfiguration[Configuration], error) { + return plan.NewDataSourceConfiguration[Configuration]( + ServiceDataSourceID, + NewFactory[Configuration](f.service), + &plan.DataSourceMetadata{ + RootNodes: []plan.TypeField{ + { + TypeName: "Query", + FieldNames: []string{"__service"}, + }, + }, + ChildNodes: []plan.TypeField{ + { + TypeName: "_Service", + FieldNames: []string{"capabilities", "__typename"}, + }, + { + TypeName: "_Capability", + FieldNames: []string{"identifier", "value", "description", "__typename"}, + }, + }, + }, + Configuration{SourceType: "Service: __service"}, + ) +} + +// Service returns the underlying Service for testing purposes. +func (f *ServiceConfigFactory) Service() *Service { + return f.service +} diff --git a/v2/pkg/engine/datasource/service_datasource/factory.go b/v2/pkg/engine/datasource/service_datasource/factory.go new file mode 100644 index 0000000000..430737af5a --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/factory.go @@ -0,0 +1,44 @@ +package service_datasource + +import ( + "context" + + "github.com/jensneuse/abstractlogger" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" +) + +// Factory creates planners for the __service field. +type Factory[T Configuration] struct { + service *Service +} + +// NewFactory creates a new Factory with the given service configuration. +func NewFactory[T Configuration](service *Service) *Factory[T] { + return &Factory[T]{service: service} +} + +// Planner implements the PlannerFactory interface. +func (f *Factory[T]) Planner(logger abstractlogger.Logger) plan.DataSourcePlanner[T] { + return &Planner[T]{service: f.service} +} + +// Context implements the PlannerFactory interface. +func (f *Factory[T]) Context() context.Context { + return context.TODO() +} + +// UpstreamSchema implements the PlannerFactory interface. +func (f *Factory[T]) UpstreamSchema(_ plan.DataSourceConfiguration[T]) (*ast.Document, bool) { + return nil, false +} + +// PlanningBehavior implements the PlannerFactory interface. +func (f *Factory[T]) PlanningBehavior() plan.DataSourcePlanningBehavior { + return plan.DataSourcePlanningBehavior{ + MergeAliasedRootNodes: false, + OverrideFieldPathFromAlias: true, + AllowPlanningTypeName: true, + } +} diff --git a/v2/pkg/engine/datasource/service_datasource/planner.go b/v2/pkg/engine/datasource/service_datasource/planner.go new file mode 100644 index 0000000000..c97f5dead1 --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/planner.go @@ -0,0 +1,80 @@ +package service_datasource + +import ( + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/plan" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +const ( + serviceFieldName = "__service" +) + +// Configuration is the configuration for the service datasource. +type Configuration struct { + SourceType string +} + +// Planner is the planner for the __service field. +type Planner[T Configuration] struct { + id int + service *Service + v *plan.Visitor + rootField int + rootFieldPath string +} + +// SetID implements the DataSourcePlanner interface. +func (p *Planner[T]) SetID(id int) { + p.id = id +} + +// ID implements the DataSourcePlanner interface. +func (p *Planner[T]) ID() (id int) { + return p.id +} + +// Register implements the DataSourcePlanner interface. +func (p *Planner[T]) Register(visitor *plan.Visitor, dataSourceConfiguration plan.DataSourceConfiguration[T], dataSourcePlannerConfiguration plan.DataSourcePlannerConfiguration) error { + p.v = visitor + p.rootField = ast.InvalidRef + visitor.Walker.RegisterEnterFieldVisitor(p) + return nil +} + +// DownstreamResponseFieldAlias implements the DataSourcePlanner interface. +func (p *Planner[T]) DownstreamResponseFieldAlias(_ int) (alias string, exists bool) { + return +} + +// EnterField is called when entering a field. +func (p *Planner[T]) EnterField(ref int) { + fieldName := p.v.Operation.FieldNameString(ref) + fieldAliasOrName := p.v.Operation.FieldAliasOrNameString(ref) + if fieldName == serviceFieldName { + p.rootField = ref + p.rootFieldPath = fieldAliasOrName + } +} + +// ConfigureFetch implements the DataSourcePlanner interface. +func (p *Planner[T]) ConfigureFetch() resolve.FetchConfiguration { + if p.rootField == ast.InvalidRef { + return resolve.FetchConfiguration{} + } + + postProcessing := resolve.PostProcessingConfiguration{ + MergePath: []string{p.rootFieldPath}, + } + + return resolve.FetchConfiguration{ + Input: `{}`, + DataSource: NewSource(p.service), + PostProcessing: postProcessing, + } +} + +// ConfigureSubscription implements the DataSourcePlanner interface. +func (p *Planner[T]) ConfigureSubscription() plan.SubscriptionConfiguration { + return plan.SubscriptionConfiguration{} +} diff --git a/v2/pkg/engine/datasource/service_datasource/schema.go b/v2/pkg/engine/datasource/service_datasource/schema.go new file mode 100644 index 0000000000..6dcaf1ddd0 --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/schema.go @@ -0,0 +1,211 @@ +package service_datasource + +import ( + "fmt" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +// ServiceSDL is the GraphQL SDL for service capability types. +// This is provided for documentation purposes. The actual types are added +// programmatically via ExtendSchemaWithServiceTypes for robustness. +const ServiceSDL = `""" +Service capabilities exposed via __service query. +""" +type _Service { + """ + List of capabilities supported by this service. + """ + capabilities: [_Capability!]! +} + +""" +A single service capability. +""" +type _Capability { + """ + Unique identifier for this capability (e.g., "graphql.onError"). + """ + identifier: String! + """ + Optional value associated with the capability. + """ + value: String + """ + Human-readable description of the capability. + """ + description: String +} +` + +// ExtendSchemaWithServiceTypes adds _Service, _Capability types and +// __service field to the Query type in the given schema document. +// This follows the same pattern as MergeDefinitionWithBaseSchema for introspection. +// +// The function: +// 1. Adds the _Capability type with identifier, value, and description fields +// 2. Adds the _Service type with capabilities field +// 3. Adds the __service field to the Query type +// +// This is the recommended integration method for Cosmo router and similar frameworks +// that need to extend an existing schema with service capabilities. +// +// IMPORTANT: Call this AFTER MergeDefinitionWithBaseSchema if you need both +// introspection types and service capability types. +func ExtendSchemaWithServiceTypes(schema *ast.Document) error { + // 1. Find Query type first to fail fast + queryNode, found := findQueryType(schema) + if !found { + return fmt.Errorf("query type not found in schema") + } + + // 2. Add _Capability type (must be added before _Service since _Service references it) + addCapabilityType(schema) + + // 3. Add _Service type + addServiceType(schema) + + // 4. Add __service field to Query type + addServiceField(schema, queryNode.Ref) + + return nil +} + +// findQueryType locates the Query type in the schema document. +func findQueryType(schema *ast.Document) (ast.Node, bool) { + // First try to find via index (handles custom query type names) + if len(schema.Index.QueryTypeName) > 0 { + queryNode, ok := schema.Index.FirstNodeByNameBytes(schema.Index.QueryTypeName) + if ok { + return queryNode, true + } + } + + // Fall back to looking for "Query" by name + queryNode, ok := schema.Index.FirstNodeByNameStr("Query") + if ok { + return queryNode, true + } + + // Manual search through root nodes + for i := range schema.RootNodes { + if schema.RootNodes[i].Kind == ast.NodeKindObjectTypeDefinition { + name := schema.ObjectTypeDefinitionNameString(schema.RootNodes[i].Ref) + if name == "Query" { + return schema.RootNodes[i], true + } + } + } + + return ast.Node{}, false +} + +// addCapabilityType adds the _Capability type to the schema: +// +// type _Capability { +// identifier: String! +// value: String +// description: String +// } +func addCapabilityType(schema *ast.Document) { + // Check if type already exists + if _, found := schema.Index.FirstNodeByNameStr("_Capability"); found { + return + } + + // identifier: String! + identifierTypeRef := schema.AddNonNullNamedType([]byte("String")) + identifierFieldRef := schema.ImportFieldDefinition( + "identifier", + "Unique identifier for this capability (e.g., \"graphql.onError\").", + identifierTypeRef, + nil, + nil, + ) + + // value: String + valueTypeRef := schema.AddNamedType([]byte("String")) + valueFieldRef := schema.ImportFieldDefinition( + "value", + "Optional value associated with the capability.", + valueTypeRef, + nil, + nil, + ) + + // description: String + descTypeRef := schema.AddNamedType([]byte("String")) + descFieldRef := schema.ImportFieldDefinition( + "description", + "Human-readable description of the capability.", + descTypeRef, + nil, + nil, + ) + + // Create _Capability type + schema.ImportObjectTypeDefinition( + "_Capability", + "A single service capability.", + []int{identifierFieldRef, valueFieldRef, descFieldRef}, + nil, + ) +} + +// addServiceType adds the _Service type to the schema: +// +// type _Service { +// capabilities: [_Capability!]! +// } +func addServiceType(schema *ast.Document) { + // Check if type already exists + if _, found := schema.Index.FirstNodeByNameStr("_Service"); found { + return + } + + // capabilities: [_Capability!]! + // Build the type: [_Capability!]! + capabilityTypeRef := schema.AddNonNullNamedType([]byte("_Capability")) // _Capability! + listTypeRef := schema.AddListType(capabilityTypeRef) // [_Capability!] + nonNullListTypeRef := schema.AddNonNullType(listTypeRef) // [_Capability!]! + + capabilitiesFieldRef := schema.ImportFieldDefinition( + "capabilities", + "List of capabilities supported by this service.", + nonNullListTypeRef, + nil, + nil, + ) + + // Create _Service type + schema.ImportObjectTypeDefinition( + "_Service", + "Service capabilities exposed via __service query.", + []int{capabilitiesFieldRef}, + nil, + ) +} + +// addServiceField adds the __service: _Service! field to the Query type. +func addServiceField(schema *ast.Document, queryRef int) { + // Check if __service field already exists + if schema.ObjectTypeDefinitionHasField(queryRef, []byte("__service")) { + return + } + + // Create __service: _Service! field + fieldNameRef := schema.Input.AppendInputBytes([]byte("__service")) + fieldTypeRef := schema.AddNonNullNamedType([]byte("_Service")) + + fieldRef := schema.AddFieldDefinition(ast.FieldDefinition{ + Name: fieldNameRef, + Type: fieldTypeRef, + }) + + // Add field to Query type + schema.ObjectTypeDefinitions[queryRef].FieldsDefinition.Refs = append( + schema.ObjectTypeDefinitions[queryRef].FieldsDefinition.Refs, + fieldRef, + ) + schema.ObjectTypeDefinitions[queryRef].HasFieldDefinitions = true +} diff --git a/v2/pkg/engine/datasource/service_datasource/schema_test.go b/v2/pkg/engine/datasource/service_datasource/schema_test.go new file mode 100644 index 0000000000..0402362989 --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/schema_test.go @@ -0,0 +1,269 @@ +package service_datasource + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/astparser" + "github.com/wundergraph/graphql-go-tools/v2/pkg/asttransform" +) + +func TestServiceSDLIsValidGraphQL(t *testing.T) { + // Test that ServiceSDL parses as valid GraphQL + schema, report := astparser.ParseGraphqlDocumentString(ServiceSDL) + require.False(t, report.HasErrors(), "ServiceSDL should be valid GraphQL: %s", report.Error()) + + // Verify _Service type exists + serviceNode, found := schema.Index.FirstNodeByNameStr("_Service") + assert.True(t, found, "_Service type should exist") + assert.Equal(t, ast.NodeKindObjectTypeDefinition, serviceNode.Kind) + + // Verify _Capability type exists + capabilityNode, found := schema.Index.FirstNodeByNameStr("_Capability") + assert.True(t, found, "_Capability type should exist") + assert.Equal(t, ast.NodeKindObjectTypeDefinition, capabilityNode.Kind) +} + +func TestExtendSchemaWithServiceTypes(t *testing.T) { + t.Run("extends schema with service types", func(t *testing.T) { + // Start with a simple user schema + userSchemaSDL := ` + type Query { + user(id: ID!): User + } + type User { + id: ID! + name: String! + } + ` + + schema, report := astparser.ParseGraphqlDocumentString(userSchemaSDL) + require.False(t, report.HasErrors()) + + // Extend with service types + err := ExtendSchemaWithServiceTypes(&schema) + require.NoError(t, err) + + // Verify _Service type was added + serviceNode, found := schema.Index.FirstNodeByNameStr("_Service") + assert.True(t, found, "_Service type should exist after extension") + assert.Equal(t, ast.NodeKindObjectTypeDefinition, serviceNode.Kind) + + // Verify _Capability type was added + capabilityNode, found := schema.Index.FirstNodeByNameStr("_Capability") + assert.True(t, found, "_Capability type should exist after extension") + assert.Equal(t, ast.NodeKindObjectTypeDefinition, capabilityNode.Kind) + + // Verify __service field was added to Query + queryNode, found := schema.Index.FirstNodeByNameStr("Query") + require.True(t, found, "Query type should exist") + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("__service")), + "Query should have __service field") + + // Verify original fields still exist + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("user")), + "Query should still have user field") + }) + + t.Run("does not duplicate __service field if already exists", func(t *testing.T) { + // Schema that already has __service field + schemaSDL := ` + type Query { + user: User + __service: _Service! + } + type User { + id: ID! + } + type _Service { + capabilities: [_Capability!]! + } + type _Capability { + identifier: String! + } + ` + + schema, report := astparser.ParseGraphqlDocumentString(schemaSDL) + require.False(t, report.HasErrors()) + + // Get field count before + queryNode, _ := schema.Index.FirstNodeByNameStr("Query") + fieldCountBefore := len(schema.ObjectTypeDefinitions[queryNode.Ref].FieldsDefinition.Refs) + + // Extend with service types (should not duplicate) + err := ExtendSchemaWithServiceTypes(&schema) + require.NoError(t, err) + + // Field count should be the same (no duplicate __service) + fieldCountAfter := len(schema.ObjectTypeDefinitions[queryNode.Ref].FieldsDefinition.Refs) + assert.Equal(t, fieldCountBefore, fieldCountAfter, "should not duplicate __service field") + }) + + t.Run("returns error if Query type not found", func(t *testing.T) { + // Schema without Query type + schemaSDL := ` + type Mutation { + createUser(name: String!): User + } + type User { + id: ID! + } + ` + + schema, report := astparser.ParseGraphqlDocumentString(schemaSDL) + require.False(t, report.HasErrors()) + + err := ExtendSchemaWithServiceTypes(&schema) + assert.Error(t, err) + assert.Contains(t, err.Error(), "query type not found") + }) + + t.Run("works with custom query type name", func(t *testing.T) { + // Schema with custom query type name via schema definition + schemaSDL := ` + schema { + query: RootQuery + } + type RootQuery { + user: User + } + type User { + id: ID! + } + ` + + schema, report := astparser.ParseGraphqlDocumentString(schemaSDL) + require.False(t, report.HasErrors()) + + err := ExtendSchemaWithServiceTypes(&schema) + require.NoError(t, err) + + // Verify __service field was added to RootQuery + queryNode, found := schema.Index.FirstNodeByNameStr("RootQuery") + require.True(t, found, "RootQuery type should exist") + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("__service")), + "RootQuery should have __service field") + }) +} + +func TestExtendSchemaWithServiceTypes_CosmoRouterPattern(t *testing.T) { + // This test mimics exactly how Cosmo router integrates: + // 1. Start with a user schema (no service types) + // 2. Parse it + // 3. Merge with base schema (adds introspection types) + // 4. Extend with service types + // 5. Verify both introspection and service types exist + + t.Run("full integration pattern", func(t *testing.T) { + // User's schema - does NOT include _Service, _Capability, or __service + userSchemaSDL := ` + type Query { + user(id: ID!): User + users: [User!]! + } + type User { + id: ID! + name: String! + email: String + } + ` + + // 1. Parse user schema + schema, report := astparser.ParseGraphqlDocumentString(userSchemaSDL) + require.False(t, report.HasErrors()) + + // 2. Merge with base schema (like Cosmo does - adds introspection types) + err := asttransform.MergeDefinitionWithBaseSchema(&schema) + require.NoError(t, err) + + // Verify introspection types were added by MergeDefinitionWithBaseSchema + _, foundSchema := schema.Index.FirstNodeByNameStr("__Schema") + assert.True(t, foundSchema, "__Schema type should exist after base schema merge") + + // 3. Extend with service types (NEW API) + err = ExtendSchemaWithServiceTypes(&schema) + require.NoError(t, err) + + // 4. Verify service types were added + _, foundService := schema.Index.FirstNodeByNameStr("_Service") + assert.True(t, foundService, "_Service type should exist") + + _, foundCapability := schema.Index.FirstNodeByNameStr("_Capability") + assert.True(t, foundCapability, "_Capability type should exist") + + // 5. Verify __service field exists on Query + queryNode, found := schema.Index.FirstNodeByNameStr("Query") + require.True(t, found) + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("__service")), + "Query should have __service field") + + // 6. Verify introspection fields still exist + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("__schema")), + "Query should have __schema field") + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("__type")), + "Query should have __type field") + + // 7. Verify original user fields still exist + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("user")), + "Query should still have user field") + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("users")), + "Query should still have users field") + }) +} + +func TestNewServiceConfigFactoryWithSchema(t *testing.T) { + t.Run("creates factory and extends schema", func(t *testing.T) { + userSchemaSDL := ` + type Query { + user: User + } + type User { + id: ID! + } + ` + + schema, report := astparser.ParseGraphqlDocumentString(userSchemaSDL) + require.False(t, report.HasErrors()) + + factory, err := NewServiceConfigFactoryWithSchema(&schema, ServiceOptions{ + DefaultErrorBehavior: "PROPAGATE", + }) + require.NoError(t, err) + require.NotNil(t, factory) + + // Verify schema was extended + _, found := schema.Index.FirstNodeByNameStr("_Service") + assert.True(t, found, "_Service type should exist") + + queryNode, _ := schema.Index.FirstNodeByNameStr("Query") + assert.True(t, schema.ObjectTypeDefinitionHasField(queryNode.Ref, []byte("__service"))) + + // Verify factory works + fieldConfigs := factory.BuildFieldConfigurations() + assert.Len(t, fieldConfigs, 1) + assert.Equal(t, "__service", fieldConfigs[0].FieldName) + + dataSources := factory.BuildDataSourceConfigurations() + assert.Len(t, dataSources, 1) + }) + + t.Run("returns error if schema extension fails", func(t *testing.T) { + // Schema without Query type + schemaSDL := ` + type Mutation { + doSomething: Boolean + } + ` + + schema, report := astparser.ParseGraphqlDocumentString(schemaSDL) + require.False(t, report.HasErrors()) + + factory, err := NewServiceConfigFactoryWithSchema(&schema, ServiceOptions{}) + assert.Error(t, err) + assert.Nil(t, factory) + assert.Contains(t, err.Error(), "query type not found") + }) +} diff --git a/v2/pkg/engine/datasource/service_datasource/service_datasource_test.go b/v2/pkg/engine/datasource/service_datasource/service_datasource_test.go new file mode 100644 index 0000000000..483325ad41 --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/service_datasource_test.go @@ -0,0 +1,129 @@ +package service_datasource + +import ( + "context" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewService(t *testing.T) { + t.Run("with default error behavior", func(t *testing.T) { + opts := ServiceOptions{ + DefaultErrorBehavior: "PROPAGATE", + } + service := NewService(opts) + + assert.Len(t, service.Capabilities, 2) + + // First capability should be onError support + assert.Equal(t, "graphql.onError", service.Capabilities[0].Identifier) + assert.NotNil(t, service.Capabilities[0].Description) + + // Second capability should be default error behavior + assert.Equal(t, "graphql.defaultErrorBehavior", service.Capabilities[1].Identifier) + assert.NotNil(t, service.Capabilities[1].Value) + assert.Equal(t, "PROPAGATE", *service.Capabilities[1].Value) + }) + + t.Run("without default error behavior", func(t *testing.T) { + opts := ServiceOptions{} + service := NewService(opts) + + assert.Len(t, service.Capabilities, 1) + assert.Equal(t, "graphql.onError", service.Capabilities[0].Identifier) + }) +} + +func TestSource_Load(t *testing.T) { + service := NewService(ServiceOptions{ + DefaultErrorBehavior: "NULL", + }) + source := NewSource(service) + + data, err := source.Load(context.Background(), nil, []byte(`{}`)) + require.NoError(t, err) + + var result Service + err = json.Unmarshal(data, &result) + require.NoError(t, err) + + assert.Len(t, result.Capabilities, 2) + assert.Equal(t, "graphql.onError", result.Capabilities[0].Identifier) + assert.Equal(t, "graphql.defaultErrorBehavior", result.Capabilities[1].Identifier) + assert.Equal(t, "NULL", *result.Capabilities[1].Value) +} + +func TestSource_LoadWithFiles(t *testing.T) { + service := NewService(ServiceOptions{}) + source := NewSource(service) + + _, err := source.LoadWithFiles(context.Background(), nil, nil, nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "does not support file uploads") +} + +func TestServiceConfigFactory(t *testing.T) { + factory := NewServiceConfigFactory(ServiceOptions{ + DefaultErrorBehavior: "HALT", + }) + + t.Run("field configurations", func(t *testing.T) { + fieldConfigs := factory.BuildFieldConfigurations() + assert.Len(t, fieldConfigs, 1) + assert.Equal(t, "Query", fieldConfigs[0].TypeName) + assert.Equal(t, "__service", fieldConfigs[0].FieldName) + }) + + t.Run("datasource configurations", func(t *testing.T) { + dsConfigs := factory.BuildDataSourceConfigurations() + assert.Len(t, dsConfigs, 1) + }) + + t.Run("service accessor", func(t *testing.T) { + service := factory.Service() + assert.NotNil(t, service) + assert.Len(t, service.Capabilities, 2) + }) +} + +func TestCapability_JSON(t *testing.T) { + cap := Capability{ + Identifier: "test.capability", + Value: ptr("test-value"), + Description: ptr("A test capability"), + } + + data, err := json.Marshal(cap) + require.NoError(t, err) + + var result Capability + err = json.Unmarshal(data, &result) + require.NoError(t, err) + + assert.Equal(t, "test.capability", result.Identifier) + assert.NotNil(t, result.Value) + assert.Equal(t, "test-value", *result.Value) + assert.NotNil(t, result.Description) + assert.Equal(t, "A test capability", *result.Description) +} + +func TestCapability_JSON_WithNils(t *testing.T) { + cap := Capability{ + Identifier: "test.capability", + } + + data, err := json.Marshal(cap) + require.NoError(t, err) + + // Verify that nil fields are omitted from JSON + var raw map[string]interface{} + err = json.Unmarshal(data, &raw) + require.NoError(t, err) + + assert.Contains(t, raw, "identifier") + assert.NotContains(t, raw, "value") + assert.NotContains(t, raw, "description") +} diff --git a/v2/pkg/engine/datasource/service_datasource/source.go b/v2/pkg/engine/datasource/service_datasource/source.go new file mode 100644 index 0000000000..f2f2e81116 --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/source.go @@ -0,0 +1,30 @@ +package service_datasource + +import ( + "context" + "encoding/json" + "errors" + "net/http" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/httpclient" +) + +// Source is the data source for the __service field. +type Source struct { + service *Service +} + +// NewSource creates a new Source with the given service configuration. +func NewSource(service *Service) *Source { + return &Source{service: service} +} + +// Load implements the DataSource interface. +func (s *Source) Load(ctx context.Context, headers http.Header, input []byte) (data []byte, err error) { + return json.Marshal(s.service) +} + +// LoadWithFiles implements the DataSource interface. +func (s *Source) LoadWithFiles(ctx context.Context, headers http.Header, input []byte, files []*httpclient.FileUpload) (data []byte, err error) { + return nil, errors.New("service data source does not support file uploads") +} diff --git a/v2/pkg/engine/datasource/service_datasource/types.go b/v2/pkg/engine/datasource/service_datasource/types.go new file mode 100644 index 0000000000..6a9bd756e3 --- /dev/null +++ b/v2/pkg/engine/datasource/service_datasource/types.go @@ -0,0 +1,50 @@ +package service_datasource + +// Service represents the GraphQL service capabilities exposed via __service query. +type Service struct { + Capabilities []Capability `json:"capabilities"` +} + +// Capability represents a single service capability. +// This follows the pattern proposed in GraphQL spec PR #1163 for service introspection. +type Capability struct { + // Identifier is the unique identifier for this capability (e.g., "graphql.onError") + Identifier string `json:"identifier"` + // Value is an optional value associated with the capability (e.g., "PROPAGATE" for default error behavior) + Value *string `json:"value,omitempty"` + // Description provides human-readable documentation for the capability + Description *string `json:"description,omitempty"` +} + +// ServiceOptions configures the service capabilities to expose. +type ServiceOptions struct { + // DefaultErrorBehavior is the default error behavior when onError is not specified. + // This is exposed as the "graphql.defaultErrorBehavior" capability. + DefaultErrorBehavior string +} + +// NewService creates a Service with the configured capabilities. +func NewService(opts ServiceOptions) *Service { + capabilities := []Capability{ + { + Identifier: "graphql.onError", + Description: ptr("Supports the onError request extension for controlling error propagation behavior"), + }, + } + + if opts.DefaultErrorBehavior != "" { + capabilities = append(capabilities, Capability{ + Identifier: "graphql.defaultErrorBehavior", + Value: ptr(opts.DefaultErrorBehavior), + Description: ptr("The default error behavior when onError is not specified in the request"), + }) + } + + return &Service{ + Capabilities: capabilities, + } +} + +func ptr(s string) *string { + return &s +} diff --git a/v2/pkg/engine/datasource/staticdatasource/static_datasource_test.go b/v2/pkg/engine/datasource/staticdatasource/static_datasource_test.go index c6ea27abcc..ddc4b2ba44 100644 --- a/v2/pkg/engine/datasource/staticdatasource/static_datasource_test.go +++ b/v2/pkg/engine/datasource/staticdatasource/static_datasource_test.go @@ -26,6 +26,7 @@ func TestStaticDataSourcePlanning(t *testing.T) { FetchConfiguration: resolve.FetchConfiguration{ Input: `{"hello": "world"}`, DataSource: Source{}, + // Note: UseL1Cache is cleared to false by the test framework when WithCacheKeyTemplates() is not used }, }, }, diff --git a/v2/pkg/engine/datasourcetesting/datasourcetesting.go b/v2/pkg/engine/datasourcetesting/datasourcetesting.go index ec9c8907f3..8afdc5fa9b 100644 --- a/v2/pkg/engine/datasourcetesting/datasourcetesting.go +++ b/v2/pkg/engine/datasourcetesting/datasourcetesting.go @@ -34,6 +34,9 @@ type testOptions struct { withPrintPlan bool withFieldDependencies bool withFetchReasons bool + withEntityCaching bool + withFetchProvidesData bool + withCacheKeyTemplates bool validationOptions []astvalidation.Option } @@ -85,6 +88,28 @@ func WithFetchReasons() func(*testOptions) { } } +func WithEntityCaching() func(*testOptions) { + return func(o *testOptions) { + o.withFieldInfo = true + o.withFieldDependencies = true + o.withEntityCaching = true + } +} + +func WithFetchProvidesData() func(*testOptions) { + return func(o *testOptions) { + o.withFieldInfo = true + o.withFieldDependencies = true + o.withFetchProvidesData = true + } +} + +func WithCacheKeyTemplates() func(*testOptions) { + return func(o *testOptions) { + o.withCacheKeyTemplates = true + } +} + func WithValidationOptions(options ...astvalidation.Option) func(*testOptions) { return func(o *testOptions) { o.validationOptions = options @@ -150,6 +175,8 @@ func RunTestWithVariables(definition, operation, operationName, variables string // by default, we don't want to have field info in the tests because it's too verbose config.DisableIncludeInfo = true config.DisableIncludeFieldDependencies = true + config.DisableEntityCaching = true + config.DisableFetchProvidesData = true opts := &testOptions{} for _, o := range options { @@ -168,6 +195,14 @@ func RunTestWithVariables(definition, operation, operationName, variables string config.BuildFetchReasons = true } + if opts.withEntityCaching { + config.DisableEntityCaching = false + } + + if opts.withFetchProvidesData { + config.DisableFetchProvidesData = false + } + if opts.skipReason != "" { t.Skip(opts.skipReason) } @@ -218,6 +253,23 @@ func RunTestWithVariables(definition, operation, operationName, variables string } } + // Clear CacheKeyTemplate from actual plan by default since most tests don't need + // to verify the internal cache key template structure. Tests that need to verify + // caching behavior should use WithCacheKeyTemplates() to opt in. + if !opts.withCacheKeyTemplates { + clearCacheKeyTemplates(actualPlan) + } else { + // Always clear RootFieldL1EntityCacheKeyTemplates even when WithCacheKeyTemplates() + // is set, because planner path assignment can make these non-deterministic. + clearRootFieldEntityCacheKeyTemplates(actualPlan) + } + + // Clear CacheAnalytics from response Object nodes by default since most tests + // don't need to verify cache analytics. Tests using WithEntityCaching() opt in. + if !opts.withEntityCaching { + clearCacheAnalytics(actualPlan) + } + if opts.withPrintPlan { t.Log("\n", actualPlan.(*plan.SynchronousResponsePlan).Response.Fetches.QueryPlan().PrettyPrint()) } @@ -255,3 +307,132 @@ func RunTestWithVariables(definition, operation, operationName, variables string } } } + +// clearCacheKeyTemplates recursively clears CacheKeyTemplate from all fetches in the plan. +// This is called by default so tests don't need to specify the internal cache key template structure. +// Use WithCacheKeyTemplates() to opt in to including cache key templates in tests. +func clearCacheKeyTemplates(p plan.Plan) { + switch pl := p.(type) { + case *plan.SynchronousResponsePlan: + if pl.Response != nil { + if pl.Response.Fetches != nil { + clearCacheKeyTemplatesFromFetchTree(pl.Response.Fetches) + } + // Also clear from RawFetches (pre-postprocessed fetch items) + for _, item := range pl.Response.RawFetches { + if item != nil && item.Fetch != nil { + clearCacheKeyTemplateFromFetch(item.Fetch) + } + } + } + case *plan.SubscriptionResponsePlan: + if pl.Response != nil && pl.Response.Response != nil { + if pl.Response.Response.Fetches != nil { + clearCacheKeyTemplatesFromFetchTree(pl.Response.Response.Fetches) + } + // Also clear from RawFetches + for _, item := range pl.Response.Response.RawFetches { + if item != nil && item.Fetch != nil { + clearCacheKeyTemplateFromFetch(item.Fetch) + } + } + } + } +} + +func clearCacheKeyTemplatesFromFetchTree(node *resolve.FetchTreeNode) { + if node == nil { + return + } + + // Clear from this node's fetch + if node.Item != nil && node.Item.Fetch != nil { + clearCacheKeyTemplateFromFetch(node.Item.Fetch) + } + + // Clear from trigger + if node.Trigger != nil { + clearCacheKeyTemplatesFromFetchTree(node.Trigger) + } + + // Clear from children + for _, child := range node.ChildNodes { + clearCacheKeyTemplatesFromFetchTree(child) + } +} + +func clearCacheKeyTemplateFromFetch(f resolve.Fetch) { + switch fetch := f.(type) { + case *resolve.SingleFetch: + fetch.FetchConfiguration.Caching.CacheKeyTemplate = nil + fetch.FetchConfiguration.Caching.RootFieldL1EntityCacheKeyTemplates = nil + // Clear UseL1Cache to avoid test failures when comparing expected vs actual + // since the planner now defaults to true but most tests expect false (zero value) + fetch.FetchConfiguration.Caching.UseL1Cache = false + } +} + +// clearRootFieldEntityCacheKeyTemplates clears only RootFieldL1EntityCacheKeyTemplates from all +// fetches, preserving CacheKeyTemplate. Used when WithCacheKeyTemplates() is set but +// root field templates are non-deterministic due to planner path assignment ordering. +func clearRootFieldEntityCacheKeyTemplates(p plan.Plan) { + switch pl := p.(type) { + case *plan.SynchronousResponsePlan: + if pl.Response != nil && pl.Response.Fetches != nil { + clearRootFieldEntityCacheKeyTemplatesFromFetchTree(pl.Response.Fetches) + } + case *plan.SubscriptionResponsePlan: + if pl.Response != nil && pl.Response.Response != nil && pl.Response.Response.Fetches != nil { + clearRootFieldEntityCacheKeyTemplatesFromFetchTree(pl.Response.Response.Fetches) + } + } +} + +func clearRootFieldEntityCacheKeyTemplatesFromFetchTree(node *resolve.FetchTreeNode) { + if node == nil { + return + } + if node.Item != nil && node.Item.Fetch != nil { + if sf, ok := node.Item.Fetch.(*resolve.SingleFetch); ok { + sf.FetchConfiguration.Caching.RootFieldL1EntityCacheKeyTemplates = nil + } + } + if node.Trigger != nil { + clearRootFieldEntityCacheKeyTemplatesFromFetchTree(node.Trigger) + } + for _, child := range node.ChildNodes { + clearRootFieldEntityCacheKeyTemplatesFromFetchTree(child) + } +} + +// clearCacheAnalytics recursively clears CacheAnalytics from all Object nodes in the plan. +// This is called by default so tests don't need to account for cache analytics. +// Use WithEntityCaching() to opt in to including cache analytics in tests. +func clearCacheAnalytics(p plan.Plan) { + switch pl := p.(type) { + case *plan.SynchronousResponsePlan: + if pl.Response != nil && pl.Response.Data != nil { + clearCacheAnalyticsFromNode(pl.Response.Data) + } + case *plan.SubscriptionResponsePlan: + if pl.Response != nil && pl.Response.Response != nil && pl.Response.Response.Data != nil { + clearCacheAnalyticsFromNode(pl.Response.Response.Data) + } + } +} + +func clearCacheAnalyticsFromNode(node resolve.Node) { + switch n := node.(type) { + case *resolve.Object: + n.CacheAnalytics = nil + for _, field := range n.Fields { + if field.Value != nil { + clearCacheAnalyticsFromNode(field.Value) + } + } + case *resolve.Array: + if n.Item != nil { + clearCacheAnalyticsFromNode(n.Item) + } + } +} diff --git a/v2/pkg/engine/plan/caching_planner_state.go b/v2/pkg/engine/plan/caching_planner_state.go new file mode 100644 index 0000000000..261e00e32b --- /dev/null +++ b/v2/pkg/engine/plan/caching_planner_state.go @@ -0,0 +1,936 @@ +package plan + +import ( + "bytes" + "cmp" + "regexp" + "slices" + "strings" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" + "github.com/wundergraph/graphql-go-tools/v2/pkg/lexer/literal" +) + +// cachingPlannerState owns the cache-planning state extracted from Visitor. +// Keeps the entity-caching planner additions separate from the core +// response-shaping visitor. +type cachingPlannerState struct { + visitor *Visitor + entityAnalyticsCache map[string]*resolve.ObjectCacheAnalytics + requestScopedVisibleResponseKeys map[int]string + requestScopedFetchAliases map[int]string + // plannerObjects stores the root object for each planner's ProvidesData + // map plannerID -> root object + plannerObjects map[int]*resolve.Object + // plannerCurrentFields stores the current field stack for each planner + // map plannerID -> field stack + plannerCurrentFields map[int][]objectFields + // plannerResponsePaths stores the response paths relative to each planner's root. + // Paths are normalized: inline-fragment markers like ".$0User" are stripped so + // prefix comparisons against plannerEntityBoundaryPaths match regardless of fragments. + // map plannerID -> response path stack + plannerResponsePaths map[int][]string + // plannerEntityBoundaryPaths stores the entity boundary paths for each planner. + // Stored in normalized form (no inline-fragment markers) so that isEntityRootField + // can match regardless of how the query wraps the boundary in a fragment. + // map plannerID -> entity boundary path + plannerEntityBoundaryPaths map[int]string +} + +func newCachingPlannerState(visitor *Visitor) *cachingPlannerState { + return &cachingPlannerState{ + visitor: visitor, + } +} + +func (s *cachingPlannerState) setRequestScopedMaps(visibleResponseKeys, fetchAliases map[int]string) { + s.requestScopedVisibleResponseKeys = visibleResponseKeys + s.requestScopedFetchAliases = fetchAliases +} + +func (s *cachingPlannerState) visibleResponseKey(fieldRef int) (string, bool) { + visible, ok := s.requestScopedVisibleResponseKeys[fieldRef] + return visible, ok +} + +func (s *cachingPlannerState) fetchAlias(fieldRef int) (string, bool) { + alias, ok := s.requestScopedFetchAliases[fieldRef] + return alias, ok +} + +func (s *cachingPlannerState) resetPlannerStructures() { + s.plannerObjects = map[int]*resolve.Object{} + s.plannerCurrentFields = map[int][]objectFields{} + s.plannerResponsePaths = map[int][]string{} +} + +// initializePlannerStructures seeds per-planner ProvidesData state so field tracking +// during the walk can push/pop onto a stable root. Safe to call when no planners +// are configured: the range over a nil slice is a no-op. +func (s *cachingPlannerState) initializePlannerStructures() { + v := s.visitor + for i := range v.planners { + s.plannerObjects[i] = &resolve.Object{ + Fields: []*resolve.Field{}, + } + s.plannerCurrentFields[i] = []objectFields{{ + fields: &s.plannerObjects[i].Fields, + popOnField: -1, + }} + s.plannerResponsePaths[i] = []string{} + } + s.plannerEntityBoundaryPaths = map[int]string{} +} + +// trackFieldForPlanner adds field information to the planner's tracked object structure. +// It handles entity boundary detection, __typename field deduplication, and creates +// the appropriate field value nodes for the planner's representation of the query. +// The caller may pass any plannerID; shouldPlannerHandleField validates bounds and +// ownership in one place. +func (s *cachingPlannerState) trackFieldForPlanner(plannerID int, fieldRef int) { + v := s.visitor + if !v.shouldPlannerHandleField(plannerID, fieldRef) { + return + } + + fieldName := v.Operation.FieldNameBytes(fieldRef) + fieldAliasOrName := v.Operation.FieldAliasOrNameString(fieldRef) + fetchResponseKey := fieldAliasOrName + if fetchAlias, ok := s.fetchAlias(fieldRef); ok { + fetchResponseKey = fetchAlias + } + + // For nested entity fetches, check if this field represents the entity boundary + // If so, we should skip adding this field to ProvidesData and instead add its children + if s.isEntityBoundaryField(plannerID, fieldRef) { + // Create a new object for the entity fields (children of the boundary) + // This ensures entity fields like id, username are added to this object, not the parent + entityObj := &resolve.Object{ + Fields: []*resolve.Field{}, + } + // Push the entity object onto the stack so child fields get added to it + v.Walker.DefferOnEnterField(func() { + s.plannerCurrentFields[plannerID] = append(s.plannerCurrentFields[plannerID], objectFields{ + popOnField: fieldRef, + fields: &entityObj.Fields, + }) + }) + // Replace the root object for this planner with the entity object + // This makes the entity fields the top-level fields in ProvidesData + s.plannerObjects[plannerID] = entityObj + return + } + + // Check if this is a __typename field and if we already have one with the same name and path + if bytes.Equal(fieldName, literal.TYPENAME) && len(s.plannerCurrentFields[plannerID]) > 0 { + currentFields := s.plannerCurrentFields[plannerID][len(s.plannerCurrentFields[plannerID])-1] + + // Check if we already have a __typename field with the same name and path + for _, existingField := range *currentFields.fields { + if bytes.Equal(existingField.Name, []byte(fetchResponseKey)) { + // For __typename fields, the path is [fieldAliasOrName] + // Check if the existing field has the same path + if existingValue, ok := existingField.Value.(*resolve.Scalar); ok { + if len(existingValue.Path) > 0 && existingValue.Path[0] == fetchResponseKey { + // We already have this __typename field with the same name and path, skip it + return + } + } + } + } + } + + fieldDefinition, ok := v.Walker.FieldDefinition(fieldRef) + if !ok { + return + } + fieldType := v.Definition.FieldDefinitionType(fieldDefinition) + + fieldValue := s.createFieldValueForPlanner(fieldType, []string{fetchResponseKey}) + + onTypeNames := v.resolveEntityOnTypeNames(plannerID, fieldRef, fieldName) + + field := &resolve.Field{ + Name: []byte(fetchResponseKey), + Value: fieldValue, + OnTypeNames: onTypeNames, + } + if fetchResponseKey != string(fieldName) { + field.OriginalName = v.Operation.FieldNameBytes(fieldRef) + } + // Capture field arguments for cache suffix computation at resolve time. + // Skip root query fields (Query/Mutation/Subscription) — their args are already + // part of the cache key, and suffixing would break entity key mapping. + if v.Operation.FieldHasArguments(fieldRef) { + enclosingType := v.Walker.EnclosingTypeDefinition.NameString(v.Definition) + if !v.Definition.Index.IsRootOperationTypeNameString(enclosingType) { + field.CacheArgs = s.captureFieldCacheArgs(fieldRef) + } + } + + if len(s.plannerCurrentFields[plannerID]) > 0 { + currentFields := s.plannerCurrentFields[plannerID][len(s.plannerCurrentFields[plannerID])-1] + *currentFields.fields = append(*currentFields.fields, field) + } + + for { + // for loop to unwrap array item + switch node := fieldValue.(type) { + case *resolve.Array: + // unwrap and check type again + fieldValue = node.Item + case *resolve.Object: + // if the field value is an object, add it to the current fields stack + v.Walker.DefferOnEnterField(func() { + s.plannerCurrentFields[plannerID] = append(s.plannerCurrentFields[plannerID], objectFields{ + popOnField: fieldRef, + fields: &node.Fields, + }) + }) + return + default: + // field value is a scalar or null, we don't add it to the stack + return + } + } +} + +// captureFieldCacheArgs extracts argument metadata from a field for cache suffix computation. +// After normalization, all argument values are variable references (e.g., friends(first: $a)). +// We capture the arg name and variable path so the resolve-time suffix can look up actual values. +func (s *cachingPlannerState) captureFieldCacheArgs(fieldRef int) []resolve.CacheFieldArg { + v := s.visitor + argRefs := v.Operation.FieldArguments(fieldRef) + if len(argRefs) == 0 { + return nil + } + args := make([]resolve.CacheFieldArg, 0, len(argRefs)) + for _, argRef := range argRefs { + argName := v.Operation.ArgumentNameString(argRef) + argValue := v.Operation.ArgumentValue(argRef) + if argValue.Kind == ast.ValueKindVariable { + variableName := v.Operation.VariableValueNameString(argValue.Ref) + args = append(args, resolve.CacheFieldArg{ + ArgName: argName, + VariableName: variableName, + }) + } + } + if len(args) == 0 { + return nil + } + // Sort by ArgName for deterministic suffix + slices.SortFunc(args, func(a, b resolve.CacheFieldArg) int { + return cmp.Compare(a.ArgName, b.ArgName) + }) + return args +} + +// createFieldValueForPlanner builds the resolve.Node shape used for ProvidesData +// tracking on a given planner. Unlike resolveFieldValue it does not mutate walker +// state (objects list, currentFields stack, etc.), so it can be invoked from +// trackFieldForPlanner during EnterField without side-effects on the main walk. +func (s *cachingPlannerState) createFieldValueForPlanner(typeRef int, path []string) resolve.Node { + v := s.visitor + ofType := v.Definition.Types[typeRef].OfType + + switch v.Definition.Types[typeRef].TypeKind { + case ast.TypeKindNonNull: + node := s.createFieldValueForPlanner(ofType, path) + // Set nullable to false for the returned node + switch n := node.(type) { + case *resolve.Scalar: + n.Nullable = false + case *resolve.Object: + n.Nullable = false + case *resolve.Array: + n.Nullable = false + } + return node + case ast.TypeKindList: + listItem := s.createFieldValueForPlanner(ofType, nil) + return &resolve.Array{ + Nullable: true, + Path: path, + Item: listItem, + } + case ast.TypeKindNamed: + typeName := v.Definition.ResolveTypeNameString(typeRef) + typeDefinitionNode, ok := v.Definition.Index.FirstNodeByNameStr(typeName) + if !ok { + return &resolve.Null{} + } + switch typeDefinitionNode.Kind { + case ast.NodeKindScalarTypeDefinition, ast.NodeKindEnumTypeDefinition: + return &resolve.Scalar{ + Nullable: true, + Path: path, + } + case ast.NodeKindObjectTypeDefinition, ast.NodeKindInterfaceTypeDefinition, ast.NodeKindUnionTypeDefinition: + // For object types, create a new object that will be populated by child fields + obj := &resolve.Object{ + Nullable: true, + Path: path, + Fields: []*resolve.Field{}, + } + return obj + default: + return &resolve.Null{} + } + default: + return &resolve.Null{} + } +} + +// isEntityBoundaryField checks if this field represents the entity boundary for a nested entity fetch +// For nested entity fetches, the field at the response path boundary should be skipped in ProvidesData +func (s *cachingPlannerState) isEntityBoundaryField(plannerID int, fieldRef int) bool { + v := s.visitor + config := v.planners[plannerID] + fetchConfig := config.ObjectFetchConfiguration() + if fetchConfig == nil || fetchConfig.fetchItem == nil { + return false + } + + // Check if this is a nested fetch (has "." in response path) + if fetchConfig.fetchItem.ResponsePath == "" { + return false // Root fetch, no boundary field to skip + } + + // Determine the root path prefix from the walker path. + // For queries this is "query", for mutations "mutation", for subscriptions "subscription". + currentPath := v.Walker.Path.DotDelimitedString() + rootPrefix := "query" + if idx := strings.IndexByte(currentPath, '.'); idx > 0 { + rootPrefix = currentPath[:idx] + } + responsePath := rootPrefix + "." + fetchConfig.fetchItem.ResponsePath + + // Normalize the response path by removing array index markers (@.) + // e.g., "query.topProducts.@.reviews.@.author" -> "query.topProducts.reviews.author" + normalizedResponsePath := strings.ReplaceAll(responsePath, ".@", "") + + // For nested fetches, check if this field is at the entity boundary + fieldName := v.Operation.FieldAliasOrNameString(fieldRef) + fullFieldPath := currentPath + "." + fieldName + + // Normalize the field path by removing inline fragment type conditions + // e.g., "query.meInterface.$0User.reviews" -> "query.meInterface.reviews" + // The walker path includes $N markers for inline fragments + normalizedFieldPath := s.normalizePathRemovingFragments(fullFieldPath) + + // If this normalized field path matches the normalized response path, it's the entity boundary + if normalizedFieldPath == normalizedResponsePath { + // Store the entity boundary path for this planner (use normalized path) + s.plannerEntityBoundaryPaths[plannerID] = normalizedFieldPath + return true + } + return false +} + +// normalizePathRemovingFragments removes inline fragment type condition markers from the path +// e.g., "query.meInterface.$0User.reviews" -> "query.meInterface.reviews" +// The walker path includes $N markers for inline fragments (e.g., $0User, $1Admin) +var fragmentMarkerRegex = regexp.MustCompile(`\.\$\d+\w+`) + +func (s *cachingPlannerState) normalizePathRemovingFragments(path string) string { + return fragmentMarkerRegex.ReplaceAllString(path, "") +} + +// isEntityRootField checks if this field is at the root of an entity. +// It returns true when the field path is a direct child of the stored entity +// boundary path. The current walker path is normalized (inline-fragment markers +// stripped) before the prefix check — boundary paths are stored normalized by +// isEntityBoundaryField, so comparing a raw path here would miss queries that +// wrap the boundary in an inline fragment such as `... on User { reviews }`. +func (s *cachingPlannerState) isEntityRootField(plannerID int, fieldRef int) bool { + v := s.visitor + boundaryPath, hasBoundary := s.plannerEntityBoundaryPaths[plannerID] + if !hasBoundary { + return false + } + + currentPath := v.Walker.Path.DotDelimitedString() + fieldName := v.Operation.FieldAliasOrNameString(fieldRef) + return s.isEntityRootPath(boundaryPath, currentPath+"."+fieldName) +} + +// isEntityRootPath is the pure, walker-free core of isEntityRootField. It +// normalizes the candidate field path (stripping inline-fragment markers) and +// returns true when that path is a direct child of boundaryPath. Extracted so +// the inline-fragment / fragment-wrapping invariant from A42 can be unit-tested +// without staging a real walker. +func (s *cachingPlannerState) isEntityRootPath(boundaryPath, fullFieldPath string) bool { + normalized := s.normalizePathRemovingFragments(fullFieldPath) + if !strings.HasPrefix(normalized, boundaryPath+".") { + return false + } + return !strings.Contains(strings.TrimPrefix(normalized, boundaryPath+"."), ".") +} + +func (s *cachingPlannerState) popFieldsForPlanner(plannerID int, fieldRef int) { + fields, ok := s.plannerCurrentFields[plannerID] + if !ok { + return + } + + if len(fields) > 0 { + last := len(fields) - 1 + if fields[last].popOnField == fieldRef { + s.plannerCurrentFields[plannerID] = fields[:last] + } + } +} + +// configureSubscriptionEntityCachePopulation determines whether the subscription +// should populate or invalidate L2 cache entries for root entities. +func (s *cachingPlannerState) configureSubscriptionEntityCachePopulation(config *objectFetchConfiguration) { + v := s.visitor + if len(config.rootFields) == 0 { + return + } + + ds := s.findDataSourceByID(config.sourceID) + if ds == nil { + return + } + + fedConfigVal := ds.FederationConfiguration() + fedConfig := &fedConfigVal + if len(fedConfig.SubscriptionEntityPopulation) == 0 { + return + } + + // Get the subscription field's return type from the definition + subscriptionField := config.rootFields[0] + entityTypeName := s.subscriptionFieldReturnTypeName(subscriptionField.TypeName, subscriptionField.FieldName) + if entityTypeName == "" { + return + } + + // Look up subscription entity population config with a 2-tier fallback: + // 1. Exact match: type + field name (disambiguates when multiple subscription fields return the same entity type) + // 2. Union/interface resolution: check member/implementor types + resolvedTypeName, popConfig := s.resolveSubscriptionEntityPopulationConfig(entityTypeName, subscriptionField.FieldName, fedConfig) + if popConfig == nil { + return + } + entityTypeName = resolvedTypeName + // Build EntityQueryCacheKeyTemplate from entity's @key fields + entityKeys := fedConfig.RequiredFieldsByKey(entityTypeName) + if len(entityKeys) == 0 { + return + } + + var objects []*resolve.Object + for _, key := range entityKeys { + node, err := BuildRepresentationVariableNode(v.Definition, key, *fedConfig) + if err != nil { + continue + } + objects = append(objects, node) + } + if len(objects) == 0 { + return + } + + mergedObject := MergeRepresentationVariableNodes(objects) + cacheKeyTemplate := &resolve.EntityQueryCacheKeyTemplate{ + Keys: resolve.NewResolvableObjectVariable(mergedObject), + TypeName: entityTypeName, + } + + // Determine populate vs invalidate mode: + // Check if the subscription selects any non-key fields from this datasource for the entity type + keyFieldNames := s.entityKeyFieldNames(entityKeys) + hasNonKeyFields := s.subscriptionSelectsNonKeyFields(ds, entityTypeName, keyFieldNames) + + mode := resolve.SubscriptionCacheModePopulate + if !hasNonKeyFields { + if popConfig.EnableInvalidationOnKeyOnly { + mode = resolve.SubscriptionCacheModeInvalidate + } else { + // No non-key fields and invalidation not enabled — nothing to do + return + } + } + + // Use the alias (or name if no alias) from the operation AST, because + // resolvable.data uses the response field name (alias) as the JSON key. + subscriptionResponseFieldName := v.Operation.FieldAliasOrNameString(config.fieldRef) + + v.subscription.EntityCachePopulation = &resolve.SubscriptionEntityCachePopulation{ + Mode: mode, + CacheKeyTemplate: cacheKeyTemplate, + CacheName: popConfig.CacheName, + TTL: popConfig.TTL, + IncludeSubgraphHeaderPrefix: popConfig.IncludeSubgraphHeaderPrefix, + DataSourceName: config.sourceName, + SubscriptionFieldName: subscriptionResponseFieldName, + EntityTypeName: entityTypeName, + } +} + +// resolveSubscriptionEntityPopulationConfig performs a 2-tier lookup for subscription +// entity population config: +// 1. Exact match by type name + subscription field name +// 2. Union/interface member resolution (when the subscription returns an abstract type) +// +// Returns the resolved entity type name (may differ from input if an abstract type was +// resolved to a concrete member) and the config. Returns ("", nil) if no match found. +func (s *cachingPlannerState) resolveSubscriptionEntityPopulationConfig(entityTypeName, fieldName string, fedConfig *FederationMetaData) (string, *SubscriptionEntityPopulationConfiguration) { + // Tier 1: exact match on both type and field name + if config := fedConfig.SubscriptionEntityPopulation.FindByTypeAndFieldName(entityTypeName, fieldName); config != nil { + return entityTypeName, config + } + // Tier 2: abstract type resolution — check union members and interface implementors. + if resolvedName, config := s.resolveAbstractEntityPopulation(entityTypeName, fieldName, fedConfig); config != nil { + return resolvedName, config + } + return "", nil +} + +// resolveAbstractEntityPopulation checks if typeName is a union or interface type and +// returns the first member/implementor that has a SubscriptionEntityPopulation config. +func (s *cachingPlannerState) resolveAbstractEntityPopulation(typeName, fieldName string, fedConfig *FederationMetaData) (string, *SubscriptionEntityPopulationConfiguration) { + v := s.visitor + node, exists := v.Definition.Index.FirstNodeByNameStr(typeName) + if !exists { + return "", nil + } + var candidates []string + var ok bool + switch node.Kind { + case ast.NodeKindUnionTypeDefinition: + candidates, ok = v.Definition.UnionTypeDefinitionMemberTypeNames(node.Ref) + case ast.NodeKindInterfaceTypeDefinition: + candidates, ok = v.Definition.InterfaceTypeDefinitionImplementedByObjectWithNames(node.Ref) + default: + return "", nil + } + if !ok { + return "", nil + } + for _, name := range candidates { + if cfg := fedConfig.SubscriptionEntityPopulation.FindByTypeAndFieldName(name, fieldName); cfg != nil { + return name, cfg + } + } + return "", nil +} + +// subscriptionFieldReturnTypeName returns the named return type of a subscription field. +func (s *cachingPlannerState) subscriptionFieldReturnTypeName(typeName, fieldName string) string { + v := s.visitor + node, exists := v.Definition.Index.FirstNodeByNameStr(typeName) + if !exists { + return "" + } + if node.Kind != ast.NodeKindObjectTypeDefinition { + return "" + } + for _, fieldDefRef := range v.Definition.ObjectTypeDefinitions[node.Ref].FieldsDefinition.Refs { + if v.Definition.FieldDefinitionNameString(fieldDefRef) == fieldName { + return v.Definition.FieldDefinitionTypeNameString(fieldDefRef) + } + } + return "" +} + +// entityKeyFieldNames extracts top-level field names from @key configurations. +// It walks the parsed field-set AST so nested keys like "org { id }" correctly +// yield only "org" rather than the previous superset {"org", "id"}. +func (s *cachingPlannerState) entityKeyFieldNames(keys []FederationFieldConfiguration) map[string]struct{} { + result := make(map[string]struct{}) + for i := range keys { + if err := keys[i].parseSelectionSet(); err != nil { + continue + } + doc := keys[i].parsedSelectionSet + if doc == nil || len(doc.FragmentDefinitions) == 0 { + continue + } + + selectionSetRef := doc.FragmentDefinitions[0].SelectionSet + for _, fieldRef := range doc.SelectionSetFieldRefs(selectionSetRef) { + fieldName := doc.FieldNameString(fieldRef) + if fieldName == "" { + continue + } + result[fieldName] = struct{}{} + } + } + return result +} + +// subscriptionSelectsNonKeyFields checks if the operation selects any fields +// from the given datasource for the entity type that are NOT @key fields. +// It iterates the fieldEnclosingTypeNames map (already narrowed to fields we +// have type info for) rather than every operation field ref. +func (s *cachingPlannerState) subscriptionSelectsNonKeyFields(ds DataSource, entityTypeName string, keyFieldNames map[string]struct{}) bool { + v := s.visitor + for fieldRef, enclosingType := range v.fieldEnclosingTypeNames { + if enclosingType != entityTypeName { + continue + } + opFieldName := v.Operation.FieldNameString(fieldRef) + if opFieldName == "__typename" { + continue + } + if _, isKey := keyFieldNames[opFieldName]; isKey { + continue + } + if ds.HasChildNode(entityTypeName, opFieldName) || ds.HasRootNode(entityTypeName, opFieldName) { + return true + } + } + return false +} + +// configureFetchCaching determines the cache configuration for a fetch. +// For entity fetches, it looks up per-entity configuration from FederationMetaData. +// Returns disabled caching if no configuration exists or if caching is globally disabled. +func (s *cachingPlannerState) configureFetchCaching(internal *objectFetchConfiguration, external resolve.FetchConfiguration) resolve.FetchCacheConfiguration { + v := s.visitor + // Populate ProvidesData on requestScoped fields using the planner's response + // Object tree. This enables alias-aware normalization/denormalization (same + // pipeline as entity L1 / L2 caches). Fields without aliases or args get a + // fast path via Object.HasAliases. + plannerObj := s.plannerObjects[internal.fetchID] + requestScopedFields := s.populateRequestScopedFieldsProvidesData(external.Caching.RequestScopedFields, plannerObj) + + // Always preserve CacheKeyTemplate for L1 cache - L1 cache works independently of L2 cache. + // The Enabled flag controls L2 cache only, not L1 cache. + // L1 cache uses CacheKeyTemplate.Keys and is controlled by ctx.ExecutionOptions.Caching.EnableL1Cache. + // UseL1Cache defaults to false - the postprocessor (optimizeL1Cache) will enable it when beneficial. + result := resolve.FetchCacheConfiguration{ + CacheKeyTemplate: external.Caching.CacheKeyTemplate, + RootFieldL1EntityCacheKeyTemplates: external.Caching.RootFieldL1EntityCacheKeyTemplates, + RequestScopedFields: requestScopedFields, + } + if rootTemplate, ok := external.Caching.CacheKeyTemplate.(*resolve.RootQueryCacheKeyTemplate); ok { + result.BatchEntityKeyArgumentPathHint = rootTemplate.BatchEntityKeyArgumentPath() + } + + // For mutations returning cached entities: enable mutation impact detection. + // This runs before the L2 caching checks because mutations don't have CacheKeyTemplate + // (they go through a separate path), but we still want to annotate the fetch for + // runtime mutation impact detection. + if internal.operationType == ast.OperationTypeMutation && len(internal.rootFields) > 0 { + if !v.Config.DisableEntityCaching { + s.configureMutationEntityImpact(internal, &result) + } + // Look up per-mutation-field cache config from the subgraph that owns the mutation + ds := s.findDataSourceByID(internal.sourceID) + if ds != nil { + if mutConfig := ds.MutationFieldCacheConfig(internal.rootFields[0].FieldName); mutConfig != nil { + result.EnableMutationL2CachePopulation = mutConfig.EnableEntityL2CachePopulation + result.MutationCacheTTLOverride = mutConfig.TTL + } + } + } + + // Global disable takes precedence for L2 cache + if v.Config.DisableEntityCaching { + return result + } + + // No cache key template = caching not applicable + if external.Caching.CacheKeyTemplate == nil { + return result + } + + // Must have at least 1 root field to determine cache config + if len(internal.rootFields) == 0 { + return result + } + + // Find the datasource by ID to access FederationMetaData + ds := s.findDataSourceByID(internal.sourceID) + if ds == nil { + return result + } + + fedConfig := ds.FederationConfiguration() + + // Check if this is an entity fetch or a root field fetch + if external.RequiresEntityFetch || external.RequiresEntityBatchFetch { + // Entity fetch: look up cache config for the entity type + // All root fields in an entity fetch belong to the same entity type + entityTypeName := internal.rootFields[0].TypeName + cacheConfig := fedConfig.EntityCacheConfig(entityTypeName) + + // Extract key fields from cache key template (plan time) + var keyFields []resolve.KeyField + if entityTemplate, ok := external.Caching.CacheKeyTemplate.(*resolve.EntityQueryCacheKeyTemplate); ok { + keyFields = entityTemplate.KeyFields() + } + + if cacheConfig == nil { + // No config = L2 caching disabled for this entity (opt-in model) + // L1 cache can still work since CacheKeyTemplate is preserved + // Still provide key fields for analytics + result.KeyFields = keyFields + return result + } + + // L2 cache is enabled for this entity type + // UseL1Cache is set by the postprocessor (optimizeL1Cache) when beneficial + return resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: cacheConfig.CacheName, + TTL: cacheConfig.TTL, + CacheKeyTemplate: external.Caching.CacheKeyTemplate, + IncludeSubgraphHeaderPrefix: cacheConfig.IncludeSubgraphHeaderPrefix, + EnablePartialCacheLoad: cacheConfig.EnablePartialCacheLoad, + HashAnalyticsKeys: cacheConfig.HashAnalyticsKeys, + KeyFields: keyFields, + ShadowMode: cacheConfig.ShadowMode, + NegativeCacheTTL: cacheConfig.NegativeCacheTTL, + BatchEntityKeyArgumentPathHint: result.BatchEntityKeyArgumentPathHint, + // Preserve requestScoped hints/exports through the entity-cache-enabled path. + RequestScopedFields: requestScopedFields, + } + } + + // Root field fetch: find common cache config for all root fields + // All root fields in the fetch must have the same cache config for L2 caching to be enabled + + // Root field caching only applies to queries - mutations and subscriptions + // should never cache root field responses in L2 (they would never be read). + if internal.operationType != ast.OperationTypeQuery { + return result + } + + var commonConfig *RootFieldCacheConfiguration + for i := range internal.rootFields { + rootField := internal.rootFields[i] + cacheConfig := fedConfig.RootFieldCacheConfig(rootField.TypeName, rootField.FieldName) + if cacheConfig == nil { + // No config for this field = L2 caching disabled for this fetch + return result + } + if commonConfig == nil { + commonConfig = cacheConfig + } else { + // Check if config matches the common config + if commonConfig.CacheName != cacheConfig.CacheName || + commonConfig.TTL != cacheConfig.TTL || + commonConfig.IncludeSubgraphHeaderPrefix != cacheConfig.IncludeSubgraphHeaderPrefix { + // Different configs = can't enable L2 caching for this fetch + return result + } + } + } + + if commonConfig == nil { + return result + } + + // L2 cache is enabled - all root fields have the same cache config + // UseL1Cache is set by the postprocessor (optimizeL1Cache) when beneficial + return resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: commonConfig.CacheName, + TTL: commonConfig.TTL, + CacheKeyTemplate: external.Caching.CacheKeyTemplate, + IncludeSubgraphHeaderPrefix: commonConfig.IncludeSubgraphHeaderPrefix, + RootFieldL1EntityCacheKeyTemplates: external.Caching.RootFieldL1EntityCacheKeyTemplates, + ShadowMode: commonConfig.ShadowMode, + PartialBatchLoad: commonConfig.PartialBatchLoad, + BatchEntityKeyArgumentPathHint: result.BatchEntityKeyArgumentPathHint, + // Preserve requestScoped fields through the L2-enabled root field path. + RequestScopedFields: requestScopedFields, + } +} + +// populateRequestScopedFieldsProvidesData fills in ProvidesData by locating the +// matching sub-Object in the planner's response tree. The match is by response +// key (field.Name), since the datasource planner already resolves aliases. +// +// If plannerObj is nil or no matching field is found, ProvidesData is left nil +// (resolver falls back to raw byte storage, loses alias awareness). +func (s *cachingPlannerState) populateRequestScopedFieldsProvidesData(fields []resolve.RequestScopedField, plannerObj *resolve.Object) []resolve.RequestScopedField { + if len(fields) == 0 || plannerObj == nil { + return fields + } + out := make([]resolve.RequestScopedField, len(fields)) + for i, f := range fields { + out[i] = f + sub := s.findObjectFieldByResponseKey(plannerObj, f.FieldName) + if sub != nil { + resolve.ComputeHasAliases(sub) + out[i].ProvidesData = sub + } + } + return out +} + +// findObjectFieldByResponseKey walks the Object's top-level fields looking for one +// whose response key (field.Name) matches, and returns its value Object (if the +// value is an Object). Returns nil if not found or if the value is not an Object. +func (s *cachingPlannerState) findObjectFieldByResponseKey(obj *resolve.Object, responseKey string) *resolve.Object { + if obj == nil { + return nil + } + for _, field := range obj.Fields { + if string(field.Name) == responseKey { + if sub, ok := field.Value.(*resolve.Object); ok { + return sub + } + return nil + } + } + return nil +} + +// findDataSourceByID finds the datasource configuration for a given source ID +func (s *cachingPlannerState) findDataSourceByID(sourceID string) DataSource { + v := s.visitor + for i := range v.Config.DataSources { + if v.Config.DataSources[i].Id() == sourceID { + return v.Config.DataSources[i] + } + } + return nil +} + +// configureMutationEntityImpact checks if a mutation returns a cached entity and annotates +// the fetch config with MutationEntityImpactConfig for runtime cache staleness detection. +func (s *cachingPlannerState) configureMutationEntityImpact(internal *objectFetchConfiguration, result *resolve.FetchCacheConfiguration) { + returnTypeName := s.resolveMutationReturnType(internal.fieldDefinitionRef) + if returnTypeName == "" { + return + } + + ds := s.findDataSourceByID(internal.sourceID) + if ds == nil { + return + } + + fedConfig := ds.FederationConfiguration() + entityCacheConfig := fedConfig.EntityCacheConfig(returnTypeName) + if entityCacheConfig == nil { + return + } + + // Merge key fields from ALL @key configurations so entities with multiple keys + // keep every invalidation-relevant field (top-level fields deduped by name). + keyConfigs := fedConfig.RequiredFieldsByKey(returnTypeName) + keyFields := extractKeyFields(keyConfigs, returnTypeName) + + result.MutationEntityImpactConfig = &resolve.MutationEntityImpactConfig{ + EntityTypeName: returnTypeName, + KeyFields: keyFields, + CacheName: entityCacheConfig.CacheName, + IncludeSubgraphHeaderPrefix: entityCacheConfig.IncludeSubgraphHeaderPrefix, + } + + // Check if this specific mutation field is configured for cache invalidation + // or populate. A field is annotated with one or the other in composition. + if len(internal.rootFields) > 0 { + mutationFieldName := internal.rootFields[0].FieldName + if fedConfig.MutationCacheInvalidationConfig(mutationFieldName) != nil { + result.MutationEntityImpactConfig.InvalidateCache = true + } + // `@cachePopulate` arrives via MutationFieldCacheConfig with EnableEntityL2CachePopulation. + // The flag was originally added to thread the populate intent through to follow-up entity + // fetches in federated mutations; here we extend it to single-subgraph mutations where the + // entity is returned directly and there is no follow-up fetch to inherit it. + if mutCfg := fedConfig.MutationFieldCacheConfig(mutationFieldName); mutCfg != nil && mutCfg.EnableEntityL2CachePopulation { + result.MutationEntityImpactConfig.PopulateCache = true + result.MutationEntityImpactConfig.PopulateTTL = mutCfg.TTL + } + } +} + +// resolveMutationReturnType resolves the return type name of a mutation field definition. +func (s *cachingPlannerState) resolveMutationReturnType(fieldDefinitionRef int) string { + v := s.visitor + if fieldDefinitionRef < 0 { + return "" + } + typeRef := v.Definition.FieldDefinitionType(fieldDefinitionRef) + underlyingType := v.Definition.ResolveUnderlyingType(typeRef) + if underlyingType != -1 { + return v.Definition.ResolveTypeNameString(underlyingType) + } + return v.Definition.ResolveTypeNameString(typeRef) +} + +// entityCacheAnalytics returns the ObjectCacheAnalytics for a given type name. +// Uses a lazy cache to avoid repeated scans across datasources. +// Returns nil if the type is not an entity. +func (s *cachingPlannerState) entityCacheAnalytics(typeName string) *resolve.ObjectCacheAnalytics { + if s.entityAnalyticsCache == nil { + s.entityAnalyticsCache = make(map[string]*resolve.ObjectCacheAnalytics) + } + if cached, ok := s.entityAnalyticsCache[typeName]; ok { + return cached // may be nil (not entity) + } + + // Scan all datasources for this entity type + for i := range s.visitor.Config.DataSources { + ds := s.visitor.Config.DataSources[i] + fedConfig := ds.FederationConfiguration() + if !fedConfig.HasEntity(typeName) { + continue + } + // Extract full key structure from @key SelectionSets + keys := fedConfig.Keys.FilterByTypeAndResolvability(typeName, true) + keyFields := extractKeyFields(keys, typeName) + // Get hash mode from entity cache config (default false) + var hashKeys bool + if cacheConfig := fedConfig.EntityCacheConfig(typeName); cacheConfig != nil { + hashKeys = cacheConfig.HashAnalyticsKeys + } + result := &resolve.ObjectCacheAnalytics{ + KeyFields: keyFields, + HashKeys: hashKeys, + } + s.entityAnalyticsCache[typeName] = result + return result + } + + s.entityAnalyticsCache[typeName] = nil // not an entity + return nil +} + +// polymorphicEntityCacheAnalytics returns per-concrete-type cache analytics for an +// interface/union object. Returns nil when none of the possible types is an entity +// (so the caller can assign unconditionally). +func (s *cachingPlannerState) polymorphicEntityCacheAnalytics(possibleTypes map[string]struct{}) *resolve.ObjectCacheAnalytics { + byTypeName := make(map[string]*resolve.ObjectCacheAnalytics, len(possibleTypes)) + for possibleType := range possibleTypes { + if analytics := s.entityCacheAnalytics(possibleType); analytics != nil { + byTypeName[possibleType] = analytics + } + } + if len(byTypeName) == 0 { + return nil + } + return &resolve.ObjectCacheAnalytics{ByTypeName: byTypeName} +} + +// extractKeyFields extracts the full structured key from @key SelectionSets. +// Merges all @key directives for the type, deduplicating top-level names. +func extractKeyFields(keys []FederationFieldConfiguration, typeName string) []resolve.KeyField { + var result []resolve.KeyField + seen := make(map[string]struct{}) + for i := range keys { + if keys[i].TypeName != typeName || keys[i].FieldName != "" { + continue + } + for _, kf := range resolve.ParseKeyFields(keys[i].SelectionSet) { + if kf.Name == "__typename" { + continue + } + if _, ok := seen[kf.Name]; !ok { + seen[kf.Name] = struct{}{} + result = append(result, kf) + } + } + } + return result +} diff --git a/v2/pkg/engine/plan/configuration.go b/v2/pkg/engine/plan/configuration.go index eebd9df352..a2f05fbd1c 100644 --- a/v2/pkg/engine/plan/configuration.go +++ b/v2/pkg/engine/plan/configuration.go @@ -47,6 +47,12 @@ type Configuration struct { // This option requires BuildFetchReasons set to true. ValidateRequiredExternalFields bool + // DisableEntityCaching disables planning of L2 entity caching metadata and mutation-impact logic. + // L1 cache templates are still generated regardless of this setting. + DisableEntityCaching bool + // DisableFetchProvidesData disables planning of meta information about which fields are provided by a fetch + DisableFetchProvidesData bool + // ComputeCosts enables static cost computation for operations. ComputeCosts bool diff --git a/v2/pkg/engine/plan/datasource_configuration.go b/v2/pkg/engine/plan/datasource_configuration.go index 0601e3abfb..470e871748 100644 --- a/v2/pkg/engine/plan/datasource_configuration.go +++ b/v2/pkg/engine/plan/datasource_configuration.go @@ -343,6 +343,22 @@ func (d *dataSourceConfiguration[T]) FederationConfiguration() FederationMetaDat return d.FederationMetaData } +func (d *dataSourceConfiguration[T]) EntityCacheConfig(typeName string) *EntityCacheConfiguration { + return d.FederationMetaData.EntityCacheConfig(typeName) +} + +func (d *dataSourceConfiguration[T]) RootFieldCacheConfig(typeName, fieldName string) *RootFieldCacheConfiguration { + return d.FederationMetaData.RootFieldCacheConfig(typeName, fieldName) +} + +func (d *dataSourceConfiguration[T]) MutationCacheInvalidationConfig(fieldName string) *MutationCacheInvalidationConfiguration { + return d.FederationMetaData.MutationCacheInvalidationConfig(fieldName) +} + +func (d *dataSourceConfiguration[T]) MutationFieldCacheConfig(fieldName string) *MutationFieldCacheConfiguration { + return d.FederationMetaData.MutationFieldCacheConfig(fieldName) +} + func (d *dataSourceConfiguration[T]) Hash() DSHash { return d.hash } diff --git a/v2/pkg/engine/plan/datasource_filter_visitor_test.go b/v2/pkg/engine/plan/datasource_filter_visitor_test.go index c385c23d26..0b8751f074 100644 --- a/v2/pkg/engine/plan/datasource_filter_visitor_test.go +++ b/v2/pkg/engine/plan/datasource_filter_visitor_test.go @@ -10,14 +10,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/wundergraph/graphql-go-tools/v2/pkg/astvalidation" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafeparser" "github.com/wundergraph/graphql-go-tools/v2/pkg/operationreport" "github.com/wundergraph/graphql-go-tools/v2/pkg/testing/permutations" ) type dsBuilder struct { - ds *dataSourceConfiguration[any] - behavior *DataSourcePlanningBehavior + ds *dataSourceConfiguration[any] + behavior *DataSourcePlanningBehavior + cacheKeyTemplate resolve.CacheKeyTemplate } func dsb() *dsBuilder { @@ -64,11 +66,17 @@ func (b *dsBuilder) WithBehavior(behavior DataSourcePlanningBehavior) *dsBuilder return b } +func (b *dsBuilder) CacheKeyTemplate(t resolve.CacheKeyTemplate) *dsBuilder { + b.cacheKeyTemplate = t + return b +} + func (b *dsBuilder) Schema(schema string) *dsBuilder { def := unsafeparser.ParseGraphqlDocumentString(schema) b.ds.factory = &FakeFactory[any]{ - upstreamSchema: &def, - behavior: b.behavior, + upstreamSchema: &def, + behavior: b.behavior, + cacheKeyTemplate: b.cacheKeyTemplate, } return b } @@ -76,8 +84,9 @@ func (b *dsBuilder) Schema(schema string) *dsBuilder { func (b *dsBuilder) SchemaMergedWithBase(schema string) *dsBuilder { def := unsafeparser.ParseGraphqlDocumentStringWithBaseSchema(schema) b.ds.factory = &FakeFactory[any]{ - upstreamSchema: &def, - behavior: b.behavior, + upstreamSchema: &def, + behavior: b.behavior, + cacheKeyTemplate: b.cacheKeyTemplate, } return b } @@ -101,6 +110,11 @@ func (b *dsBuilder) Id(id string) *dsBuilder { b.ds.id = id return b } + +func (b *dsBuilder) Name(name string) *dsBuilder { + b.ds.name = name + return b +} func (b *dsBuilder) DS() DataSource { if err := b.ds.DataSourceMetadata.Init(); err != nil { panic(err) diff --git a/v2/pkg/engine/plan/federation_metadata.go b/v2/pkg/engine/plan/federation_metadata.go index 748b507415..f9f513c9dc 100644 --- a/v2/pkg/engine/plan/federation_metadata.go +++ b/v2/pkg/engine/plan/federation_metadata.go @@ -4,16 +4,39 @@ import ( "encoding/json" "fmt" "slices" + "time" "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" ) +// RequestScopedField declares a field whose value resolves to the same value for +// all other fields in the same subgraph that share the same L1Key (all fields with +// @requestScoped(key: "X") have L1Key = "{subgraphName}.X"). The directive is +// purely symmetric — there is no receiver/provider distinction. Every field that +// participates is both: +// - A reader (the planner emits a hint so the resolver can inject from L1) +// - A writer (the planner emits an export so the resolver stores the value after fetch) +// +// The first field to resolve populates L1; subsequent fields inject from L1 and can +// skip their fetch when all required sub-fields are present. +type RequestScopedField struct { + FieldName string // field name, e.g. "currentViewer" + TypeName string // enclosing type name, e.g. "Personalized" or "Query" + L1Key string // L1 cache key, format "{subgraphName}.{key}" +} + type FederationMetaData struct { - Keys FederationFieldConfigurations - Requires FederationFieldConfigurations - Provides FederationFieldConfigurations - EntityInterfaces []EntityInterfaceConfiguration - InterfaceObjects []EntityInterfaceConfiguration + Keys FederationFieldConfigurations + Requires FederationFieldConfigurations + Provides FederationFieldConfigurations + EntityInterfaces []EntityInterfaceConfiguration + InterfaceObjects []EntityInterfaceConfiguration + EntityCaching EntityCacheConfigurations + RootFieldCaching RootFieldCacheConfigurations + MutationFieldCaching MutationFieldCacheConfigurations + SubscriptionEntityPopulation SubscriptionEntityPopulationConfigurations + MutationCacheInvalidation MutationCacheInvalidationConfigurations + RequestScopedFields []RequestScopedField entityTypeNames map[string]struct{} } @@ -26,6 +49,10 @@ type FederationInfo interface { HasInterfaceObject(typeName string) bool HasEntityInterface(typeName string) bool EntityInterfaceNames() []string + EntityCacheConfig(typeName string) *EntityCacheConfiguration + RootFieldCacheConfig(typeName, fieldName string) *RootFieldCacheConfiguration + MutationCacheInvalidationConfig(fieldName string) *MutationCacheInvalidationConfiguration + MutationFieldCacheConfig(fieldName string) *MutationFieldCacheConfiguration } func (d *FederationMetaData) HasKeyRequirement(typeName, requiresFields string) bool { @@ -74,6 +101,292 @@ type EntityInterfaceConfiguration struct { ConcreteTypeNames []string } +// EntityCacheConfiguration defines L2 caching behavior for a specific entity type. +// This configuration is subgraph-local: each subgraph configures caching for entities it provides. +// Caching is opt-in: entities without configuration will not be cached in L2. +type EntityCacheConfiguration struct { + // TypeName is the GraphQL type name of the entity to cache (e.g., "User", "Product"). + // This must match the __typename returned by the subgraph for _entities queries. + TypeName string `json:"type_name"` + + // CacheName identifies which LoaderCache instance to use for storing this entity. + // Multiple entity types can share a cache by using the same CacheName. + // The cache name must be registered in the Loader's caches map at runtime. + CacheName string `json:"cache_name"` + + // TTL (Time To Live) specifies how long cached entities remain valid. + // After TTL expires, the next request will fetch fresh data from the subgraph. + // A zero TTL means entries never expire (not recommended for production). + TTL time.Duration `json:"ttl"` + + // IncludeSubgraphHeaderPrefix controls whether forwarded headers affect cache keys. + // When true, cache keys include a hash of the headers sent to the subgraph, + // ensuring different header configurations (e.g., different auth tokens) use + // separate cache entries. Set to true when subgraph responses vary by headers. + IncludeSubgraphHeaderPrefix bool `json:"include_subgraph_header_prefix"` + + // EnablePartialCacheLoad enables fetching only cache-missed entities from the subgraph. + // Default behavior (false): If ANY entity in a batch is missing from cache, ALL entities + // are fetched from the subgraph. This keeps the cache fresh but may overfetch. + // When enabled (true): Only missing entities are fetched; cached entities are served + // directly from cache. This reduces subgraph load but cached entities may become stale + // within their TTL window. Use when cache freshness is acceptable within TTL bounds. + EnablePartialCacheLoad bool `json:"enable_partial_cache_load"` + + // HashAnalyticsKeys controls whether entity keys are hashed (true) or stored raw (false) + // in cache analytics EntityFieldHash entries. When true, KeyHash is populated instead of KeyRaw. + HashAnalyticsKeys bool `json:"hash_analytics_keys"` + + // ShadowMode enables shadow caching for this entity type. + // When true, L2 cache reads and writes still occur, but cached data is never served. + // Instead, fresh data is always fetched from the subgraph and compared against the cached value + // to detect staleness. L1 cache works normally (not affected by shadow mode). + ShadowMode bool `json:"shadow_mode"` + + // NegativeCacheTTL is the TTL for caching null entity results (entity not found). + // When > 0, null responses (entity returned null without errors from _entities) are cached + // as negative sentinels to avoid repeated subgraph lookups for non-existent entities. + // When 0 (default), null entities are not cached and will be re-fetched on every request. + NegativeCacheTTL time.Duration `json:"negative_cache_ttl,omitzero"` +} + +// EntityCacheConfigurations is a collection of entity cache configurations. +type EntityCacheConfigurations []EntityCacheConfiguration + +// FindByTypeName returns the cache configuration for the given entity type. +// Returns nil if no configuration exists (caching disabled for this entity). +func (c EntityCacheConfigurations) FindByTypeName(typeName string) *EntityCacheConfiguration { + for i := range c { + if c[i].TypeName == typeName { + return &c[i] + } + } + return nil +} + +// RootFieldCacheConfiguration defines L2 caching behavior for a specific root field. +// This configuration is subgraph-local: each subgraph configures caching for root fields it provides. +type RootFieldCacheConfiguration struct { + // TypeName is the type containing the field (e.g., "Query", "Mutation") + TypeName string `json:"type_name"` + // FieldName is the name of the root field to cache (e.g., "topProducts", "me") + FieldName string `json:"field_name"` + // CacheName is the name of the cache to use (maps to LoaderCache instances) + CacheName string `json:"cache_name"` + // TTL is the time-to-live for cached responses + TTL time.Duration `json:"ttl"` + // IncludeSubgraphHeaderPrefix indicates if forwarded headers affect cache key. + // When true, different header values result in different cache keys. + IncludeSubgraphHeaderPrefix bool `json:"include_subgraph_header_prefix"` + // EntityKeyMappings configures derived entity cache keys for this root field. + // When set, the L2 cache key uses entity key format instead of root field format, + // enabling cache sharing between root field queries and entity fetches. + EntityKeyMappings []EntityKeyMapping `json:"entity_key_mappings,omitempty"` + + // ShadowMode enables shadow caching for this root field. + // When true, L2 cache reads and writes still occur, but cached data is never served. + // Instead, fresh data is always fetched from the subgraph and compared against the cached value. + // Note: shadow mode behavior is currently implemented for entity fetches only. + ShadowMode bool `json:"shadow_mode"` + + // PartialBatchLoad enables partial fetch mode for batch arguments (ArgumentIsEntityKey + list). + // When false (default), batch cache is all-or-nothing: any miss fetches the full list. + // When true, only missing IDs are fetched; cached entities are served directly. + PartialBatchLoad bool `json:"partial_batch_load,omitempty"` +} + +// EntityKeyMapping defines how a root field's arguments map to entity @key fields. +// When configured, the root field's L2 cache key uses the entity key format +// (e.g., {"__typename":"User","key":{"id":"123"}}) instead of the root field format. +// This enables cache sharing between root field queries and entity fetches. +type EntityKeyMapping struct { + // EntityTypeName is the entity type returned by the root field (e.g., "User") + EntityTypeName string `json:"entity_type_name"` + // FieldMappings maps entity @key fields to root field arguments + FieldMappings []FieldMapping `json:"field_mappings"` +} + +// FieldMapping maps an entity @key field to a root field argument path. +type FieldMapping struct { + // EntityKeyField is the @key field name on the entity (e.g., "id") + EntityKeyField string `json:"entity_key_field"` + // ArgumentPath is the path into ctx.Variables to extract the argument value. + // Uses the same []string format as ContextVariable.Path. + // Object keys: ["id"], ["input", "userId"] + // Array index: ["ids", "0"] (decimal string) + // Subject to ctx.RemapVariables when len==1 + ArgumentPath []string `json:"argument_path"` + // ArgumentIsEntityKey marks the argument as a direct entity key lookup. + // When true AND the argument is a list type, each list element maps 1:1 + // to an entity in the response (positional correspondence). + // This enables: + // - Batch cache key construction (one cache key per list element) + // - Empty list optimization ([] → empty response, resolver skipped) + // - Partial fetch mode (fetch only missing entities by filtering the list) + // When false, the argument is treated as a filter/search parameter and + // the engine cannot make assumptions about the response shape. + ArgumentIsEntityKey bool `json:"argument_is_entity_key,omitempty"` +} + +// RootFieldCacheConfigurations is a collection of root field cache configurations. +type RootFieldCacheConfigurations []RootFieldCacheConfiguration + +// FindByTypeAndField returns the cache configuration for the given type and field. +// Returns nil if no configuration exists (caching disabled for this root field). +func (c RootFieldCacheConfigurations) FindByTypeAndField(typeName, fieldName string) *RootFieldCacheConfiguration { + for i := range c { + if c[i].TypeName == typeName && c[i].FieldName == fieldName { + return &c[i] + } + } + return nil +} + +// MutationFieldCacheConfiguration controls cache behavior for entity fetches +// triggered by a specific mutation root field. The subgraph that owns the mutation +// field decides whether entity data fetched during that mutation populates L2. +type MutationFieldCacheConfiguration struct { + // FieldName is the mutation root field name (e.g., "addReview", "deleteUser"). + FieldName string `json:"field_name"` + // EnableEntityL2CachePopulation allows entity fetches triggered by this + // mutation to write to the L2 cache. Mutations always skip L2 reads + // (existing behavior). By default, mutations do NOT populate L2. + // Set to true to opt in to L2 cache population for this mutation field. + EnableEntityL2CachePopulation bool `json:"enable_entity_l2_cache_population"` + // TTL overrides the entity's default cache TTL for L2 writes triggered by this mutation. + // When zero, the entity's default TTL (from EntityCacheConfiguration) is used. + TTL time.Duration `json:"ttl,omitempty"` +} + +// MutationFieldCacheConfigurations is a collection of mutation field cache configurations. +type MutationFieldCacheConfigurations []MutationFieldCacheConfiguration + +// FindByFieldName returns the mutation field cache config for the given field name. +// Returns nil if no configuration exists. +func (c MutationFieldCacheConfigurations) FindByFieldName(fieldName string) *MutationFieldCacheConfiguration { + for i := range c { + if c[i].FieldName == fieldName { + return &c[i] + } + } + return nil +} + +// SubscriptionEntityPopulationConfiguration defines how a subscription should +// manage L2 cache entries for root entities received via subscription events. +// +// Two modes are supported: +// - Populate: When the subscription selects entity fields beyond @key, write those +// fields to L2 on each event. This allows subsequent queries to hit the L2 cache. +// - Invalidate: When the subscription only provides @key fields (and +// EnableInvalidationOnKeyOnly is true), DELETE the L2 cache entry on each event. +// This ensures stale data is evicted when the entity changes. +type SubscriptionEntityPopulationConfiguration struct { + // TypeName is the entity type managed by this subscription (e.g., "Product"). + TypeName string `json:"type_name"` + // FieldName is the subscription root field name (e.g., "itemCreated"). + // Used to disambiguate when multiple subscription fields return the same entity type. + FieldName string `json:"field_name,omitempty"` + // CacheName identifies which LoaderCache instance to use. + CacheName string `json:"cache_name"` + // TTL is the time-to-live for populated cache entries. + TTL time.Duration `json:"ttl"` + // IncludeSubgraphHeaderPrefix controls whether forwarded headers affect cache keys. + IncludeSubgraphHeaderPrefix bool `json:"include_subgraph_header_prefix"` + // EnableInvalidationOnKeyOnly: when true and the subscription only provides + // @key fields (no additional entity fields), DELETE the L2 cache entry on + // each subscription event instead of populating it. + EnableInvalidationOnKeyOnly bool `json:"enable_invalidation_on_key_only"` +} + +// SubscriptionEntityPopulationConfigurations is a collection of subscription entity population configurations. +type SubscriptionEntityPopulationConfigurations []SubscriptionEntityPopulationConfiguration + +// FindByTypeAndFieldName returns the subscription entity population config matching +// both the entity type name and subscription field name. Returns nil if no match. +func (c SubscriptionEntityPopulationConfigurations) FindByTypeAndFieldName(typeName, fieldName string) *SubscriptionEntityPopulationConfiguration { + for i := range c { + if c[i].TypeName == typeName && c[i].FieldName == fieldName { + return &c[i] + } + } + return nil +} + +// MutationCacheInvalidationConfiguration defines which mutation fields should +// invalidate (delete) L2 cache entries for the entity they return. +type MutationCacheInvalidationConfiguration struct { + // FieldName is the mutation field name (e.g., "updateUser", "deleteUser"). + FieldName string `json:"field_name"` + // EntityTypeName is the return entity type (e.g., "User"). + // If empty, it is inferred from the mutation return type at plan time. + EntityTypeName string `json:"entity_type_name,omitempty"` +} + +// MutationCacheInvalidationConfigurations is a collection of mutation cache invalidation configurations. +type MutationCacheInvalidationConfigurations []MutationCacheInvalidationConfiguration + +// FindByFieldName returns the invalidation config for the given mutation field. +// Returns nil if no configuration exists (no invalidation for this field). +func (c MutationCacheInvalidationConfigurations) FindByFieldName(fieldName string) *MutationCacheInvalidationConfiguration { + for i := range c { + if c[i].FieldName == fieldName { + return &c[i] + } + } + return nil +} + +// EntityCacheConfig returns the cache configuration for the given entity type. +// Returns nil if no configuration exists (caching is not configured for this entity). +func (d *FederationMetaData) EntityCacheConfig(typeName string) *EntityCacheConfiguration { + return d.EntityCaching.FindByTypeName(typeName) +} + +// RootFieldCacheConfig returns the cache configuration for the given root field. +// Returns nil if no configuration exists (caching is not configured for this root field). +func (d *FederationMetaData) RootFieldCacheConfig(typeName, fieldName string) *RootFieldCacheConfiguration { + return d.RootFieldCaching.FindByTypeAndField(typeName, fieldName) +} + +// MutationCacheInvalidationConfig returns the invalidation config for the given mutation field. +// Returns nil if no configuration exists (no invalidation for this field). +func (d *FederationMetaData) MutationCacheInvalidationConfig(fieldName string) *MutationCacheInvalidationConfiguration { + return d.MutationCacheInvalidation.FindByFieldName(fieldName) +} + +// MutationFieldCacheConfig returns the cache configuration for the given mutation field. +// Returns nil if no configuration exists. +func (d *FederationMetaData) MutationFieldCacheConfig(fieldName string) *MutationFieldCacheConfiguration { + return d.MutationFieldCaching.FindByFieldName(fieldName) +} + +// RequestScopedFieldsForType returns all @requestScoped fields for the given type. +// These are fields that can be read from (and written to) the coordinate L1 cache. +func (d *FederationMetaData) RequestScopedFieldsForType(typeName string) []RequestScopedField { + var result []RequestScopedField + for _, f := range d.RequestScopedFields { + if f.TypeName == typeName { + result = append(result, f) + } + } + return result +} + +// RequestScopedExportsForField returns the L1 keys that should be exported when +// a given field is fetched. Under the symmetric model, every field annotated with +// @requestScoped exports its value to L1 (so another field with the same key can +// later inject from it). The lookup matches by TypeName + FieldName. +func (d *FederationMetaData) RequestScopedExportsForField(typeName, fieldName string) []string { + var keys []string + for _, f := range d.RequestScopedFields { + if f.TypeName == typeName && f.FieldName == fieldName { + keys = append(keys, f.L1Key) + } + } + return keys +} + type FederationFieldConfiguration struct { TypeName string `json:"type_name"` // TypeName is the name of the Entity the Fragment is for FieldName string `json:"field_name,omitempty"` // FieldName is empty for key requirements, otherwise, it is the name of the field that has requires or provides directive diff --git a/v2/pkg/engine/plan/federation_metadata_test.go b/v2/pkg/engine/plan/federation_metadata_test.go new file mode 100644 index 0000000000..a73cd7cccd --- /dev/null +++ b/v2/pkg/engine/plan/federation_metadata_test.go @@ -0,0 +1,75 @@ +package plan + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRequestScopedFieldsForType(t *testing.T) { + // Symmetric model: every field annotated with @requestScoped is both a reader + // and a writer of its L1 key. Fields with the same L1Key (same @requestScoped(key)) + // share the same L1 entry. + meta := FederationMetaData{ + RequestScopedFields: []RequestScopedField{ + // Two fields in the viewer subgraph sharing the "viewer" key — both read/write + // L1 under "viewer.viewer". + {FieldName: "currentViewer", TypeName: "Query", L1Key: "viewer.viewer"}, + {FieldName: "currentViewer", TypeName: "Personalized", L1Key: "viewer.viewer"}, + // A separate key for locale + {FieldName: "locale", TypeName: "Query", L1Key: "viewer.locale"}, + {FieldName: "locale", TypeName: "Personalized", L1Key: "viewer.locale"}, + // Unrelated key on a different type + {FieldName: "theme", TypeName: "Settings", L1Key: "viewer.theme"}, + }, + } + + got := meta.RequestScopedFieldsForType("Personalized") + assert.Len(t, got, 2) + assert.Equal(t, "currentViewer", got[0].FieldName) + assert.Equal(t, "locale", got[1].FieldName) + + got = meta.RequestScopedFieldsForType("Query") + assert.Len(t, got, 2) + + got = meta.RequestScopedFieldsForType("Settings") + assert.Len(t, got, 1) + assert.Equal(t, "theme", got[0].FieldName) + + got = meta.RequestScopedFieldsForType("NonExistent") + assert.Nil(t, got) +} + +func TestRequestScopedExportsForField(t *testing.T) { + // A field that is @requestScoped exports its own L1 key (symmetric — every + // participating field writes its value to L1 after fetch, and other fields + // with the same L1 key inject from it on later fetches). + meta := FederationMetaData{ + RequestScopedFields: []RequestScopedField{ + {FieldName: "currentViewer", TypeName: "Query", L1Key: "viewer.viewer"}, + {FieldName: "currentViewer", TypeName: "Personalized", L1Key: "viewer.viewer"}, + {FieldName: "locale", TypeName: "Query", L1Key: "viewer.locale"}, + {FieldName: "theme", TypeName: "Settings", L1Key: "viewer.theme"}, + }, + } + + // Query.currentViewer is a @requestScoped field → it exports its L1 key. + keys := meta.RequestScopedExportsForField("Query", "currentViewer") + assert.Equal(t, []string{"viewer.viewer"}, keys) + + // Personalized.currentViewer is the same key — also exports. + keys = meta.RequestScopedExportsForField("Personalized", "currentViewer") + assert.Equal(t, []string{"viewer.viewer"}, keys) + + // Query.locale exports its own (different) key. + keys = meta.RequestScopedExportsForField("Query", "locale") + assert.Equal(t, []string{"viewer.locale"}, keys) + + // A field that is not @requestScoped exports nothing. + keys = meta.RequestScopedExportsForField("Query", "nonExistent") + assert.Nil(t, keys) + + // A @requestScoped field on a different type than queried — no match. + keys = meta.RequestScopedExportsForField("Query", "theme") + assert.Nil(t, keys) +} diff --git a/v2/pkg/engine/plan/node_selection_builder.go b/v2/pkg/engine/plan/node_selection_builder.go index b60363c289..75bfde9f1e 100644 --- a/v2/pkg/engine/plan/node_selection_builder.go +++ b/v2/pkg/engine/plan/node_selection_builder.go @@ -48,6 +48,9 @@ type NodeSelectionResult struct { fieldRefDependsOn map[int][]int fieldDependencyKind map[fieldDependencyKey]fieldDependencyKind + + requestScopedVisibleResponseKeys map[int]string + requestScopedFetchAliases map[int]string } func NewNodeSelectionBuilder(config *Configuration) *NodeSelectionBuilder { @@ -195,13 +198,15 @@ func (p *NodeSelectionBuilder) SelectNodes(operation, definition *ast.Document, } return &NodeSelectionResult{ - dataSources: p.nodeSelectionsVisitor.dataSources, - nodeSuggestions: p.nodeSelectionsVisitor.nodeSuggestions, - fieldDependsOn: p.nodeSelectionsVisitor.fieldDependsOn, - fieldRequirementsConfigs: p.nodeSelectionsVisitor.fieldRequirementsConfigs, - skipFieldsRefs: p.nodeSelectionsVisitor.skipFieldsRefs, - fieldRefDependsOn: p.nodeSelectionsVisitor.fieldRefDependsOn, - fieldDependencyKind: p.nodeSelectionsVisitor.fieldDependencyKind, + dataSources: p.nodeSelectionsVisitor.dataSources, + nodeSuggestions: p.nodeSelectionsVisitor.nodeSuggestions, + fieldDependsOn: p.nodeSelectionsVisitor.fieldDependsOn, + fieldRequirementsConfigs: p.nodeSelectionsVisitor.fieldRequirementsConfigs, + skipFieldsRefs: p.nodeSelectionsVisitor.skipFieldsRefs, + fieldRefDependsOn: p.nodeSelectionsVisitor.fieldRefDependsOn, + fieldDependencyKind: p.nodeSelectionsVisitor.fieldDependencyKind, + requestScopedVisibleResponseKeys: p.nodeSelectionsVisitor.requestScopedVisibleResponseKeys, + requestScopedFetchAliases: p.nodeSelectionsVisitor.requestScopedFetchAliases, } } diff --git a/v2/pkg/engine/plan/node_selection_visitor.go b/v2/pkg/engine/plan/node_selection_visitor.go index db8403cd3c..04b9e9c1d3 100644 --- a/v2/pkg/engine/plan/node_selection_visitor.go +++ b/v2/pkg/engine/plan/node_selection_visitor.go @@ -43,6 +43,9 @@ type nodeSelectionVisitor struct { secondaryRun bool // secondaryRun is a flag to indicate that we're running the nodeSelectionVisitor not the first time hasNewFields bool // hasNewFields is used to determine if we need to run the planner again. It will be true in case required fields were added + requestScopedVisibleResponseKeys map[int]string // original response keys for field refs rewritten to synthetic requestScoped aliases + requestScopedFetchAliases map[int]string // synthetic fetch aliases for existing conflicting requestScoped field refs + rewrittenFieldRefs []int // rewrittenFieldRefs holds field refs which had their selection sets rewritten during the current walk persistedRewrittenFieldRefs map[int]struct{} // persistedRewrittenFieldRefs holds field refs which had their selection sets rewritten during any of the walks @@ -93,6 +96,7 @@ type keyRequirements struct { type fieldRequirements struct { dsHash DSHash + typeName string path string selectionSet string requestedByFieldRefs []int @@ -160,10 +164,12 @@ func (c *nodeSelectionVisitor) EnterDocument(operation, definition *ast.Document c.fieldRefDependsOn = make(map[int][]int) c.fieldRequirementsConfigs = make(map[fieldIndexKey][]FederationFieldConfiguration) c.fieldLandedTo = make(map[int]DSHash) + c.requestScopedVisibleResponseKeys = make(map[int]string) + c.requestScopedFetchAliases = make(map[int]string) } func (c *nodeSelectionVisitor) LeaveDocument(operation, definition *ast.Document) { - + c.propagateRequestScopedWidening() } func (c *nodeSelectionVisitor) EnterOperationDefinition(ref int) { @@ -269,21 +275,7 @@ func (c *nodeSelectionVisitor) handleFieldRequiredByRequires(fieldRef int, paren return } - requiresConfiguration, exists := dsConfig.RequiredFieldsByRequires(typeName, fieldName) - - if !exists { - for _, io := range dsConfig.FederationConfiguration().InterfaceObjects { - if slices.Contains(io.ConcreteTypeNames, typeName) { - // we should check if we have a @requires configuration for the interface object - requiresConfiguration, exists = dsConfig.RequiredFieldsByRequires(io.InterfaceTypeName, fieldName) - if exists { - requiresConfiguration.TypeName = typeName - break - } - } - } - } - + requiresConfiguration, exists := c.requiresConfigurationForField(dsConfig, typeName, fieldName) if !exists { // we do not have a @requires configuration for the field return @@ -317,6 +309,25 @@ func (c *nodeSelectionVisitor) handleFieldRequiredByRequires(fieldRef int, paren c.handleKeyRequirementsForBackJumpOnSameDataSource(fieldRef, dsConfig, typeName, parentPath) } +func (c *nodeSelectionVisitor) requiresConfigurationForField(dsConfig DataSource, typeName, fieldName string) (FederationFieldConfiguration, bool) { + requiresConfiguration, exists := dsConfig.RequiredFieldsByRequires(typeName, fieldName) + if exists { + return requiresConfiguration, true + } + + for _, io := range dsConfig.FederationConfiguration().InterfaceObjects { + if slices.Contains(io.ConcreteTypeNames, typeName) { + requiresConfiguration, exists = dsConfig.RequiredFieldsByRequires(io.InterfaceTypeName, fieldName) + if exists { + requiresConfiguration.TypeName = typeName + return requiresConfiguration, true + } + } + } + + return FederationFieldConfiguration{}, false +} + func (c *nodeSelectionVisitor) handleFieldsRequiredByKey(fieldRef int, parentPath, typeName, fieldName, currentPath string, dsConfig DataSource, sc SourceConnection) { fieldKey := fieldIndexKey{fieldRef, dsConfig.Hash()} _, visited := c.visitedFieldsKeyChecks[fieldKey] @@ -444,6 +455,7 @@ func (c *nodeSelectionVisitor) addPendingFieldRequirements(requestedByFieldRef i if _, exists := requirements.existsTracker[existsKey]; !exists { config := fieldRequirements{ dsHash: dsHash, + typeName: fieldConfiguration.TypeName, path: currentPath, selectionSet: fieldConfiguration.SelectionSet, requestedByFieldRefs: []int{requestedByFieldRef}, @@ -519,7 +531,10 @@ func (c *nodeSelectionVisitor) processPendingFieldRequirements(selectionSetRef i } func (c *nodeSelectionVisitor) addFieldRequirementsToOperation(selectionSetRef int, requirements fieldRequirements) { - typeName := c.walker.EnclosingTypeDefinition.NameString(c.definition) + typeName := requirements.typeName + if typeName == "" { + typeName = c.walker.EnclosingTypeDefinition.NameString(c.definition) + } input := &addRequiredFieldsConfiguration{ operation: c.operation, diff --git a/v2/pkg/engine/plan/node_selection_visitor_request_scoped.go b/v2/pkg/engine/plan/node_selection_visitor_request_scoped.go new file mode 100644 index 0000000000..3231ad63cf --- /dev/null +++ b/v2/pkg/engine/plan/node_selection_visitor_request_scoped.go @@ -0,0 +1,766 @@ +package plan + +import ( + "slices" + "sort" + "strings" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +type requestScopedGroupKey struct { + l1Key string + dsHash DSHash +} + +type requestScopedParticipant struct { + fieldRef int + selectionSetRef int + enclosingType string + fieldTypeName string + dsHash DSHash + path string +} + +type participantMissing struct { + participant requestScopedParticipant + missingFragment string +} + +type requestScopedSelectionUnion struct { + variants map[string]*requestScopedUnionVariant + responseKeyIndex map[string]map[string]struct{} +} + +type requestScopedUnionVariant struct { + key string + schemaFieldName string + argsPrinted string + directivesPrinted string + observedResponseKeys map[string]struct{} + subSelection *requestScopedSelectionUnion +} + +type requestScopedSelectionSnapshot struct { + fieldRefsByVariantKey map[string]int + responseKeys map[string]struct{} +} + +func (c *nodeSelectionVisitor) propagateRequestScopedWidening() { + groups := c.collectRequestScopedParticipants() + for key, group := range groups { + missing, ok := c.computeRequestScopedMissing(group) + if !ok { + continue + } + + for _, item := range missing { + if item.missingFragment == "" { + continue + } + + c.addFieldRequirementsToOperation(item.participant.selectionSetRef, fieldRequirements{ + dsHash: key.dsHash, + typeName: item.participant.fieldTypeName, + path: item.participant.path, + selectionSet: item.missingFragment, + requestedByFieldRefs: nil, + }) + if c.walker.Report != nil && c.walker.Report.HasErrors() { + return + } + } + } +} + +func (c *nodeSelectionVisitor) collectRequestScopedParticipants() map[requestScopedGroupKey][]requestScopedParticipant { + out := make(map[requestScopedGroupKey][]requestScopedParticipant) + + for _, rootNode := range c.operation.RootNodes { + if rootNode.Kind != ast.NodeKindOperationDefinition { + continue + } + + operationDefinition := c.operation.OperationDefinitions[rootNode.Ref] + operationName := c.operation.OperationDefinitionNameString(rootNode.Ref) + if c.operationName != "" && c.operationName != operationName { + continue + } + + rootTypeNode, ok := c.rootOperationTypeNode(operationDefinition.OperationType) + if !ok || !operationDefinition.HasSelections { + continue + } + + c.collectRequestScopedParticipantsInSelectionSet(operationDefinition.SelectionSet, rootTypeNode, operationDefinition.OperationType.Name(), out) + } + + return out +} + +func (c *nodeSelectionVisitor) collectRequestScopedParticipantsInSelectionSet(selectionSetRef int, enclosingTypeNode ast.Node, parentPath string, out map[requestScopedGroupKey][]requestScopedParticipant) { + enclosingTypeName := enclosingTypeNode.NameString(c.definition) + + for _, selectionRef := range c.operation.SelectionSetFieldSelections(selectionSetRef) { + fieldRef := c.operation.Selections[selectionRef].Ref + fieldName := c.operation.FieldNameString(fieldRef) + currentPath := parentPath + "." + c.operation.FieldAliasOrNameString(fieldRef) + + fieldDefinitionRef, exists := c.definition.NodeFieldDefinitionByName(enclosingTypeNode, c.operation.FieldNameBytes(fieldRef)) + if !exists { + continue + } + + fieldTypeName := c.definition.FieldDefinitionTypeNameString(fieldDefinitionRef) + if fieldSelectionSetRef, ok := c.operation.FieldSelectionSet(fieldRef); ok { + for _, ds := range c.dataSources { + fedMeta := ds.FederationConfiguration() + l1Keys := fedMeta.RequestScopedExportsForField(enclosingTypeName, fieldName) + if len(l1Keys) == 0 { + for _, io := range fedMeta.InterfaceObjects { + if slices.Contains(io.ConcreteTypeNames, enclosingTypeName) { + l1Keys = fedMeta.RequestScopedExportsForField(io.InterfaceTypeName, fieldName) + if len(l1Keys) > 0 { + break + } + } + } + } + + for _, l1Key := range l1Keys { + key := requestScopedGroupKey{l1Key: l1Key, dsHash: ds.Hash()} + out[key] = append(out[key], requestScopedParticipant{ + fieldRef: fieldRef, + selectionSetRef: fieldSelectionSetRef, + enclosingType: enclosingTypeName, + fieldTypeName: fieldTypeName, + dsHash: ds.Hash(), + path: currentPath, + }) + } + } + + fieldTypeNode, ok := c.definition.Index.FirstNodeByNameStr(fieldTypeName) + if ok { + c.collectRequestScopedParticipantsInSelectionSet(fieldSelectionSetRef, fieldTypeNode, currentPath, out) + } + } + } +} + +func (c *nodeSelectionVisitor) rootOperationTypeNode(operationType ast.OperationType) (ast.Node, bool) { + switch operationType { + case ast.OperationTypeQuery: + return c.definition.NodeByName(c.definition.Index.QueryTypeName) + case ast.OperationTypeMutation: + return c.definition.NodeByName(c.definition.Index.MutationTypeName) + case ast.OperationTypeSubscription: + return c.definition.NodeByName(c.definition.Index.SubscriptionTypeName) + default: + return ast.InvalidNode, false + } +} + +func (c *nodeSelectionVisitor) computeRequestScopedMissing(group []requestScopedParticipant) ([]participantMissing, bool) { + if len(group) < 2 { + return nil, true + } + + returnTypeName := group[0].fieldTypeName + for _, participant := range group[1:] { + if participant.fieldTypeName != returnTypeName { + return nil, false + } + } + + ds, ok := c.dataSourceByHash(group[0].dsHash) + if !ok { + return nil, false + } + + typeNode, ok := c.definition.Index.FirstNodeByNameStr(returnTypeName) + if !ok { + return nil, false + } + + union := newRequestScopedSelectionUnion() + for _, participant := range group { + if !union.mergeSelectionSet(c.operation, c.definition, participant.selectionSetRef, typeNode, ds) { + return nil, false + } + if !c.mergeRequestScopedRequiresSelectionSet(union, c.operation, participant.selectionSetRef, typeNode, ds) { + return nil, false + } + } + + syntheticAliases := union.syntheticAliases() + if len(syntheticAliases) > 0 { + for _, participant := range group { + if !union.recordExistingSelectionAliases(c.operation, c.definition, participant.selectionSetRef, typeNode, ds, syntheticAliases, c.requestScopedVisibleResponseKeys, c.requestScopedFetchAliases) { + return nil, false + } + } + } + + out := make([]participantMissing, 0, len(group)) + for _, participant := range group { + out = append(out, participantMissing{ + participant: participant, + missingFragment: union.renderMissingFragment(c.operation, c.definition, participant.selectionSetRef, typeNode, ds), + }) + } + + return out, true +} + +func (c *nodeSelectionVisitor) mergeRequestScopedRequiresSelectionSet(union *requestScopedSelectionUnion, doc *ast.Document, selectionSetRef int, enclosingTypeNode ast.Node, ds DataSource) bool { + enclosingTypeName := enclosingTypeNode.NameString(c.definition) + + for _, selectionRef := range doc.SelectionSets[selectionSetRef].SelectionRefs { + if doc.Selections[selectionRef].Kind != ast.SelectionKindField { + return false + } + + fieldRef := doc.Selections[selectionRef].Ref + fieldName := doc.FieldNameString(fieldRef) + if !fieldBelongsToDataSource(ds, enclosingTypeName, fieldName) { + continue + } + + requiresConfiguration, exists := c.requiresConfigurationForField(ds, enclosingTypeName, fieldName) + if exists { + requiredFieldsDoc, report := RequiredFieldsFragment(requiresConfiguration.TypeName, requiresConfiguration.SelectionSet, false) + if report.HasErrors() || len(requiredFieldsDoc.FragmentDefinitions) == 0 { + return false + } + + requiredSelectionSetRef := requiredFieldsDoc.FragmentDefinitions[0].SelectionSet + if !union.mergeHiddenSelectionSet(requiredFieldsDoc, c.definition, requiredSelectionSetRef, enclosingTypeNode, ds) { + return false + } + if !c.mergeRequestScopedRequiresSelectionSet(union, requiredFieldsDoc, requiredSelectionSetRef, enclosingTypeNode, ds) { + return false + } + } + + fieldSelectionSetRef, hasSelectionSet := doc.FieldSelectionSet(fieldRef) + if !hasSelectionSet { + continue + } + + fieldTypeNode, ok := fieldTypeNodeForSelection(c.definition, enclosingTypeNode, fieldRef, doc.FieldNameBytes(fieldRef)) + if !ok { + return false + } + if !c.mergeRequestScopedRequiresSelectionSet(union, doc, fieldSelectionSetRef, fieldTypeNode, ds) { + return false + } + } + + return true +} + +func newRequestScopedSelectionUnion() *requestScopedSelectionUnion { + return &requestScopedSelectionUnion{ + variants: make(map[string]*requestScopedUnionVariant), + responseKeyIndex: make(map[string]map[string]struct{}), + } +} + +func (u *requestScopedSelectionUnion) mergeSelectionSet(doc, definition *ast.Document, selectionSetRef int, enclosingTypeNode ast.Node, ds DataSource) bool { + for _, selectionRef := range doc.SelectionSets[selectionSetRef].SelectionRefs { + if doc.Selections[selectionRef].Kind != ast.SelectionKindField { + return false + } + + fieldRef := doc.Selections[selectionRef].Ref + fieldName := doc.FieldNameString(fieldRef) + if !fieldBelongsToDataSource(ds, enclosingTypeNode.NameString(definition), fieldName) { + continue + } + + argsPrinted := printFieldArgumentsDeterministic(doc, fieldRef) + directivesPrinted := printFieldDirectivesDeterministic(doc, fieldRef) + responseKey := doc.FieldAliasOrNameString(fieldRef) + variantKey := requestScopedVariantKey(fieldName, argsPrinted, directivesPrinted) + + fieldTypeNode, ok := fieldTypeNodeForSelection(definition, enclosingTypeNode, fieldRef, doc.FieldNameBytes(fieldRef)) + if !ok && doc.FieldHasSelections(fieldRef) { + return false + } + + existing, exists := u.variants[variantKey] + if !exists { + existing = &requestScopedUnionVariant{ + key: variantKey, + schemaFieldName: fieldName, + argsPrinted: argsPrinted, + directivesPrinted: directivesPrinted, + observedResponseKeys: map[string]struct{}{responseKey: {}}, + } + if fieldSelectionSetRef, ok := doc.FieldSelectionSet(fieldRef); ok { + existing.subSelection = newRequestScopedSelectionUnion() + if !existing.subSelection.mergeSelectionSet(doc, definition, fieldSelectionSetRef, fieldTypeNode, ds) { + return false + } + } + u.variants[variantKey] = existing + } else { + existing.observedResponseKeys[responseKey] = struct{}{} + + fieldSelectionSetRef, hasFieldSelectionSet := doc.FieldSelectionSet(fieldRef) + if !hasFieldSelectionSet { + if existing.subSelection != nil { + return false + } + } else { + if existing.subSelection == nil { + return false + } + if !existing.subSelection.mergeSelectionSet(doc, definition, fieldSelectionSetRef, fieldTypeNode, ds) { + return false + } + } + } + + if _, ok := u.responseKeyIndex[responseKey]; !ok { + u.responseKeyIndex[responseKey] = make(map[string]struct{}) + } + u.responseKeyIndex[responseKey][variantKey] = struct{}{} + } + + return true +} + +func (u *requestScopedSelectionUnion) mergeHiddenSelectionSet(doc, definition *ast.Document, selectionSetRef int, enclosingTypeNode ast.Node, ds DataSource) bool { + for _, selectionRef := range doc.SelectionSets[selectionSetRef].SelectionRefs { + if doc.Selections[selectionRef].Kind != ast.SelectionKindField { + return false + } + + fieldRef := doc.Selections[selectionRef].Ref + fieldName := doc.FieldNameString(fieldRef) + if !fieldBelongsToDataSource(ds, enclosingTypeNode.NameString(definition), fieldName) { + continue + } + + argsPrinted := printFieldArgumentsDeterministic(doc, fieldRef) + directivesPrinted := printFieldDirectivesDeterministic(doc, fieldRef) + responseKey := doc.FieldAliasOrNameString(fieldRef) + variantKey := requestScopedVariantKey(fieldName, argsPrinted, directivesPrinted) + + fieldTypeNode, ok := fieldTypeNodeForSelection(definition, enclosingTypeNode, fieldRef, doc.FieldNameBytes(fieldRef)) + if !ok && doc.FieldHasSelections(fieldRef) { + return false + } + + existing, exists := u.variants[variantKey] + if !exists { + existing = &requestScopedUnionVariant{ + key: variantKey, + schemaFieldName: fieldName, + argsPrinted: argsPrinted, + directivesPrinted: directivesPrinted, + observedResponseKeys: map[string]struct{}{responseKey: {}}, + } + if fieldSelectionSetRef, ok := doc.FieldSelectionSet(fieldRef); ok { + existing.subSelection = newRequestScopedSelectionUnion() + if !existing.subSelection.mergeHiddenSelectionSet(doc, definition, fieldSelectionSetRef, fieldTypeNode, ds) { + return false + } + } + u.variants[variantKey] = existing + + if _, ok := u.responseKeyIndex[responseKey]; !ok { + u.responseKeyIndex[responseKey] = make(map[string]struct{}) + } + u.responseKeyIndex[responseKey][variantKey] = struct{}{} + continue + } + + fieldSelectionSetRef, hasFieldSelectionSet := doc.FieldSelectionSet(fieldRef) + if !hasFieldSelectionSet { + if existing.subSelection != nil { + return false + } + continue + } + if existing.subSelection == nil { + return false + } + if !existing.subSelection.mergeHiddenSelectionSet(doc, definition, fieldSelectionSetRef, fieldTypeNode, ds) { + return false + } + } + + return true +} + +func (u *requestScopedSelectionUnion) renderMissingFragment(doc, definition *ast.Document, selectionSetRef int, enclosingTypeNode ast.Node, ds DataSource) string { + snapshot := buildRequestScopedSelectionSnapshot(doc, definition, selectionSetRef, enclosingTypeNode, ds) + syntheticAliases := u.syntheticAliases() + + parts := make([]string, 0, len(u.variants)) + for _, variantKey := range u.sortedVariantKeys() { + variant := u.variants[variantKey] + fieldRef, exists := snapshot.fieldRefsByVariantKey[variantKey] + if !exists { + responseKey := variant.preferredResponseKey() + if synthetic, ok := syntheticAliases[variantKey]; ok { + responseKey = synthetic + } + parts = append(parts, variant.render(responseKey)) + continue + } + + if variant.subSelection == nil { + continue + } + + fieldSelectionSetRef, ok := doc.FieldSelectionSet(fieldRef) + if !ok { + continue + } + + fieldTypeNode, ok := fieldTypeNodeForSelection(definition, enclosingTypeNode, fieldRef, doc.FieldNameBytes(fieldRef)) + if !ok { + continue + } + + subMissing := variant.subSelection.renderMissingFragment(doc, definition, fieldSelectionSetRef, fieldTypeNode, ds) + if subMissing == "" { + continue + } + + parts = append(parts, renderFieldWithExistingResponseKey(doc, fieldRef, subMissing)) + } + + return strings.Join(parts, " ") +} + +func (u *requestScopedSelectionUnion) recordExistingSelectionAliases(doc, definition *ast.Document, selectionSetRef int, enclosingTypeNode ast.Node, ds DataSource, syntheticAliases map[string]string, visibleResponseKeys map[int]string, fetchAliases map[int]string) bool { + for _, selectionRef := range doc.SelectionSets[selectionSetRef].SelectionRefs { + if doc.Selections[selectionRef].Kind != ast.SelectionKindField { + return false + } + + fieldRef := doc.Selections[selectionRef].Ref + fieldName := doc.FieldNameString(fieldRef) + if !fieldBelongsToDataSource(ds, enclosingTypeNode.NameString(definition), fieldName) { + continue + } + + argsPrinted := printFieldArgumentsDeterministic(doc, fieldRef) + directivesPrinted := printFieldDirectivesDeterministic(doc, fieldRef) + responseKey := doc.FieldAliasOrNameString(fieldRef) + variantKey := requestScopedVariantKey(fieldName, argsPrinted, directivesPrinted) + variant, ok := u.variants[variantKey] + if !ok { + continue + } + + if syntheticAlias, hasSyntheticAlias := syntheticAliases[variantKey]; hasSyntheticAlias && responseKey != syntheticAlias { + if _, exists := visibleResponseKeys[fieldRef]; !exists { + visibleResponseKeys[fieldRef] = responseKey + } + fetchAliases[fieldRef] = syntheticAlias + } + + fieldSelectionSetRef, hasFieldSelectionSet := doc.FieldSelectionSet(fieldRef) + if !hasFieldSelectionSet || variant.subSelection == nil { + continue + } + + fieldTypeNode, ok := fieldTypeNodeForSelection(definition, enclosingTypeNode, fieldRef, doc.FieldNameBytes(fieldRef)) + if !ok { + return false + } + if !variant.subSelection.recordExistingSelectionAliases(doc, definition, fieldSelectionSetRef, fieldTypeNode, ds, variant.subSelection.syntheticAliases(), visibleResponseKeys, fetchAliases) { + return false + } + } + + return true +} + +func buildRequestScopedSelectionSnapshot(doc, definition *ast.Document, selectionSetRef int, enclosingTypeNode ast.Node, ds DataSource) requestScopedSelectionSnapshot { + out := requestScopedSelectionSnapshot{ + fieldRefsByVariantKey: make(map[string]int), + responseKeys: make(map[string]struct{}), + } + + for _, fieldRef := range doc.SelectionSetFieldRefs(selectionSetRef) { + fieldName := doc.FieldNameString(fieldRef) + if !fieldBelongsToDataSource(ds, enclosingTypeNode.NameString(definition), fieldName) { + continue + } + + argsPrinted := printFieldArgumentsDeterministic(doc, fieldRef) + directivesPrinted := printFieldDirectivesDeterministic(doc, fieldRef) + responseKey := doc.FieldAliasOrNameString(fieldRef) + variantKey := requestScopedVariantKey(fieldName, argsPrinted, directivesPrinted) + + out.fieldRefsByVariantKey[variantKey] = fieldRef + out.responseKeys[responseKey] = struct{}{} + } + + return out +} + +func requestScopedVariantKey(fieldName, argsPrinted, directivesPrinted string) string { + return fieldName + "\x00" + argsPrinted + "\x00" + directivesPrinted +} + +func (u *requestScopedSelectionUnion) syntheticAliases() map[string]string { + out := make(map[string]string) + reservedResponseKeys := make(map[string]struct{}) + for responseKey := range u.responseKeyIndex { + reservedResponseKeys[responseKey] = struct{}{} + } + + responseKeys := make([]string, 0, len(u.responseKeyIndex)) + for responseKey, variantKeys := range u.responseKeyIndex { + if len(variantKeys) < 2 { + continue + } + responseKeys = append(responseKeys, responseKey) + } + sort.Strings(responseKeys) + + for _, responseKey := range responseKeys { + variantKeys := make([]string, 0, len(u.responseKeyIndex[responseKey])) + for variantKey := range u.responseKeyIndex[responseKey] { + variantKeys = append(variantKeys, variantKey) + } + sort.Strings(variantKeys) + + base := "__request_scoped__" + sanitizeGraphQLName(responseKey) + "_" + for _, variantKey := range variantKeys { + if existingAlias, ok := u.variants[variantKey].existingSyntheticAlias(base); ok { + out[variantKey] = existingAlias + reservedResponseKeys[existingAlias] = struct{}{} + } + } + + nextIndex := 0 + for _, variantKey := range variantKeys { + if _, exists := out[variantKey]; exists { + continue + } + for { + candidate := base + strconvItoa(nextIndex) + nextIndex++ + if _, exists := reservedResponseKeys[candidate]; exists { + continue + } + reservedResponseKeys[candidate] = struct{}{} + out[variantKey] = candidate + break + } + } + } + + return out +} + +func (f *requestScopedUnionVariant) existingSyntheticAlias(base string) (string, bool) { + keys := make([]string, 0, len(f.observedResponseKeys)) + for key := range f.observedResponseKeys { + if strings.HasPrefix(key, base) { + keys = append(keys, key) + } + } + if len(keys) == 0 { + return "", false + } + sort.Strings(keys) + return keys[0], true +} + +func sanitizeGraphQLName(in string) string { + if in == "" { + return "field" + } + + var out strings.Builder + for i := 0; i < len(in); i++ { + b := in[i] + switch { + case b >= 'a' && b <= 'z': + out.WriteByte(b) + case b >= 'A' && b <= 'Z': + out.WriteByte(b) + case b >= '0' && b <= '9': + out.WriteByte(b) + case b == '_': + out.WriteByte(b) + default: + out.WriteByte('_') + } + } + if out.Len() == 0 { + return "field" + } + return out.String() +} + +func (u *requestScopedSelectionUnion) sortedVariantKeys() []string { + keys := make([]string, 0, len(u.variants)) + for key := range u.variants { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func (f *requestScopedUnionVariant) preferredResponseKey() string { + if _, ok := f.observedResponseKeys[f.schemaFieldName]; ok { + return f.schemaFieldName + } + keys := make([]string, 0, len(f.observedResponseKeys)) + for key := range f.observedResponseKeys { + keys = append(keys, key) + } + sort.Strings(keys) + return keys[0] +} + +func (f *requestScopedUnionVariant) render(responseKey string) string { + selection := "" + if f.subSelection != nil { + selection = f.subSelection.renderCompleteSelection() + } + return renderFieldString(responseKey, f.schemaFieldName, f.argsPrinted, f.directivesPrinted, selection) +} + +func (u *requestScopedSelectionUnion) renderCompleteSelection() string { + parts := make([]string, 0, len(u.variants)) + for _, variantKey := range u.sortedVariantKeys() { + variant := u.variants[variantKey] + parts = append(parts, variant.render(variant.preferredResponseKey())) + } + return strings.Join(parts, " ") +} + +func renderFieldWithExistingResponseKey(doc *ast.Document, fieldRef int, selection string) string { + return renderFieldString( + doc.FieldAliasOrNameString(fieldRef), + doc.FieldNameString(fieldRef), + printFieldArgumentsDeterministic(doc, fieldRef), + printFieldDirectivesDeterministic(doc, fieldRef), + selection, + ) +} + +func renderFieldString(responseKey, schemaFieldName, argsPrinted, directivesPrinted, selection string) string { + var prefix strings.Builder + if responseKey != schemaFieldName { + prefix.WriteString(responseKey) + prefix.WriteString(": ") + } + prefix.WriteString(schemaFieldName) + prefix.WriteString(argsPrinted) + if directivesPrinted != "" { + prefix.WriteByte(' ') + prefix.WriteString(directivesPrinted) + } + if selection == "" { + return prefix.String() + } + prefix.WriteString(" { ") + prefix.WriteString(selection) + prefix.WriteString(" }") + return prefix.String() +} + +func printFieldArgumentsDeterministic(doc *ast.Document, fieldRef int) string { + if !doc.FieldHasArguments(fieldRef) { + return "" + } + + refs := append([]int(nil), doc.FieldArguments(fieldRef)...) + sort.Slice(refs, func(i, j int) bool { + return doc.ArgumentNameString(refs[i]) < doc.ArgumentNameString(refs[j]) + }) + + var out strings.Builder + _ = doc.PrintArguments(refs, &out) + return out.String() +} + +func printFieldDirectivesDeterministic(doc *ast.Document, fieldRef int) string { + if !doc.FieldHasDirectives(fieldRef) { + return "" + } + + refs := append([]int(nil), doc.FieldDirectives(fieldRef)...) + sort.Slice(refs, func(i, j int) bool { + leftName := doc.DirectiveNameString(refs[i]) + rightName := doc.DirectiveNameString(refs[j]) + if leftName == rightName { + return printDirectiveDeterministic(doc, refs[i]) < printDirectiveDeterministic(doc, refs[j]) + } + return leftName < rightName + }) + + parts := make([]string, 0, len(refs)) + for _, ref := range refs { + parts = append(parts, printDirectiveDeterministic(doc, ref)) + } + return strings.Join(parts, " ") +} + +func printDirectiveDeterministic(doc *ast.Document, directiveRef int) string { + directive := doc.Directives[directiveRef] + out := "@" + doc.DirectiveNameString(directiveRef) + if !directive.HasArguments { + return out + } + + refs := append([]int(nil), directive.Arguments.Refs...) + sort.Slice(refs, func(i, j int) bool { + return doc.ArgumentNameString(refs[i]) < doc.ArgumentNameString(refs[j]) + }) + + var args strings.Builder + _ = doc.PrintArguments(refs, &args) + return out + args.String() +} + +func (c *nodeSelectionVisitor) dataSourceByHash(hash DSHash) (DataSource, bool) { + for _, ds := range c.dataSources { + if ds.Hash() == hash { + return ds, true + } + } + return nil, false +} + +func fieldTypeNodeForSelection(definition *ast.Document, enclosingTypeNode ast.Node, fieldRef int, fieldName []byte) (ast.Node, bool) { + fieldDefinitionRef, ok := definition.NodeFieldDefinitionByName(enclosingTypeNode, fieldName) + if !ok { + return ast.InvalidNode, false + } + return definition.Index.FirstNodeByNameStr(definition.FieldDefinitionTypeNameString(fieldDefinitionRef)) +} + +func fieldBelongsToDataSource(ds DataSource, typeName, fieldName string) bool { + if fieldName == typeNameField { + return ds.HasRootNodeWithTypename(typeName) || ds.HasChildNodeWithTypename(typeName) + } + return ds.HasRootNode(typeName, fieldName) || ds.HasChildNode(typeName, fieldName) +} + +func strconvItoa(i int) string { + if i == 0 { + return "0" + } + var digits [20]byte + pos := len(digits) + for i > 0 { + pos-- + digits[pos] = byte('0' + i%10) + i /= 10 + } + return string(digits[pos:]) +} diff --git a/v2/pkg/engine/plan/node_selection_visitor_request_scoped_test.go b/v2/pkg/engine/plan/node_selection_visitor_request_scoped_test.go new file mode 100644 index 0000000000..bc15430774 --- /dev/null +++ b/v2/pkg/engine/plan/node_selection_visitor_request_scoped_test.go @@ -0,0 +1,138 @@ +package plan + +import ( + "testing" + + "github.com/jensneuse/abstractlogger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafeparser" +) + +type requestScopedUnionTestDataSource struct { + *DataSourceMetadata + + id string + name string + hash DSHash +} + +func newRequestScopedUnionTestDataSource() *requestScopedUnionTestDataSource { + metadata := &DataSourceMetadata{ + ChildNodes: TypeFields{ + { + TypeName: "Viewer", + FieldNames: []string{"name", "email", "handle", "posts"}, + }, + }, + } + metadata.InitNodesIndex() + + return &requestScopedUnionTestDataSource{ + DataSourceMetadata: metadata, + id: "viewer", + name: "viewer", + hash: DSHash(1), + } +} + +func (*requestScopedUnionTestDataSource) UpstreamSchema() (*ast.Document, bool) { + return nil, false +} + +func (*requestScopedUnionTestDataSource) PlanningBehavior() DataSourcePlanningBehavior { + return DataSourcePlanningBehavior{} +} + +func (d *requestScopedUnionTestDataSource) Id() string { + return d.id +} + +func (d *requestScopedUnionTestDataSource) Name() string { + return d.name +} + +func (d *requestScopedUnionTestDataSource) Hash() DSHash { + return d.hash +} + +func (d *requestScopedUnionTestDataSource) FederationConfiguration() FederationMetaData { + return d.FederationMetaData +} + +func (*requestScopedUnionTestDataSource) CreatePlannerConfiguration(abstractlogger.Logger, *objectFetchConfiguration, *plannerPathsConfiguration, *Configuration) PlannerConfiguration { + return nil +} + +func (*requestScopedUnionTestDataSource) GetCostConfig() *DataSourceCostConfig { + return nil +} + +func TestRequestScopedSelectionUnion_DirectiveConflictsUseSyntheticAliases(t *testing.T) { + t.Parallel() + + definition := unsafeparser.ParseGraphqlDocumentString(` + directive @tag(name: String!) on FIELD + + type Query { + currentViewer: Viewer + article: Article + } + + type Article { + currentViewer: Viewer + } + + type Viewer { + name: String! + } + `) + operation := unsafeparser.ParseGraphqlDocumentString(` + query Widening { + currentViewer { + name @tag(name: "root") + } + article { + currentViewer { + name @tag(name: "child") + } + } + } + `) + + operationDefinitionRef := operation.RootNodes[0].Ref + rootSelectionSetRef := operation.OperationDefinitions[operationDefinitionRef].SelectionSet + rootFieldRefs := operation.SelectionSetFieldRefs(rootSelectionSetRef) + require.Len(t, rootFieldRefs, 2) + + rootViewerSelectionSetRef, ok := operation.FieldSelectionSet(rootFieldRefs[0]) + require.True(t, ok) + + articleSelectionSetRef, ok := operation.FieldSelectionSet(rootFieldRefs[1]) + require.True(t, ok) + articleFieldRefs := operation.SelectionSetFieldRefs(articleSelectionSetRef) + require.Len(t, articleFieldRefs, 1) + + childViewerSelectionSetRef, ok := operation.FieldSelectionSet(articleFieldRefs[0]) + require.True(t, ok) + + viewerTypeNode, ok := definition.Index.FirstNodeByNameStr("Viewer") + require.True(t, ok) + + ds := newRequestScopedUnionTestDataSource() + union := newRequestScopedSelectionUnion() + + require.True(t, union.mergeSelectionSet(&operation, &definition, rootViewerSelectionSetRef, viewerTypeNode, ds)) + require.True(t, union.mergeSelectionSet(&operation, &definition, childViewerSelectionSetRef, viewerTypeNode, ds)) + + assert.Equal(t, + `__request_scoped__name_0: name @tag(name: "child")`, + union.renderMissingFragment(&operation, &definition, rootViewerSelectionSetRef, viewerTypeNode, ds), + ) + assert.Equal(t, + `__request_scoped__name_1: name @tag(name: "root")`, + union.renderMissingFragment(&operation, &definition, childViewerSelectionSetRef, viewerTypeNode, ds), + ) +} diff --git a/v2/pkg/engine/plan/path_builder_visitor.go b/v2/pkg/engine/plan/path_builder_visitor.go index b66b41375a..41ecb52d2f 100644 --- a/v2/pkg/engine/plan/path_builder_visitor.go +++ b/v2/pkg/engine/plan/path_builder_visitor.go @@ -110,9 +110,13 @@ type selectionSetTypeInfo struct { } type objectFetchConfiguration struct { - filter *resolve.SubscriptionFilter - planner DataSourceFetchPlanner - isSubscription bool + filter *resolve.SubscriptionFilter + planner DataSourceFetchPlanner + isSubscription bool + // isolatedRootField marks planners for cached query root fields that must + // not merge with other root fields. Set in handlePlanningField; checked in + // planWithExistingPlanners to prevent other fields from joining this planner. + isolatedRootField bool fieldRef int fieldDefinitionRef int sourceID string @@ -560,14 +564,24 @@ func (c *pathBuilderVisitor) handlePlanningField(fieldRef int, typeName, fieldNa } isMutationRoot := c.isMutationRoot(currentPath) + isCachedQueryRoot := c.isCachedQueryRootField(currentPath, typeName, fieldName, ds) var ( plannerIdx int planned bool ) - if isMutationRoot { + if isMutationRoot || isCachedQueryRoot { + // Mutations always need separate planners for sequential execution. + // Cached query root fields need separate planners so each fetch gets + // its own cache configuration (TTL, cache name). Without isolation, + // configureFetchCaching sees mixed root fields and disables L2 caching. plannerIdx, planned = c.addNewPlanner(fieldRef, typeName, fieldName, currentPath, parentPath, isMutationRoot, ds) + if planned && isCachedQueryRoot { + // Mark this planner as isolated so planWithExistingPlanners won't + // merge other root fields into it (see guard in that function). + c.planners[plannerIdx].ObjectFetchConfiguration().isolatedRootField = true + } } else { plannerIdx, planned = c.planWithExistingPlanners(fieldRef, typeName, fieldName, currentPath, parentPath, precedingParentPath, suggestion) if !planned { @@ -766,6 +780,16 @@ func (c *pathBuilderVisitor) planWithExistingPlanners(fieldRef int, typeName, fi isRootNode := suggestion.IsRootNode isChildNode := !isRootNode + // Don't merge other query root fields into isolated planners (cached root fields). + // We check parentPath (not isRootNode) because entity types like Product are + // also datasource root nodes — isRootNode would incorrectly block nested entity + // fields from merging into the planner that needs them. + // isParentPathIsRootOperationPath checks if parentPath is "query"/"mutation"/"subscription", + // ensuring only top-level query fields are prevented from merging. + if c.isParentPathIsRootOperationPath(parentPath) && plannerConfig.ObjectFetchConfiguration().isolatedRootField { + continue + } + if c.secondaryRun && plannerConfig.HasPath(currentPath) { // on the secondary run we need to process only new fields added by the first run return plannerIdx, true @@ -1305,6 +1329,34 @@ func (c *pathBuilderVisitor) isMutationRoot(path string) bool { return strings.Count(path, ".") == 1 } +// isCachedQueryRootField returns true when the field is a direct child of Query +// and has root field caching configured on the datasource. Such fields must be +// isolated into their own planner to get independent cache configs per fetch. +// +// This mirrors the mutation pattern (isMutationRoot) but only applies to query +// fields with explicit RootFieldCacheConfiguration. Without isolation, multiple +// root fields from the same datasource merge into one planner/fetch, and +// configureFetchCaching sees mixed cache configs and disables L2 caching. +func (c *pathBuilderVisitor) isCachedQueryRootField(currentPath, typeName, fieldName string, ds DataSource) bool { + // When entity caching is globally disabled, no isolation needed + if c.plannerConfiguration.DisableEntityCaching { + return false + } + // Only applies to Query operations, not mutations or subscriptions + root := c.walker.Ancestors[0] + rootOperationType := c.operation.OperationDefinitions[root.Ref].OperationType + if rootOperationType != ast.OperationTypeQuery { + return false + } + // Only direct children of the root (e.g. "query.me" has exactly one dot) + if strings.Count(currentPath, ".") != 1 { + return false + } + // Check if this specific field has a cache config on its datasource + fedConfig := ds.FederationConfiguration() + return fedConfig.RootFieldCacheConfig(typeName, fieldName) != nil +} + func (c *pathBuilderVisitor) isNotOperationDefinitionRoot() bool { // potentially this check is not needed, because we should not have root fragments definitions // at this stage of planning diff --git a/v2/pkg/engine/plan/planner.go b/v2/pkg/engine/plan/planner.go index 5897e3b464..9532645d8f 100644 --- a/v2/pkg/engine/plan/planner.go +++ b/v2/pkg/engine/plan/planner.go @@ -146,6 +146,7 @@ func (p *Planner) Plan(operation, definition *ast.Document, operationName string p.planningVisitor.fieldRefDependsOnFieldRefs = selectionsConfig.fieldRefDependsOn p.planningVisitor.fieldDependencyKind = selectionsConfig.fieldDependencyKind p.planningVisitor.fieldRefDependants = inverseMap(selectionsConfig.fieldRefDependsOn) + p.planningVisitor.caching.setRequestScopedMaps(selectionsConfig.requestScopedVisibleResponseKeys, selectionsConfig.requestScopedFetchAliases) p.planningWalker.ResetVisitors() p.planningWalker.SetVisitorFilter(p.planningVisitor) diff --git a/v2/pkg/engine/plan/planner_test.go b/v2/pkg/engine/plan/planner_test.go index b952107f07..9256b52c04 100644 --- a/v2/pkg/engine/plan/planner_test.go +++ b/v2/pkg/engine/plan/planner_test.go @@ -8,6 +8,7 @@ import ( "reflect" "slices" "testing" + "time" "github.com/jensneuse/abstractlogger" "github.com/kylelemons/godebug/diff" @@ -120,6 +121,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -172,6 +174,7 @@ func TestPlanner_Plan(t *testing.T) { }, Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, DataSources: []DataSource{testDefinitionDSConfiguration}, })) @@ -190,6 +193,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -226,6 +230,7 @@ func TestPlanner_Plan(t *testing.T) { }, Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, DataSources: []DataSource{testDefinitionDSConfiguration}, })) @@ -247,6 +252,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -292,6 +298,7 @@ func TestPlanner_Plan(t *testing.T) { }, Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, DataSources: []DataSource{testDefinitionDSConfiguration}, })) @@ -315,6 +322,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -363,6 +371,7 @@ func TestPlanner_Plan(t *testing.T) { }, Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, DataSources: []DataSource{testDefinitionDSConfiguration}, })) @@ -384,6 +393,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -425,14 +435,16 @@ func TestPlanner_Plan(t *testing.T) { }, Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, DataSources: []DataSource{testDefinitionDSConfiguration}, })) }) t.Run("operation selection", func(t *testing.T) { cfg := Configuration{ - DataSources: []DataSource{testDefinitionDSConfiguration}, - DisableIncludeInfo: true, + DataSources: []DataSource{testDefinitionDSConfiguration}, + DisableIncludeInfo: true, + DisableEntityCaching: true, } t.Run("should successfully plan a single named query by providing an operation name", test(testDefinition, ` @@ -554,6 +566,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -585,6 +598,7 @@ func TestPlanner_Plan(t *testing.T) { Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, Fields: FieldConfigurations{ FieldConfiguration{ TypeName: "Character", @@ -611,6 +625,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -644,6 +659,7 @@ func TestPlanner_Plan(t *testing.T) { Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, Fields: FieldConfigurations{ FieldConfiguration{ TypeName: "Character", @@ -673,6 +689,7 @@ func TestPlanner_Plan(t *testing.T) { Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -703,6 +720,7 @@ func TestPlanner_Plan(t *testing.T) { Configuration{ DisableResolveFieldPositions: true, DisableIncludeInfo: true, + DisableEntityCaching: true, DataSources: []DataSource{dsConfig}, }, )) @@ -798,6 +816,542 @@ func TestPlanner_Plan(t *testing.T) { assert.Equal(t, plan2Expected, plan2) }) + + t.Run("reused planner clears field planner metadata between operations", func(t *testing.T) { + type costNodeDataSourceHashes struct { + Field FieldCoordinate + DataSourceHashes []DSHash + Children []costNodeDataSourceHashes + } + var collectCostHashes func(node *CostTreeNode) costNodeDataSourceHashes + collectCostHashes = func(node *CostTreeNode) costNodeDataSourceHashes { + out := costNodeDataSourceHashes{ + Field: node.fieldCoords, + DataSourceHashes: node.dataSourceHashes, + } + for _, child := range node.children { + out.Children = append(out.Children, collectCostHashes(child)) + } + return out + } + costHashes := func(plan Plan) []costNodeDataSourceHashes { + calc := plan.GetCostCalculator() + if calc == nil || calc.tree == nil { + return nil + } + out := make([]costNodeDataSourceHashes, 0, len(calc.tree.children)) + for _, child := range calc.tree.children { + out = append(out, collectCostHashes(child)) + } + return out + } + fieldTrackingDS := func(b *dsBuilder) DataSource { + b.ds.factory = &fieldTrackingFakeFactory[any]{ + FakeFactory: b.ds.factory.(*FakeFactory[any]), + } + return b.DS() + } + + definition := ` + type Account { + id: ID! + name: String + } + type Query { + account: Account + } + ` + accountDS := fieldTrackingDS(dsb(). + WithBehavior(DataSourcePlanningBehavior{ + MergeAliasedRootNodes: true, + }). + Schema(`type Account { + id: ID! + } + type Query { + account: Account + }`). + Id("accountDS"). + Hash(1). + RootNode("Query", "account"). + RootNode("Account", "id"). + KeysMetadata(FederationFieldConfigurations{ + { + TypeName: "Account", + SelectionSet: "id", + }, + })) + addressDS := fieldTrackingDS(dsb(). + WithBehavior(DataSourcePlanningBehavior{ + MergeAliasedRootNodes: true, + }). + Schema(`type Account { + id: ID! + name: String + }`). + KeysMetadata(FederationFieldConfigurations{ + { + TypeName: "Account", + SelectionSet: "id", + }, + }). + Id("addressDS"). + Hash(2). + RootNode("Account", "id", "name")) + planConfiguration := Configuration{ + DataSources: []DataSource{accountDS, addressDS}, + BuildFetchReasons: true, + ComputeCosts: true, + } + def := unsafeparser.ParseGraphqlDocumentStringWithBaseSchema(definition) + operationWithEntityFetch := ` + query { + account { + name + } + }` + operationWithoutEntityFetch := ` + query { + account { + id + } + }` + + sharedPlanner, err := NewPlanner(planConfiguration) + require.NoError(t, err) + + op1 := unsafeparser.ParseGraphqlDocumentString(operationWithEntityFetch) + report1 := &operationreport.Report{} + plan1 := sharedPlanner.Plan(&op1, &def, "", report1) + require.False(t, report1.HasErrors()) + assert.Equal(t, []costNodeDataSourceHashes{ + { + Field: FieldCoordinate{TypeName: "Query", FieldName: "account"}, + DataSourceHashes: []DSHash{2, 1}, + Children: []costNodeDataSourceHashes{ + { + Field: FieldCoordinate{TypeName: "Account", FieldName: "name"}, + DataSourceHashes: []DSHash{2}, + }, + { + Field: FieldCoordinate{TypeName: "Account", FieldName: "__typename"}, + DataSourceHashes: []DSHash{1}, + }, + { + Field: FieldCoordinate{TypeName: "Account", FieldName: "id"}, + DataSourceHashes: []DSHash{1}, + }, + }, + }, + }, costHashes(plan1)) + + op2Expected := unsafeparser.ParseGraphqlDocumentString(operationWithoutEntityFetch) + expectedPlanner, err := NewPlanner(planConfiguration) + require.NoError(t, err) + expectedReport := &operationreport.Report{} + expectedPlan2 := expectedPlanner.Plan(&op2Expected, &def, "", expectedReport) + require.False(t, expectedReport.HasErrors()) + + op2 := unsafeparser.ParseGraphqlDocumentString(operationWithoutEntityFetch) + report2 := &operationreport.Report{} + plan2 := sharedPlanner.Plan(&op2, &def, "", report2) + require.False(t, report2.HasErrors()) + + assert.Equal(t, expectedPlan2, plan2) + assert.Equal(t, []costNodeDataSourceHashes{ + { + Field: FieldCoordinate{TypeName: "Query", FieldName: "account"}, + DataSourceHashes: []DSHash{1}, + Children: []costNodeDataSourceHashes{ + { + Field: FieldCoordinate{TypeName: "Account", FieldName: "id"}, + DataSourceHashes: []DSHash{1}, + }, + }, + }, + }, costHashes(plan2)) + assert.Equal(t, map[int][]int{ + 0: {0}, + 1: {0}, + }, sharedPlanner.planningVisitor.fieldPlanners) + }) + + // Root field caching isolation tests + // When a root field has caching configured, the planner must isolate it into its own + // planner/fetch so it gets an independent cache config (TTL, cache name, etc.). + // This uses the same pattern as mutations: cached root fields skip planWithExistingPlanners + // and go straight to addNewPlanner. Other fields are prevented from merging into + // isolated planners via the isolatedRootField flag. + t.Run("root field caching isolation", func(t *testing.T) { + const schema = ` + type Query { + me: User + cat: Cat + user(id: ID!): User + } + type User { + id: ID! + username: String! + } + type Cat { + name: String! + } + ` + // Minimal CacheKeyTemplate to enable configureFetchCaching to populate cache config. + // Without this, configureFetchCaching bails early (CacheKeyTemplate == nil). + cacheKeyTpl := &resolve.RootQueryCacheKeyTemplate{} + + // Two cached root fields produce parallel, independent fetches (FetchID 0 and 1, no DependsOnFetchIDs). + // Each fetch gets its own cache config (Enabled, CacheName, TTL). + t.Run("two cached root fields get separate parallel fetches with correct cache configs", test(schema, + `query Q { me { id username } cat { name } }`, "Q", + &SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + RawFetches: []*resolve.FetchItem{ + { + Fetch: &resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 0, + }, + FetchConfiguration: resolve.FetchConfiguration{ + DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "users", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTpl, + }, + }, + DataSourceIdentifier: []byte("plan.FakeDataSource"), + }, + }, + { + Fetch: &resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 1, + }, + FetchConfiguration: resolve.FetchConfiguration{ + DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "pets", + TTL: 60 * time.Second, + CacheKeyTemplate: cacheKeyTpl, + }, + }, + DataSourceIdentifier: []byte("plan.FakeDataSource"), + }, + }, + }, + Data: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("me"), + Value: &resolve.Object{ + Path: []string{"me"}, + Nullable: true, + TypeName: "User", + PossibleTypes: map[string]struct{}{"User": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + }, + { + Name: []byte("username"), + Value: &resolve.String{Path: []string{"username"}}, + }, + }, + }, + }, + { + Name: []byte("cat"), + Value: &resolve.Object{ + Path: []string{"cat"}, + Nullable: true, + TypeName: "Cat", + PossibleTypes: map[string]struct{}{"Cat": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.String{Path: []string{"name"}}, + }, + }, + }, + }, + }, + }, + }, + }, + Configuration{ + DataSources: []DataSource{dsb(). + Id("accounts"). + WithBehavior(DataSourcePlanningBehavior{MergeAliasedRootNodes: true}). + CacheKeyTemplate(cacheKeyTpl). + RootNode("Query", "me", "cat"). + ChildNode("User", "id", "username"). + ChildNode("Cat", "name"). + Schema(schema). + WithMetadata(func(data *FederationMetaData) { + data.RootFieldCaching = RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "users", TTL: 30 * time.Second}, + {TypeName: "Query", FieldName: "cat", CacheName: "pets", TTL: 60 * time.Second}, + } + }). + DS()}, + DisableResolveFieldPositions: true, + DisableIncludeInfo: true, + DisableEntityCaching: false, + }, + )) + + // Cached "me" is isolated from uncached "user" — each gets its own fetch. + // Only the cached field gets Enabled:true. + t.Run("cached field isolated from uncached field - only cached gets L2", test(schema, + `query Q { me { id } user(id: "1") { username } }`, "Q", + &SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + RawFetches: []*resolve.FetchItem{ + { + Fetch: &resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 0, + }, + FetchConfiguration: resolve.FetchConfiguration{ + DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTpl, + }, + }, + DataSourceIdentifier: []byte("plan.FakeDataSource"), + }, + }, + { + Fetch: &resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: 1, + }, + FetchConfiguration: resolve.FetchConfiguration{ + DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{ + CacheKeyTemplate: cacheKeyTpl, + }, + }, + DataSourceIdentifier: []byte("plan.FakeDataSource"), + }, + }, + }, + Data: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("me"), + Value: &resolve.Object{ + Path: []string{"me"}, + Nullable: true, + TypeName: "User", + PossibleTypes: map[string]struct{}{"User": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + }, + }, + }, + }, + { + Name: []byte("user"), + Value: &resolve.Object{ + Path: []string{"user"}, + Nullable: true, + TypeName: "User", + PossibleTypes: map[string]struct{}{"User": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("username"), + Value: &resolve.String{Path: []string{"username"}}, + }, + }, + }, + }, + }, + }, + }, + }, + Configuration{ + DataSources: []DataSource{dsb(). + Id("accounts"). + WithBehavior(DataSourcePlanningBehavior{MergeAliasedRootNodes: true}). + CacheKeyTemplate(cacheKeyTpl). + RootNode("Query", "me", "user"). + ChildNode("User", "id", "username"). + Schema(schema). + WithMetadata(func(data *FederationMetaData) { + data.RootFieldCaching = RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "default", TTL: 30 * time.Second}, + } + }). + DS()}, + DisableResolveFieldPositions: true, + DisableIncludeInfo: true, + }, + )) + + // DisableEntityCaching skips isolation — fields merge into one fetch, L2 disabled. + t.Run("DisableEntityCaching - fields merge and no L2 caching", test(schema, + `query Q { me { id username } cat { name } }`, "Q", + &SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + RawFetches: []*resolve.FetchItem{ + { + Fetch: &resolve.SingleFetch{ + FetchConfiguration: resolve.FetchConfiguration{ + DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{ + CacheKeyTemplate: cacheKeyTpl, + }, + }, + DataSourceIdentifier: []byte("plan.FakeDataSource"), + }, + }, + }, + Data: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("me"), + Value: &resolve.Object{ + Path: []string{"me"}, + Nullable: true, + TypeName: "User", + PossibleTypes: map[string]struct{}{"User": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + }, + { + Name: []byte("username"), + Value: &resolve.String{Path: []string{"username"}}, + }, + }, + }, + }, + { + Name: []byte("cat"), + Value: &resolve.Object{ + Path: []string{"cat"}, + Nullable: true, + TypeName: "Cat", + PossibleTypes: map[string]struct{}{"Cat": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.String{Path: []string{"name"}}, + }, + }, + }, + }, + }, + }, + }, + }, + Configuration{ + DataSources: []DataSource{dsb(). + Id("accounts"). + WithBehavior(DataSourcePlanningBehavior{MergeAliasedRootNodes: true}). + CacheKeyTemplate(cacheKeyTpl). + RootNode("Query", "me", "cat"). + ChildNode("User", "id", "username"). + ChildNode("Cat", "name"). + Schema(schema). + WithMetadata(func(data *FederationMetaData) { + data.RootFieldCaching = RootFieldCacheConfigurations{ + {TypeName: "Query", FieldName: "me", CacheName: "default", TTL: 30 * time.Second}, + {TypeName: "Query", FieldName: "cat", CacheName: "default", TTL: 60 * time.Second}, + } + }). + DS()}, + DisableResolveFieldPositions: true, + DisableIncludeInfo: true, + DisableEntityCaching: true, + }, + )) + + // No RootFieldCaching at all — fields merge normally, L2 disabled. + t.Run("no caching configured - fields merge normally", test(schema, + `query Q { me { id username } cat { name } }`, "Q", + &SynchronousResponsePlan{ + Response: &resolve.GraphQLResponse{ + RawFetches: []*resolve.FetchItem{ + { + Fetch: &resolve.SingleFetch{ + FetchConfiguration: resolve.FetchConfiguration{ + DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{ + CacheKeyTemplate: cacheKeyTpl, + }, + }, + DataSourceIdentifier: []byte("plan.FakeDataSource"), + }, + }, + }, + Data: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("me"), + Value: &resolve.Object{ + Path: []string{"me"}, + Nullable: true, + TypeName: "User", + PossibleTypes: map[string]struct{}{"User": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.Scalar{Path: []string{"id"}}, + }, + { + Name: []byte("username"), + Value: &resolve.String{Path: []string{"username"}}, + }, + }, + }, + }, + { + Name: []byte("cat"), + Value: &resolve.Object{ + Path: []string{"cat"}, + Nullable: true, + TypeName: "Cat", + PossibleTypes: map[string]struct{}{"Cat": {}}, + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.String{Path: []string{"name"}}, + }, + }, + }, + }, + }, + }, + }, + }, + Configuration{ + DataSources: []DataSource{dsb(). + Id("accounts"). + WithBehavior(DataSourcePlanningBehavior{MergeAliasedRootNodes: true}). + CacheKeyTemplate(cacheKeyTpl). + RootNode("Query", "me", "cat"). + ChildNode("User", "id", "username"). + ChildNode("Cat", "name"). + Schema(schema). + DS()}, + DisableResolveFieldPositions: true, + DisableIncludeInfo: true, + }, + )) + }) } var expectedMyHeroPlan = &SynchronousResponsePlan{ @@ -808,6 +1362,7 @@ var expectedMyHeroPlan = &SynchronousResponsePlan{ Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -853,6 +1408,7 @@ var expectedMyHeroPlanWithFragment = &SynchronousResponsePlan{ Fetch: &resolve.SingleFetch{ FetchConfiguration: resolve.FetchConfiguration{ DataSource: &FakeDataSource{&StatefulSource{}}, + Caching: resolve.FetchCacheConfiguration{}, }, DataSourceIdentifier: []byte("plan.FakeDataSource"), }, @@ -991,8 +1547,9 @@ func (s *StatefulSource) Start() { } type FakeFactory[T any] struct { - upstreamSchema *ast.Document - behavior *DataSourcePlanningBehavior + upstreamSchema *ast.Document + behavior *DataSourcePlanningBehavior + cacheKeyTemplate resolve.CacheKeyTemplate } func (f *FakeFactory[T]) UpstreamSchema(_ DataSourceConfiguration[T]) (*ast.Document, bool) { @@ -1010,9 +1567,10 @@ func (f *FakeFactory[T]) Planner(_ abstractlogger.Logger) DataSourcePlanner[T] { source := &StatefulSource{} go source.Start() return &FakePlanner[T]{ - source: source, - upstreamSchema: f.upstreamSchema, - behavior: f.behavior, + source: source, + upstreamSchema: f.upstreamSchema, + behavior: f.behavior, + cacheKeyTemplate: f.cacheKeyTemplate, } } @@ -1020,11 +1578,23 @@ func (f *FakeFactory[T]) Context() context.Context { return context.TODO() } +type fieldTrackingFakeFactory[T any] struct { + *FakeFactory[T] +} + +func (f *fieldTrackingFakeFactory[T]) Planner(logger abstractlogger.Logger) DataSourcePlanner[T] { + planner := f.FakeFactory.Planner(logger).(*FakePlanner[T]) + return &fieldTrackingFakePlanner[T]{ + FakePlanner: planner, + } +} + type FakePlanner[T any] struct { - id int - source *StatefulSource - upstreamSchema *ast.Document - behavior *DataSourcePlanningBehavior + id int + source *StatefulSource + upstreamSchema *ast.Document + behavior *DataSourcePlanningBehavior + cacheKeyTemplate resolve.CacheKeyTemplate } func (f *FakePlanner[T]) ID() int { @@ -1044,12 +1614,33 @@ func (f *FakePlanner[T]) Register(visitor *Visitor, _ DataSourceConfiguration[T] return nil } +type fieldTrackingFakePlanner[T any] struct { + *FakePlanner[T] +} + +func (f *fieldTrackingFakePlanner[T]) Register(visitor *Visitor, _ DataSourceConfiguration[T], _ DataSourcePlannerConfiguration) error { + visitor.Walker.RegisterEnterDocumentVisitor(f) + visitor.Walker.RegisterEnterFieldVisitor(f) + visitor.Walker.RegisterLeaveFieldVisitor(f) + return nil +} + +func (f *fieldTrackingFakePlanner[T]) EnterField(ref int) { +} + +func (f *fieldTrackingFakePlanner[T]) LeaveField(ref int) { +} + func (f *FakePlanner[T]) ConfigureFetch() resolve.FetchConfiguration { - return resolve.FetchConfiguration{ + cfg := resolve.FetchConfiguration{ DataSource: &FakeDataSource{ source: f.source, }, } + if f.cacheKeyTemplate != nil { + cfg.Caching.CacheKeyTemplate = f.cacheKeyTemplate + } + return cfg } func (f *FakePlanner[T]) ConfigureSubscription() SubscriptionConfiguration { diff --git a/v2/pkg/engine/plan/representation_variable.go b/v2/pkg/engine/plan/representation_variable.go new file mode 100644 index 0000000000..747620a7e9 --- /dev/null +++ b/v2/pkg/engine/plan/representation_variable.go @@ -0,0 +1,359 @@ +package plan + +import ( + "bytes" + "slices" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/astvisitor" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +type representationObjectFields struct { + popOnField int + isRoot bool + fields *[]*resolve.Field +} + +// BuildRepresentationVariableNode builds a resolve.Object node from a FederationFieldConfiguration +// and the given AST definition. It creates a representation variable with __typename and the fields +// specified in the configuration's SelectionSet. +func BuildRepresentationVariableNode(definition *ast.Document, cfg FederationFieldConfiguration, federationCfg FederationMetaData) (*resolve.Object, error) { + key, report := RequiredFieldsFragment(cfg.TypeName, cfg.SelectionSet, false) + if report.HasErrors() { + return nil, report + } + + walker := astvisitor.WalkerFromPool() + defer walker.Release() + + var interfaceObjectTypeName *string + for _, interfaceObjCfg := range federationCfg.InterfaceObjects { + if slices.Contains(interfaceObjCfg.ConcreteTypeNames, cfg.TypeName) { + interfaceObjectTypeName = &interfaceObjCfg.InterfaceTypeName + break + } + } + var entityInterfaceTypeName *string + for _, entityInterfaceCfg := range federationCfg.EntityInterfaces { + if slices.Contains(entityInterfaceCfg.ConcreteTypeNames, cfg.TypeName) { + entityInterfaceTypeName = &entityInterfaceCfg.InterfaceTypeName + break + } + } + + visitor := &planRepresentationVariableVisitor{ + typeName: cfg.TypeName, + interfaceObjectTypeName: interfaceObjectTypeName, + entityInterfaceTypeName: entityInterfaceTypeName, + addOnType: true, + addTypeName: true, + remapPaths: cfg.RemappedPaths, + Walker: walker, + } + walker.RegisterEnterDocumentVisitor(visitor) + walker.RegisterFieldVisitor(visitor) + + walker.Walk(key, definition, report) + if report.HasErrors() { + return nil, report + } + + return visitor.rootObject, nil +} + +// MergeRepresentationVariableNodes merges multiple representation variable objects into one. +// It is part of the public planner API consumed by external integrations +// such as wundergraph/cosmo; breaking changes require coordinated downstream updates. +func MergeRepresentationVariableNodes(objects []*resolve.Object) *resolve.Object { + fieldCount := 0 + for _, object := range objects { + fieldCount += len(object.Fields) + } + + fields := make([]*resolve.Field, 0, fieldCount) + + for _, object := range objects { + for _, field := range object.Fields { + if i, ok := representationFieldsHasField(fields, field); ok { + fields[i] = mergeRepresentationFields(fields[i], field) + } else { + fields = append(fields, field) + } + } + } + + return &resolve.Object{ + Nullable: true, + Fields: fields, + } +} + +func mergeRepresentationFields(left, right *resolve.Field) *resolve.Field { + switch left.Value.NodeKind() { + case resolve.NodeKindObject: + left.Value = mergeRepresentationObjects(left.Value, right.Value) + case resolve.NodeKindArray: + left.Value = mergeRepresentationArrays(left.Value, right.Value) + } + return left +} + +func mergeRepresentationArrays(left, right resolve.Node) resolve.Node { + leftArray, _ := left.(*resolve.Array) + rightArray, _ := right.(*resolve.Array) + if leftArray.Item.NodeKind() == resolve.NodeKindObject { + leftArray.Item = mergeRepresentationObjects(leftArray.Item, rightArray.Item) + } + return leftArray +} + +func mergeRepresentationObjects(left, right resolve.Node) resolve.Node { + leftObject, _ := left.(*resolve.Object) + rightObject, _ := right.(*resolve.Object) + for _, field := range rightObject.Fields { + if i, ok := representationFieldsHasField(leftObject.Fields, field); ok { + leftObject.Fields[i] = mergeRepresentationFields(leftObject.Fields[i], field) + } else { + leftObject.Fields = append(leftObject.Fields, field) + } + } + return leftObject +} + +func representationIsOnTypeEqual(a, b [][]byte) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !bytes.Equal(a[i], b[i]) { + return false + } + } + return true +} + +func representationFieldsHasField(fields []*resolve.Field, field *resolve.Field) (int, bool) { + for i, f := range fields { + if bytes.Equal(f.Name, field.Name) && representationIsOnTypeEqual(f.OnTypeNames, field.OnTypeNames) { + return i, true + } + } + return -1, false +} + +type planRepresentationVariableVisitor struct { + *astvisitor.Walker + + key, definition *ast.Document + + currentFields []representationObjectFields + rootObject *resolve.Object + + typeName string + interfaceObjectTypeName *string + entityInterfaceTypeName *string + + addOnType bool + addTypeName bool + remapPaths map[string]string +} + +func (v *planRepresentationVariableVisitor) EnterDocument(key, definition *ast.Document) { + v.key = key + v.definition = definition + + fields := make([]*resolve.Field, 0, 2) + if v.addTypeName { + typeNameField := &resolve.Field{ + Name: []byte("__typename"), + } + + if v.interfaceObjectTypeName != nil { + typeNameField.Value = &resolve.StaticString{ + Path: []string{"__typename"}, + Value: *v.interfaceObjectTypeName, + } + } else { + typeNameField.Value = &resolve.String{ + Path: []string{"__typename"}, + } + } + + if v.addOnType { + v.addTypeNameToField(typeNameField) + } + + fields = append(fields, typeNameField) + } + + v.rootObject = &resolve.Object{ + Nullable: true, + Fields: fields, + } + + v.currentFields = append(v.currentFields, representationObjectFields{ + fields: &v.rootObject.Fields, + popOnField: -1, + isRoot: true, + }) +} + +func (v *planRepresentationVariableVisitor) EnterField(ref int) { + fieldName := v.key.FieldNameBytes(ref) + + fieldDefinition, ok := v.Walker.FieldDefinition(ref) + if !ok { + return + } + fieldDefinitionType := v.definition.FieldDefinitionType(fieldDefinition) + + currentPath := v.Walker.Path.DotDelimitedString() + "." + string(fieldName) + + fieldPath := string(fieldName) + if remapPath, ok := v.remapPaths[currentPath]; ok { + fieldPath = remapPath + } + + currentField := &resolve.Field{ + Name: fieldName, + Value: v.resolveFieldValue(ref, fieldDefinitionType, true, []string{fieldPath}), + OnTypeNames: v.resolveOnTypeNames(ref), + } + + if v.addOnType && v.currentFields[len(v.currentFields)-1].isRoot { + v.addTypeNameToField(currentField) + } + + *v.currentFields[len(v.currentFields)-1].fields = append(*v.currentFields[len(v.currentFields)-1].fields, currentField) +} + +func (v *planRepresentationVariableVisitor) addTypeNameToField(field *resolve.Field) { + switch { + case v.interfaceObjectTypeName != nil: + field.OnTypeNames = [][]byte{[]byte(v.typeName), []byte(*v.interfaceObjectTypeName)} + case v.entityInterfaceTypeName != nil: + field.OnTypeNames = [][]byte{[]byte(v.typeName), []byte(*v.entityInterfaceTypeName)} + default: + field.OnTypeNames = [][]byte{[]byte(v.typeName)} + } +} + +func (v *planRepresentationVariableVisitor) LeaveField(ref int) { + if v.currentFields[len(v.currentFields)-1].popOnField == ref { + v.currentFields = v.currentFields[:len(v.currentFields)-1] + } +} + +func (v *planRepresentationVariableVisitor) resolveFieldValue(fieldRef, typeRef int, nullable bool, path []string) resolve.Node { + ofType := v.definition.Types[typeRef].OfType + + switch v.definition.Types[typeRef].TypeKind { + case ast.TypeKindNonNull: + return v.resolveFieldValue(fieldRef, ofType, false, path) + case ast.TypeKindList: + listItem := v.resolveFieldValue(fieldRef, ofType, true, nil) + return &resolve.Array{ + Nullable: nullable, + Path: path, + Item: listItem, + } + case ast.TypeKindNamed: + typeName := v.definition.ResolveTypeNameString(typeRef) + typeDefinitionNode, ok := v.definition.Index.FirstNodeByNameStr(typeName) + if !ok { + return &resolve.Null{} + } + switch typeDefinitionNode.Kind { + case ast.NodeKindScalarTypeDefinition: + switch typeName { + case "String": + return &resolve.String{Path: path, Nullable: nullable} + case "Boolean": + return &resolve.Boolean{Path: path, Nullable: nullable} + case "Int": + return &resolve.Integer{Path: path, Nullable: nullable} + case "Float": + return &resolve.Float{Path: path, Nullable: nullable} + default: + return &resolve.Scalar{Path: path, Nullable: nullable} + } + case ast.NodeKindEnumTypeDefinition: + return &resolve.String{Path: path, Nullable: nullable} + case ast.NodeKindObjectTypeDefinition, ast.NodeKindInterfaceTypeDefinition, ast.NodeKindUnionTypeDefinition: + object := &resolve.Object{ + Nullable: nullable, + Path: path, + Fields: []*resolve.Field{}, + } + v.Walker.DefferOnEnterField(func() { + v.currentFields = append(v.currentFields, representationObjectFields{ + popOnField: fieldRef, + fields: &object.Fields, + }) + }) + return object + default: + return &resolve.Null{} + } + default: + return &resolve.Null{} + } +} + +func (v *planRepresentationVariableVisitor) resolveOnTypeNames(fieldRef int) [][]byte { + if len(v.Walker.Ancestors) < 2 { + return nil + } + inlineFragment := v.Walker.Ancestors[len(v.Walker.Ancestors)-2] + if inlineFragment.Kind != ast.NodeKindInlineFragment { + return nil + } + typeName := v.key.InlineFragmentTypeConditionName(inlineFragment.Ref) + if typeName == nil { + typeName = v.Walker.EnclosingTypeDefinition.NameBytes(v.definition) + } + node, exists := v.definition.NodeByName(typeName) + if !exists || !node.Kind.IsAbstractType() { + return [][]byte{typeName} + } + if node.Kind == ast.NodeKindUnionTypeDefinition { + panic("resolveOnTypeNames called with a union type") + } + onTypeNames := make([][]byte, 0, 2) + for objectTypeDefinitionRef := range v.definition.ObjectTypeDefinitions { + if v.definition.ObjectTypeDefinitionImplementsInterface(objectTypeDefinitionRef, typeName) { + onTypeNames = append(onTypeNames, v.definition.ObjectTypeDefinitionNameBytes(objectTypeDefinitionRef)) + } + } + if len(onTypeNames) == 0 { + return nil + } + + if len(v.Walker.TypeDefinitions) > 1 { + grandParent := v.Walker.TypeDefinitions[len(v.Walker.TypeDefinitions)-2] + if grandParent.Kind == ast.NodeKindUnionTypeDefinition { + for i := 0; i < len(onTypeNames); i++ { + possibleMember, exists := v.definition.Index.FirstNodeByNameStr(string(onTypeNames[i])) + if !exists { + continue + } + if !v.definition.NodeIsUnionMember(possibleMember, grandParent) { + onTypeNames = append(onTypeNames[:i], onTypeNames[i+1:]...) + i-- + } + } + } + if grandParent.Kind == ast.NodeKindInterfaceTypeDefinition { + objectTypesImplementingGrandParent, _ := v.definition.InterfaceTypeDefinitionImplementedByObjectWithNames(grandParent.Ref) + for i := 0; i < len(onTypeNames); i++ { + if !slices.Contains(objectTypesImplementingGrandParent, string(onTypeNames[i])) { + onTypeNames = append(onTypeNames[:i], onTypeNames[i+1:]...) + i-- + } + } + } + } + + return onTypeNames +} diff --git a/v2/pkg/engine/plan/representation_variable_test.go b/v2/pkg/engine/plan/representation_variable_test.go new file mode 100644 index 0000000000..20ae8e2d31 --- /dev/null +++ b/v2/pkg/engine/plan/representation_variable_test.go @@ -0,0 +1,605 @@ +package plan + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/astparser" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// Verifies that BuildRepresentationVariableNode produces the correct resolve.Object +// tree for entity representation variables (_Any types) used in _entities queries. +// Incorrect representation variables cause entity resolution failures at runtime. +func TestBuildRepresentationVariableNode(t *testing.T) { + runTest := func(t *testing.T, definitionStr string, cfg FederationFieldConfiguration, federationMeta FederationMetaData, expectedNode *resolve.Object) { + t.Helper() + definition, report := astparser.ParseGraphqlDocumentString(definitionStr) + require.False(t, report.HasErrors(), report.Error()) + + node, err := BuildRepresentationVariableNode(&definition, cfg, federationMeta) + require.NoError(t, err) + assert.Equal(t, expectedNode, node) + } + + t.Run("simple scalar fields", func(t *testing.T) { + runTest(t, ` + scalar String + + type User { + id: String! + name: String! + } + `, + FederationFieldConfiguration{ + TypeName: "User", + SelectionSet: "id name", + }, + FederationMetaData{}, + &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }) + }) + + t.Run("with RemappedPaths", func(t *testing.T) { + runTest(t, ` + scalar String + + type User { + id: String! + name: String! + } + `, + FederationFieldConfiguration{ + TypeName: "User", + SelectionSet: "id name", + RemappedPaths: map[string]string{ + "User.id": "userId", + "User.name": "displayName", + }, + }, + FederationMetaData{}, + &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"userId"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"displayName"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }) + }) + + t.Run("with interface object type name", func(t *testing.T) { + runTest(t, ` + scalar String + + type User { + id: String! + name: String! + } + `, + FederationFieldConfiguration{ + TypeName: "User", + SelectionSet: "id name", + }, + FederationMetaData{ + InterfaceObjects: []EntityInterfaceConfiguration{ + { + InterfaceTypeName: "Account", + ConcreteTypeNames: []string{"User", "Admin"}, + }, + }, + }, + &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.StaticString{ + Path: []string{"__typename"}, + Value: "Account", + }, + OnTypeNames: [][]byte{[]byte("User"), []byte("Account")}, + }, + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User"), []byte("Account")}, + }, + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + OnTypeNames: [][]byte{[]byte("User"), []byte("Account")}, + }, + }, + }) + }) + + t.Run("with entity interface type name", func(t *testing.T) { + runTest(t, ` + scalar String + + type User { + id: String! + name: String! + } + `, + FederationFieldConfiguration{ + TypeName: "User", + SelectionSet: "id name", + }, + FederationMetaData{ + EntityInterfaces: []EntityInterfaceConfiguration{ + { + InterfaceTypeName: "Node", + ConcreteTypeNames: []string{"User", "Product"}, + }, + }, + }, + &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("User"), []byte("Node")}, + }, + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User"), []byte("Node")}, + }, + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + OnTypeNames: [][]byte{[]byte("User"), []byte("Node")}, + }, + }, + }) + }) + + t.Run("deeply nested fields", func(t *testing.T) { + runTest(t, ` + scalar String + scalar Int + scalar Float + + type User { + id: String! + account: Account! + } + + type Account { + accountID: Int! + address: Address! + } + + type Address { + zip: Float! + } + `, + FederationFieldConfiguration{ + TypeName: "User", + SelectionSet: "id account { accountID address { zip } }", + }, + FederationMetaData{}, + &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("account"), + Value: &resolve.Object{ + Path: []string{"account"}, + Fields: []*resolve.Field{ + { + Name: []byte("accountID"), + Value: &resolve.Integer{ + Path: []string{"accountID"}, + }, + }, + { + Name: []byte("address"), + Value: &resolve.Object{ + Path: []string{"address"}, + Fields: []*resolve.Field{ + { + Name: []byte("zip"), + Value: &resolve.Float{ + Path: []string{"zip"}, + }, + }, + }, + }, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }) + }) + + t.Run("with inline fragment on interface", func(t *testing.T) { + runTest(t, ` + scalar String + + type User { + id: String! + info: Info! + } + + interface Info { + title: String! + } + + type PersonalInfo implements Info { + title: String! + nickname: String! + } + + type WorkInfo implements Info { + title: String! + role: String! + } + `, + FederationFieldConfiguration{ + TypeName: "User", + SelectionSet: "id info { ... on PersonalInfo { nickname } }", + }, + FederationMetaData{}, + &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("__typename"), + Value: &resolve.String{ + Path: []string{"__typename"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("info"), + Value: &resolve.Object{ + Path: []string{"info"}, + Fields: []*resolve.Field{ + { + Name: []byte("nickname"), + Value: &resolve.String{ + Path: []string{"nickname"}, + }, + OnTypeNames: [][]byte{[]byte("PersonalInfo")}, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + }) + }) +} + +// Verifies that merging multiple representation variable nodes correctly +// combines fields from different entity types into a single representation object. +func TestMergeRepresentationVariableNodes(t *testing.T) { + t.Run("different entities by OnTypeNames", func(t *testing.T) { + userRepresentation := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + adminRepresentation := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("Admin")}, + }, + }, + } + + expected := &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("Admin")}, + }, + }, + } + + merged := MergeRepresentationVariableNodes([]*resolve.Object{userRepresentation, adminRepresentation}) + assert.Equal(t, expected, merged) + }) + + t.Run("same entity disjoint fields", func(t *testing.T) { + keyRepresentation := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + requiresRepresentation := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + expected := &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + merged := MergeRepresentationVariableNodes([]*resolve.Object{keyRepresentation, requiresRepresentation}) + assert.Equal(t, expected, merged) + }) + + t.Run("overlapping nested fields are merged", func(t *testing.T) { + first := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("info"), + Value: &resolve.Object{ + Path: []string{"info"}, + Fields: []*resolve.Field{ + { + Name: []byte("kind"), + Value: &resolve.String{ + Path: []string{"kind"}, + }, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + second := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("info"), + Value: &resolve.Object{ + Path: []string{"info"}, + Fields: []*resolve.Field{ + { + Name: []byte("type"), + Value: &resolve.String{ + Path: []string{"type"}, + }, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + expected := &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("info"), + Value: &resolve.Object{ + Path: []string{"info"}, + Fields: []*resolve.Field{ + { + Name: []byte("kind"), + Value: &resolve.String{ + Path: []string{"kind"}, + }, + }, + { + Name: []byte("type"), + Value: &resolve.String{ + Path: []string{"type"}, + }, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + merged := MergeRepresentationVariableNodes([]*resolve.Object{first, second}) + assert.Equal(t, expected, merged) + }) + + t.Run("overlapping array fields are merged", func(t *testing.T) { + first := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("items"), + Value: &resolve.Array{ + Path: []string{"items"}, + Item: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + }, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + second := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("items"), + Value: &resolve.Array{ + Path: []string{"items"}, + Item: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + }, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + expected := &resolve.Object{ + Nullable: true, + Fields: []*resolve.Field{ + { + Name: []byte("items"), + Value: &resolve.Array{ + Path: []string{"items"}, + Item: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("id"), + Value: &resolve.String{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("name"), + Value: &resolve.String{ + Path: []string{"name"}, + }, + }, + }, + }, + }, + OnTypeNames: [][]byte{[]byte("User")}, + }, + }, + } + + merged := MergeRepresentationVariableNodes([]*resolve.Object{first, second}) + assert.Equal(t, expected, merged) + }) +} diff --git a/v2/pkg/engine/plan/request_scoped_provides_data_test.go b/v2/pkg/engine/plan/request_scoped_provides_data_test.go new file mode 100644 index 0000000000..9ce06f727d --- /dev/null +++ b/v2/pkg/engine/plan/request_scoped_provides_data_test.go @@ -0,0 +1,173 @@ +package plan + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// TestPopulateRequestScopedFieldsProvidesData verifies that the function correctly +// locates requestScoped fields in the planner's response Object tree by their +// response key (alias or schema name) and populates ProvidesData. +func TestPopulateRequestScopedFieldsProvidesData(t *testing.T) { + t.Parallel() + caching := newCachingPlannerState(&Visitor{}) + + t.Run("no plannerObj leaves fields unchanged", func(t *testing.T) { + t.Parallel() + fields := []resolve.RequestScopedField{ + {FieldName: "currentViewer", FieldPath: []string{"currentViewer"}, L1Key: "k"}, + } + out := caching.populateRequestScopedFieldsProvidesData(fields, nil) + assert.Equal(t, fields, out) + }) + + t.Run("no matching field leaves ProvidesData nil", func(t *testing.T) { + t.Parallel() + plannerObj := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.Scalar{}}, + }, + } + fields := []resolve.RequestScopedField{ + {FieldName: "currentViewer", FieldPath: []string{"currentViewer"}, L1Key: "k"}, + } + out := caching.populateRequestScopedFieldsProvidesData(fields, plannerObj) + assert.Len(t, out, 1) + assert.Equal(t, "currentViewer", out[0].FieldName) + assert.Nil(t, out[0].ProvidesData) + }) + + t.Run("matching field by response key populates ProvidesData", func(t *testing.T) { + t.Parallel() + viewerObj := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.Scalar{}}, + {Name: []byte("name"), Value: &resolve.Scalar{}}, + }, + } + plannerObj := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("currentViewer"), Value: viewerObj}, + }, + } + fields := []resolve.RequestScopedField{ + {FieldName: "currentViewer", FieldPath: []string{"currentViewer"}, L1Key: "k"}, + } + out := caching.populateRequestScopedFieldsProvidesData(fields, plannerObj) + assert.Len(t, out, 1) + assert.Equal(t, "currentViewer", out[0].FieldName) + assert.Equal(t, []string{"currentViewer"}, out[0].FieldPath) + assert.Same(t, viewerObj, out[0].ProvidesData) + }) + + t.Run("aliased field matched by alias (response key)", func(t *testing.T) { + t.Parallel() + viewerObj := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.Scalar{}}, + {Name: []byte("name"), Value: &resolve.Scalar{}}, + }, + } + // Outer query: { articles { viewer: currentViewer { id name } } } + // The datasource planner already resolved the alias, so FieldName="viewer". + // plannerObj has the field under the alias "viewer". + plannerObj := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("viewer"), // alias (= response key) + OriginalName: []byte("currentViewer"), // schema name + Value: viewerObj, + }, + }, + } + fields := []resolve.RequestScopedField{ + {FieldName: "viewer", FieldPath: []string{"viewer"}, L1Key: "k"}, + } + out := caching.populateRequestScopedFieldsProvidesData(fields, plannerObj) + assert.Len(t, out, 1) + assert.Equal(t, "viewer", out[0].FieldName) + assert.Equal(t, []string{"viewer"}, out[0].FieldPath) + assert.Same(t, viewerObj, out[0].ProvidesData) + }) + + t.Run("multiple fields, mix of aliased and unaliased", func(t *testing.T) { + t.Parallel() + viewerObj := &resolve.Object{Fields: []*resolve.Field{{Name: []byte("id"), Value: &resolve.Scalar{}}}} + tenantObj := &resolve.Object{Fields: []*resolve.Field{{Name: []byte("id"), Value: &resolve.Scalar{}}}} + plannerObj := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("viewer"), OriginalName: []byte("currentViewer"), Value: viewerObj}, + {Name: []byte("tenantConfig"), Value: tenantObj}, + }, + } + fields := []resolve.RequestScopedField{ + {FieldName: "viewer", FieldPath: []string{"viewer"}, L1Key: "k1"}, + {FieldName: "tenantConfig", FieldPath: []string{"tenantConfig"}, L1Key: "k2"}, + } + out := caching.populateRequestScopedFieldsProvidesData(fields, plannerObj) + assert.Len(t, out, 2) + assert.Same(t, viewerObj, out[0].ProvidesData) + assert.Same(t, tenantObj, out[1].ProvidesData) + }) + + t.Run("scalar field value does not populate ProvidesData", func(t *testing.T) { + t.Parallel() + plannerObj := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("locale"), Value: &resolve.Scalar{}}, + }, + } + fields := []resolve.RequestScopedField{ + {FieldName: "locale", FieldPath: []string{"locale"}, L1Key: "k"}, + } + out := caching.populateRequestScopedFieldsProvidesData(fields, plannerObj) + assert.Len(t, out, 1) + assert.Nil(t, out[0].ProvidesData) // Scalar, not Object + }) +} + +// TestFindObjectFieldByResponseKey verifies the response-key lookup helper. +func TestFindObjectFieldByResponseKey(t *testing.T) { + t.Parallel() + caching := newCachingPlannerState(&Visitor{}) + + obj := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.Scalar{}}, + {Name: []byte("cv"), OriginalName: []byte("currentViewer"), Value: &resolve.Object{}}, + }, + } + + t.Run("matches by response key", func(t *testing.T) { + t.Parallel() + sub := caching.findObjectFieldByResponseKey(obj, "cv") + assert.NotNil(t, sub) + }) + + t.Run("schema name does not match when aliased", func(t *testing.T) { + t.Parallel() + sub := caching.findObjectFieldByResponseKey(obj, "currentViewer") + assert.Nil(t, sub) + }) + + t.Run("scalar field returns nil", func(t *testing.T) { + t.Parallel() + sub := caching.findObjectFieldByResponseKey(obj, "id") + assert.Nil(t, sub) + }) + + t.Run("not found returns nil", func(t *testing.T) { + t.Parallel() + sub := caching.findObjectFieldByResponseKey(obj, "unknown") + assert.Nil(t, sub) + }) + + t.Run("nil obj returns nil", func(t *testing.T) { + t.Parallel() + sub := caching.findObjectFieldByResponseKey(nil, "anything") + assert.Nil(t, sub) + }) +} diff --git a/v2/pkg/engine/plan/required_fields_provided_visitor.go b/v2/pkg/engine/plan/required_fields_provided_visitor.go index 557e099526..3777e53675 100644 --- a/v2/pkg/engine/plan/required_fields_provided_visitor.go +++ b/v2/pkg/engine/plan/required_fields_provided_visitor.go @@ -47,10 +47,6 @@ type areRequiredFieldsProvidedInput struct { // When one of the parent nodes provides fields, which are mentioned in requires. // We can skip fetching these requirements, because fields are already available under the given path. func areRequiredFieldsProvided(input areRequiredFieldsProvidedInput) (bool, *operationreport.Report) { - if len(input.providedFields) == 0 { - return false, operationreport.NewReport() - } - key, report := RequiredFieldsFragment(input.typeName, input.requiredFields, false) if report.HasErrors() { return false, report diff --git a/v2/pkg/engine/plan/required_fields_provided_visitor_test.go b/v2/pkg/engine/plan/required_fields_provided_visitor_test.go index b83d5c2eda..8aa1505329 100644 --- a/v2/pkg/engine/plan/required_fields_provided_visitor_test.go +++ b/v2/pkg/engine/plan/required_fields_provided_visitor_test.go @@ -135,6 +135,17 @@ func TestAreRequiredFieldsProvided(t *testing.T) { }, expected: true, }, + { + name: "local child field is implicitly accessible without explicit provided fields", + typeName: "User", + requiredFields: "name", + parentPath: "query.me", + providedFields: map[string]struct{}{}, + expected: true, + datasource: dsb(). + ChildNode("User", "name"). + DS(), + }, { name: "no provided fields", typeName: "User", diff --git a/v2/pkg/engine/plan/required_fields_visitor.go b/v2/pkg/engine/plan/required_fields_visitor.go index 2123605015..bc05878ad0 100644 --- a/v2/pkg/engine/plan/required_fields_visitor.go +++ b/v2/pkg/engine/plan/required_fields_visitor.go @@ -225,6 +225,7 @@ func (v *requiredFieldsVisitor) EnterField(ref int) { func (v *requiredFieldsVisitor) handleRequiredField(ref int) { fieldName := v.key.FieldNameBytes(ref) + fieldAliasOrName := v.key.FieldAliasOrNameBytes(ref) isTypeName := bytes.Equal(fieldName, typeNameFieldBytes) // we need to add alias if operation has such field and: @@ -234,7 +235,7 @@ func (v *requiredFieldsVisitor) handleRequiredField(ref int) { needAlias := v.key.FieldHasArguments(ref) selectionSetRef := v.OperationNodes[len(v.OperationNodes)-1].Ref - operationHasField, operationFieldRef := v.config.operation.SelectionSetHasFieldSelectionWithExactName(selectionSetRef, fieldName) + operationHasField, operationFieldRef := v.config.operation.SelectionSetHasFieldSelectionWithExactName(selectionSetRef, fieldAliasOrName) if operationHasField && !needAlias { // we are skipping adding __typename field to the required fields, @@ -309,7 +310,12 @@ func (v *requiredFieldsVisitor) addRequiredField(keyRef int, fieldName ast.ByteS SelectionSet: ast.InvalidRef, } - if addAlias { + if v.key.FieldAliasIsDefined(keyRef) { + field.Alias = ast.Alias{ + IsDefined: true, + Name: v.config.operation.Input.AppendInputBytes(v.key.FieldAliasBytes(keyRef)), + } + } else if addAlias { aliasName := bytes.NewBuffer([]byte("__internal_")) aliasName.Write(fieldName) fullAliasName := aliasName.Bytes() diff --git a/v2/pkg/engine/plan/visitor.go b/v2/pkg/engine/plan/visitor.go index 69faf9ecd4..74cd0b2d02 100644 --- a/v2/pkg/engine/plan/visitor.go +++ b/v2/pkg/engine/plan/visitor.go @@ -66,10 +66,11 @@ type Visitor struct { // fieldEnclosingTypeNames maps fieldRef to the enclosing type name. fieldEnclosingTypeNames map[int]string + caching *cachingPlannerState } func NewVisitor(w *astvisitor.Walker) *Visitor { - return &Visitor{ + visitor := &Visitor{ Walker: w, fieldConfigs: map[int]*FieldConfiguration{}, exportedVariables: map[string]struct{}{}, @@ -80,6 +81,15 @@ func NewVisitor(w *astvisitor.Walker) *Visitor { fieldPlanners: map[int][]int{}, fieldEnclosingTypeNames: map[int]string{}, } + visitor.caching = newCachingPlannerState(visitor) + return visitor +} + +func (v *Visitor) RequestScopedFetchAlias(fieldRef int) (string, bool) { + if v == nil { + return "", false + } + return v.caching.fetchAlias(fieldRef) } type indirectInterfaceField struct { @@ -363,6 +373,18 @@ func (v *Visitor) EnterField(ref int) { if !v.Config.DisableIncludeFieldDependencies { v.fieldEnclosingTypeNames[ref] = strings.Clone(v.Walker.EnclosingTypeDefinition.NameString(v.Definition)) } + + // Track field for each planner that should handle it. + // trackFieldForPlanner delegates the ownership check to shouldPlannerHandleField + // and returns early for planners that don't own this path. A reverse index + // (fieldRef → owning plannerIDs) is not usable here because the walker + // invokes planningVisitor.EnterField before AllowVisitor has fired for the + // individual planner visitors — so the fieldPlanners map is not yet + // populated at this point. + for plannerID := range v.planners { + v.caching.trackFieldForPlanner(plannerID, ref) + } + // check if we have to skip the field in the response // it means it was requested by the planner not the user if v.skipField(ref) { @@ -371,8 +393,16 @@ func (v *Visitor) EnterField(ref int) { fieldName := v.Operation.FieldNameBytes(ref) fieldAliasOrName := v.Operation.FieldAliasOrNameBytes(ref) + responseFieldName := fieldAliasOrName + if visible, ok := v.caching.visibleResponseKey(ref); ok { + responseFieldName = []byte(visible) + } + fetchResponseKey := v.Operation.FieldAliasOrNameString(ref) + if fetchAlias, ok := v.caching.fetchAlias(ref); ok { + fetchResponseKey = fetchAlias + } - if bytes.Equal(fieldAliasOrName, []byte("__internal__typename_placeholder")) { + if bytes.Equal(responseFieldName, []byte("__internal__typename_placeholder")) { // we should skip such typename as it was added as a placeholder to keep query valid return } @@ -386,11 +416,14 @@ func (v *Visitor) EnterField(ref int) { onTypeNames := v.resolveOnTypeNames(ref, fieldName) v.currentField = &resolve.Field{ - Name: fieldAliasOrName, + Name: responseFieldName, OnTypeNames: onTypeNames, Position: v.resolveFieldPosition(ref), Info: v.resolveFieldInfo(ref, fieldDefinitionTypeRef, onTypeNames), } + if _, ok := v.caching.visibleResponseKey(ref); ok && !bytes.Equal(responseFieldName, fieldName) { + v.currentField.OriginalName = fieldName + } if bytes.Equal(fieldName, literal.TYPENAME) { typeName := v.Walker.EnclosingTypeDefinition.NameBytes(v.Definition) @@ -398,20 +431,20 @@ func (v *Visitor) EnterField(ref int) { if isRootQueryType { str := &resolve.StaticString{ - Path: []string{v.Operation.FieldAliasOrNameString(ref)}, + Path: []string{fetchResponseKey}, Value: string(typeName), } v.currentField.Value = str } else { str := &resolve.String{ Nullable: false, - Path: []string{v.Operation.FieldAliasOrNameString(ref)}, + Path: []string{fetchResponseKey}, IsTypeName: true, } v.currentField.Value = str } } else { - path := []string{v.Operation.FieldAliasOrNameString(ref)} + path := []string{fetchResponseKey} v.currentField.Value = v.resolveFieldValue(ref, fieldDefinitionTypeRef, true, path) } @@ -495,6 +528,14 @@ func (v *Visitor) resolveFieldInfo(ref, typeRef int, onTypeNames [][]byte) *reso } } + // Mark non-key fields on concrete entity types for cache analytics hashing; + // polymorphic parents fall through to the runtime fallback. + if v.Walker.EnclosingTypeDefinition.Kind == ast.NodeKindObjectTypeDefinition { + if analytics := v.caching.entityCacheAnalytics(enclosingTypeName); analytics != nil { + fieldInfo.CacheAnalyticsHash = !analytics.IsKeyField(fieldName) + } + } + return fieldInfo } @@ -633,6 +674,11 @@ func (v *Visitor) addInterfaceObjectNameToTypeNames(fieldRef int, typeName []byt func (v *Visitor) LeaveField(fieldRef int) { v.debugOnLeaveNode(ast.NodeKindField, fieldRef) + // Pop fields for each planner that tracked this field + for plannerID := range v.planners { + v.caching.popFieldsForPlanner(plannerID, fieldRef) + } + if v.skipField(fieldRef) { // we should also check skips on field leave // cause on nested keys we could mistakenly remove wrong object @@ -880,6 +926,14 @@ func (v *Visitor) resolveFieldValue(fieldRef, typeRef int, nullable bool, path [ } } + // Annotate entity types with cache analytics config (plan-time). + switch typeDefinitionNode.Kind { + case ast.NodeKindObjectTypeDefinition: + object.CacheAnalytics = v.caching.entityCacheAnalytics(typeName) + case ast.NodeKindInterfaceTypeDefinition, ast.NodeKindUnionTypeDefinition: + object.CacheAnalytics = v.caching.polymorphicEntityCacheAnalytics(object.PossibleTypes) + } + v.objects = append(v.objects, object) v.Walker.DefferOnEnterField(func() { v.currentFields = append(v.currentFields, objectFields{ @@ -1024,6 +1078,10 @@ func (v *Visitor) EnterOperationDefinition(opRef int) { } } + // Initialize per-planner object and field tracking structures used to build + // the ProvidesData tree that each subgraph fetch will populate at runtime. + v.caching.initializePlannerStructures() + if operationKind == ast.OperationTypeSubscription { v.subscription = &resolve.GraphQLSubscription{ Response: v.response, @@ -1082,6 +1140,18 @@ func (v *Visitor) resolveFieldPath(ref int) []string { func (v *Visitor) EnterDocument(operation, definition *ast.Document) { v.Operation, v.Definition = operation, definition + // Per-walk state is reset here rather than in NewVisitor so the same *Visitor + // can be reused across operations (common in tests and in the planner cache). + // Clear in place: the cost visitor captures this map before the walk starts. + clear(v.fieldPlanners) + v.fieldConfigs = map[int]*FieldConfiguration{} + v.exportedVariables = map[string]struct{}{} + v.skipIncludeOnFragments = map[int]skipIncludeInfo{} + v.indirectInterfaceFields = map[int]indirectInterfaceField{} + v.pathCache = map[astvisitor.VisitorKind]map[int]string{} + v.plannerFields = map[int][]int{} + v.fieldEnclosingTypeNames = map[int]string{} + v.caching.resetPlannerStructures() } func (v *Visitor) LeaveDocument(_, _ *ast.Document) { @@ -1136,6 +1206,49 @@ func (v *Visitor) pathDeepness(path string) int { return strings.Count(path, ".") } +func (v *Visitor) resolveEntityOnTypeNames(plannerID, fieldRef int, fieldName ast.ByteSlice) (onTypeNames [][]byte) { + // If this is an entity root field, return the enclosing type name + if v.caching.isEntityRootField(plannerID, fieldRef) { + enclosingTypeName := v.Walker.EnclosingTypeDefinition.NameBytes(v.Definition) + if enclosingTypeName != nil { + return [][]byte{enclosingTypeName} + } + } + + // Otherwise, use the regular resolution logic + onTypeNames = v.resolveOnTypeNames(fieldRef, fieldName) + return onTypeNames +} + +func (v *Visitor) shouldPlannerHandleField(plannerID int, fieldRef int) bool { + if v.planners == nil || plannerID >= len(v.planners) { + return false + } + + // Use the same logic as AllowVisitor to check if a planner handles a field + path := v.Walker.Path.DotDelimitedString() + if v.Walker.CurrentKind == ast.NodeKindField { + path = path + "." + v.Operation.FieldAliasOrNameString(fieldRef) + } + + config := v.planners[plannerID] + if !config.HasPath(path) { + return false + } + + enclosingTypeName := v.Walker.EnclosingTypeDefinition.NameString(v.Definition) + + allow := config.HasPathWithFieldRef(fieldRef) || config.HasParent(path) + if !allow { + return false + } + + shouldWalkFieldsOnPath := config.ShouldWalkFieldsOnPath(path, enclosingTypeName) || + config.ShouldWalkFieldsOnPath(path, "") + + return shouldWalkFieldsOnPath +} + func (v *Visitor) resolveInputTemplates(config *objectFetchConfiguration, input *string, variables *resolve.Variables) { *input = templateRegex.ReplaceAllStringFunc(*input, func(s string) string { selectors := selectorRegex.FindStringSubmatch(s) @@ -1306,6 +1419,8 @@ func (v *Visitor) configureSubscription(config *objectFetchConfiguration) { v.subscription.Trigger.SourceName = config.sourceName v.subscription.Trigger.SourceID = config.sourceID v.subscription.Filter = config.filter + + v.caching.configureSubscriptionEntityCachePopulation(config) } func (v *Visitor) configureObjectFetch(config *objectFetchConfiguration) { @@ -1332,6 +1447,9 @@ func (v *Visitor) configureFetch(internal *objectFetchConfiguration, external re dataSourceType := reflect.TypeOf(external.DataSource).String() dataSourceType = strings.TrimPrefix(dataSourceType, "*") + // Configure caching based on FederationMetaData (opt-in per entity) + external.Caching = v.caching.configureFetchCaching(internal, external) + singleFetch := &resolve.SingleFetch{ FetchConfiguration: external, FetchDependencies: resolve.FetchDependencies{ @@ -1351,7 +1469,13 @@ func (v *Visitor) configureFetch(internal *objectFetchConfiguration, external re OperationType: internal.operationType, QueryPlan: external.QueryPlan, } - + if !v.Config.DisableFetchProvidesData { + // Set ProvidesData from the planner's object structure + if providesData, ok := v.caching.plannerObjects[internal.fetchID]; ok { + resolve.ComputeHasAliases(providesData) + singleFetch.Info.ProvidesData = providesData + } + } if v.Config.DisableIncludeFieldDependencies { return singleFetch } diff --git a/v2/pkg/engine/plan/visitor_path_normalization_test.go b/v2/pkg/engine/plan/visitor_path_normalization_test.go new file mode 100644 index 0000000000..2f156a1b40 --- /dev/null +++ b/v2/pkg/engine/plan/visitor_path_normalization_test.go @@ -0,0 +1,102 @@ +package plan + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestNormalizePathRemovingFragments locks the invariant that the regex used by +// isEntityBoundaryField / isEntityRootField strips inline-fragment type markers +// from walker paths so that boundary comparisons are shape-independent. +// +// Regression guard: isEntityRootField previously compared a non-normalized +// current path against a normalized boundary path, so a query that wraps the +// boundary in `... on User { ... }` caused the prefix check to silently fail. +func TestNormalizePathRemovingFragments(t *testing.T) { + v := &Visitor{} + v.caching = newCachingPlannerState(v) + + cases := []struct { + name string + in string + want string + }{ + {"no fragment", "query.meInterface.reviews", "query.meInterface.reviews"}, + {"single inline fragment", "query.meInterface.$0User.reviews", "query.meInterface.reviews"}, + {"nested inline fragments", "query.meUnion.$0User.profile.$1Admin.role", "query.meUnion.profile.role"}, + {"trailing inline fragment", "query.meUnion.$0User", "query.meUnion"}, + {"fragment marker with digit", "query.root.$10Foo.child", "query.root.child"}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := v.caching.normalizePathRemovingFragments(tc.in) + assert.Equal(t, tc.want, got) + }) + } +} + +// TestIsEntityRootPath is the focused A42 regression. Boundary paths stored by +// isEntityBoundaryField are already normalized (inline-fragment markers +// stripped). If the walker-side path check doesn't re-normalize before the +// prefix comparison, queries that wrap the boundary in an inline fragment +// silently fail entity-root detection — at runtime that shows up as missing +// entity L1/L2 population for subgraphs that return their entity boundary +// behind a fragment like `... on User { reviews }`. +// +// Before the fix this test's "fragment wraps the boundary directly" case +// returned false; after the fix it returns true. +func TestIsEntityRootPath(t *testing.T) { + v := &Visitor{} + v.caching = newCachingPlannerState(v) + + cases := []struct { + name string + boundaryPath string + fullPath string + want bool + }{ + { + name: "no fragment — direct child", + boundaryPath: "query.meInterface.reviews", + fullPath: "query.meInterface.reviews.body", + want: true, + }, + { + name: "fragment inside the path — direct child after normalization", + boundaryPath: "query.meInterface.reviews", + fullPath: "query.meInterface.$0User.reviews.body", + want: true, + }, + { + name: "fragment after the boundary — direct child after normalization", + boundaryPath: "query.meInterface.reviews", + fullPath: "query.meInterface.reviews.$0Review.body", + want: true, + }, + { + name: "deeper descendant is not a direct child", + boundaryPath: "query.meInterface.reviews", + fullPath: "query.meInterface.reviews.author.name", + want: false, + }, + { + name: "deeper descendant through fragment — still not a direct child", + boundaryPath: "query.meInterface.reviews", + fullPath: "query.meInterface.$0User.reviews.author.name", + want: false, + }, + { + name: "unrelated path", + boundaryPath: "query.meInterface.reviews", + fullPath: "query.products.price", + want: false, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := v.caching.isEntityRootPath(tc.boundaryPath, tc.fullPath) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/v2/pkg/engine/plan/visitor_subscription_entity_population_test.go b/v2/pkg/engine/plan/visitor_subscription_entity_population_test.go new file mode 100644 index 0000000000..f1befbc1bd --- /dev/null +++ b/v2/pkg/engine/plan/visitor_subscription_entity_population_test.go @@ -0,0 +1,76 @@ +package plan + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +func TestVisitorEntityKeyFieldNames(t *testing.T) { + t.Run("extracts only top level key fields", func(t *testing.T) { + keys := []FederationFieldConfiguration{ + { + TypeName: "User", + SelectionSet: "id info {a b}", + }, + { + TypeName: "User", + SelectionSet: "profile {displayName}", + }, + } + + for i := range keys { + err := keys[i].parseSelectionSet() + require.NoError(t, err) + } + + fieldNames := newCachingPlannerState(&Visitor{}).entityKeyFieldNames(keys) + + assert.Equal(t, map[string]struct{}{ + "id": {}, + "info": {}, + "profile": {}, + }, fieldNames) + }) + + t.Run("skips invalid and empty parsed keys", func(t *testing.T) { + unnamedFieldDoc := ast.NewDocument() + selectionSetRef := unnamedFieldDoc.AddSelectionSet().Ref + fieldRef := unnamedFieldDoc.AddField(ast.Field{}).Ref + unnamedFieldDoc.AddSelection(selectionSetRef, ast.Selection{ + Kind: ast.SelectionKindField, + Ref: fieldRef, + }) + unnamedFieldDoc.FragmentDefinitions = append(unnamedFieldDoc.FragmentDefinitions, ast.FragmentDefinition{ + SelectionSet: selectionSetRef, + }) + + fieldNames := newCachingPlannerState(&Visitor{}).entityKeyFieldNames([]FederationFieldConfiguration{ + { + TypeName: "User", + SelectionSet: "{", + }, + { + TypeName: "User", + SelectionSet: "id", + parsedSelectionSet: &ast.Document{}, + }, + { + TypeName: "User", + SelectionSet: "id", + parsedSelectionSet: unnamedFieldDoc, + }, + { + TypeName: "User", + SelectionSet: "name", + }, + }) + + assert.Equal(t, map[string]struct{}{ + "name": {}, + }, fieldNames) + }) +} diff --git a/v2/pkg/engine/postprocess/add_missing_nested_dependencies_test.go b/v2/pkg/engine/postprocess/add_missing_nested_dependencies_test.go deleted file mode 100644 index e24eaef73c..0000000000 --- a/v2/pkg/engine/postprocess/add_missing_nested_dependencies_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package postprocess - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" -) - -func TestAddMissingNestedDependencies_ProcessFetchTree(t *testing.T) { - t.Run("add missing dependencies to nested fetches on same merge path", func(t *testing.T) { - input := resolve.Sequence( - resolve.Single(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "a", - PostProcessing: resolve.PostProcessingConfiguration{ - MergePath: []string{"a"}, - }, - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 0, - }, - }), - resolve.Single(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "b", - PostProcessing: resolve.PostProcessingConfiguration{ - MergePath: []string{"b"}, - }, - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 1, - }, - }), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "c", - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 2, - }, - }, "a", resolve.ObjectPath("a")), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "d", - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 3, - }, - }, "b.c", resolve.ObjectPath("b"), resolve.ObjectPath("c")), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "x", - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 4, - DependsOnFetchIDs: []int{0}, - }, - }, "a", resolve.ObjectPath("a")), - resolve.Single(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "y", - PostProcessing: resolve.PostProcessingConfiguration{ - MergePath: []string{"y"}, - }, - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 5, - }, - }), - ) - - expected := resolve.Sequence( - resolve.Single(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "a", - PostProcessing: resolve.PostProcessingConfiguration{ - MergePath: []string{"a"}, - }, - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 0, - }, - }), - resolve.Single(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "b", - PostProcessing: resolve.PostProcessingConfiguration{ - MergePath: []string{"b"}, - }, - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 1, - }, - }), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "c", - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 2, - DependsOnFetchIDs: []int{0}, - }, - }, "a", resolve.ObjectPath("a")), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "d", - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 3, - DependsOnFetchIDs: []int{1}, - }, - }, "b.c", resolve.ObjectPath("b"), resolve.ObjectPath("c")), - resolve.SingleWithPath(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "x", - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 4, - DependsOnFetchIDs: []int{0}, - }, - }, "a", resolve.ObjectPath("a")), - resolve.Single(&resolve.SingleFetch{ - FetchConfiguration: resolve.FetchConfiguration{ - Input: "y", - PostProcessing: resolve.PostProcessingConfiguration{ - MergePath: []string{"y"}, - }, - }, - FetchDependencies: resolve.FetchDependencies{ - FetchID: 5, - }, - }), - ) - - processor := &addMissingNestedDependencies{} - processor.ProcessFetchTree(input) - require.Equal(t, expected, input) - }) -} diff --git a/v2/pkg/engine/postprocess/create_concrete_single_fetch_types.go b/v2/pkg/engine/postprocess/create_concrete_single_fetch_types.go index 44b3225fbe..1fdccad92f 100644 --- a/v2/pkg/engine/postprocess/create_concrete_single_fetch_types.go +++ b/v2/pkg/engine/postprocess/create_concrete_single_fetch_types.go @@ -98,6 +98,7 @@ func (d *createConcreteSingleFetchTypes) createEntityBatchFetch(fetch *resolve.S }, DataSource: fetch.DataSource, PostProcessing: fetch.PostProcessing, + Caching: fetch.Caching, } } @@ -131,5 +132,6 @@ func (d *createConcreteSingleFetchTypes) createEntityFetch(fetch *resolve.Single }, DataSource: fetch.DataSource, PostProcessing: fetch.PostProcessing, + Caching: fetch.Caching, } } diff --git a/v2/pkg/engine/postprocess/optimize_l1_cache.go b/v2/pkg/engine/postprocess/optimize_l1_cache.go new file mode 100644 index 0000000000..8a229cbafd --- /dev/null +++ b/v2/pkg/engine/postprocess/optimize_l1_cache.go @@ -0,0 +1,572 @@ +package postprocess + +import ( + "bytes" + "slices" + "strings" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// optimizeL1Cache is a postprocessor that optimizes L1 cache usage by only enabling it +// for fetches that can actually benefit from cache hits. This saves memory and CPU +// by skipping cache key generation, lookup, and population when L1 cannot help. +// +// L1 cache is effective when: +// 1. A prior fetch (parent query) returns the same entity type (current fetch can READ) +// 2. A later fetch needs the same entity type with a subset of fields (current fetch can WRITE) +// +// A fetch never reads AND writes to L1 in the same execution: +// - Cache hit (READ): Fetch reads from L1, skips subgraph fetch, does NOT write +// - Cache miss (WRITE): Fetch cannot read, makes subgraph call, then writes to L1 +type optimizeL1Cache struct { + disable bool +} + +// entityFetchInfo stores information about an entity fetch needed for L1 optimization +type entityFetchInfo struct { + fetchID int + entityType string // From FetchInfo.RootFields[0].TypeName + providesData *resolve.Object // From FetchInfo.ProvidesData - the full field tree + dependsOn []int // From FetchDependencies.DependsOnFetchIDs + fetch resolve.Fetch // Reference to the actual fetch for modification +} + +// rootFieldProviderInfo stores information about a root field fetch that can provide L1 cache data +type rootFieldProviderInfo struct { + fetchID int + entityTypes []string // Entity types this root field can populate L1 for + providesData *resolve.Object // From FetchInfo.ProvidesData - the full response tree + fetch resolve.Fetch // Reference to the actual fetch for modification +} + +func (o *optimizeL1Cache) ProcessFetchTree(root *resolve.FetchTreeNode) { + if o.disable || root == nil { + return + } + + // Phase 1: Collect entity fetch information from entire tree + entityFetches := o.collectEntityFetches(root) + + // Also collect root field providers (root fields with RootFieldL1EntityCacheKeyTemplates) + rootFieldProviderInfos := o.collectRootFieldProviders(root) + + // No fetches to optimize + if len(entityFetches) == 0 && len(rootFieldProviderInfos) == 0 { + return + } + + // Phase 2: Determine L1 usefulness for each entity fetch + for _, ef := range entityFetches { + canRead := o.hasValidProvider(ef, entityFetches, rootFieldProviderInfos) + canWrite := o.hasValidConsumer(ef, entityFetches, rootFieldProviderInfos) + useL1Cache := canRead || canWrite + o.setUseL1Cache(ef.fetch, useL1Cache) + } + + // Phase 4: Determine L1 usefulness for each root field provider + // Root fields only write to L1, so they need valid consumers to be useful + for _, rfp := range rootFieldProviderInfos { + canWrite := o.rootFieldHasValidConsumer(rfp, entityFetches) + o.setUseL1Cache(rfp.fetch, canWrite) + } +} + +// collectEntityFetches traverses the fetch tree and collects information about entity fetches +func (o *optimizeL1Cache) collectEntityFetches(node *resolve.FetchTreeNode) []*entityFetchInfo { + if node == nil { + return nil + } + + var result []*entityFetchInfo + + switch node.Kind { + case resolve.FetchTreeNodeKindSingle: + if ef := o.extractEntityFetchInfo(node.Item.Fetch); ef != nil { + result = append(result, ef) + } + case resolve.FetchTreeNodeKindParallel, resolve.FetchTreeNodeKindSequence: + for _, child := range node.ChildNodes { + result = append(result, o.collectEntityFetches(child)...) + } + } + + return result +} + +// extractEntityFetchInfo extracts entity fetch information from a fetch if applicable +func (o *optimizeL1Cache) extractEntityFetchInfo(fetch resolve.Fetch) *entityFetchInfo { + if fetch == nil { + return nil + } + + info := fetch.FetchInfo() + if info == nil { + return nil + } + + deps := fetch.Dependencies() + if deps == nil { + return nil + } + + // Check if this is an entity fetch (has root fields with TypeName) + if len(info.RootFields) == 0 { + return nil + } + + // Only entity fetches (EntityFetch, BatchEntityFetch, or SingleFetch with RequiresEntityFetch) + // have meaningful L1 cache potential + isEntityFetch := false + switch f := fetch.(type) { + case *resolve.EntityFetch: + isEntityFetch = true + case *resolve.BatchEntityFetch: + isEntityFetch = true + case *resolve.SingleFetch: + isEntityFetch = f.RequiresEntityFetch || f.RequiresEntityBatchFetch + } + + if !isEntityFetch { + return nil + } + + entityType := info.RootFields[0].TypeName + if entityType == "" { + return nil + } + + return &entityFetchInfo{ + fetchID: deps.FetchID, + entityType: entityType, + providesData: info.ProvidesData, + dependsOn: deps.DependsOnFetchIDs, + fetch: fetch, + } +} + +// collectRootFieldProviders finds root fields that populate L1 cache with entity data +func (o *optimizeL1Cache) collectRootFieldProviders(node *resolve.FetchTreeNode) []*rootFieldProviderInfo { + var providers []*rootFieldProviderInfo + o.collectRootFieldProvidersRecursive(node, &providers) + return providers +} + +func (o *optimizeL1Cache) collectRootFieldProvidersRecursive(node *resolve.FetchTreeNode, providers *[]*rootFieldProviderInfo) { + if node == nil { + return + } + + switch node.Kind { + case resolve.FetchTreeNodeKindSingle: + if node.Item != nil && node.Item.Fetch != nil { + if sf, ok := node.Item.Fetch.(*resolve.SingleFetch); ok { + if len(sf.Caching.RootFieldL1EntityCacheKeyTemplates) > 0 { + deps := sf.Dependencies() + var entityTypes []string + for compositeKey := range sf.Caching.RootFieldL1EntityCacheKeyTemplates { + // Keys are "rootField:EntityType" — extract the entity type after the colon + _, entityType, ok := strings.Cut(compositeKey, ":") + if !ok { + entityType = compositeKey + } + entityTypes = append(entityTypes, entityType) + } + // Get providesData from FetchInfo + var providesData *resolve.Object + if sf.Info != nil { + providesData = sf.Info.ProvidesData + } + *providers = append(*providers, &rootFieldProviderInfo{ + fetchID: deps.FetchID, + entityTypes: entityTypes, + providesData: providesData, + fetch: sf, + }) + } + } + } + case resolve.FetchTreeNodeKindParallel, resolve.FetchTreeNodeKindSequence: + for _, child := range node.ChildNodes { + o.collectRootFieldProvidersRecursive(child, providers) + } + } +} + +// rootFieldHasValidConsumer checks if there's a later entity fetch that can benefit +// from this root field's L1 data, either individually or as part of a union. +func (o *optimizeL1Cache) rootFieldHasValidConsumer(provider *rootFieldProviderInfo, allEntityFetches []*entityFetchInfo) bool { + for _, consumer := range allEntityFetches { + for _, entityType := range provider.entityTypes { + if consumer.entityType != entityType { + continue + } + if provider.fetchID >= consumer.fetchID && !slices.Contains(consumer.dependsOn, provider.fetchID) { + continue + } + + // Fast path: this root field alone covers consumer + if provider.providesData == nil || o.treeContainsAllFields(provider.providesData, consumer.providesData) { + return true + } + + // Slow path: check if union of all providers (including this root field) covers consumer + rootFieldProviders := []*rootFieldProviderInfo{provider} + union := o.collectAncestorUnion(consumer, allEntityFetches, rootFieldProviders) + if union != nil && objectProvidesAllFields(union, consumer.providesData) { + return true + } + } + } + return false +} + +// hasValidProvider checks if there's a prior fetch (or union of prior fetches) +// that can provide all fields this fetch needs. +// +// Fast path: check if any single provider covers the consumer (cheap). +// Slow path: compute the union of all ancestor providers' fields and check. +func (o *optimizeL1Cache) hasValidProvider(consumer *entityFetchInfo, allFetches []*entityFetchInfo, rootFieldProviders []*rootFieldProviderInfo) bool { + // Fast path: check individual providers + for _, provider := range rootFieldProviders { + for _, entityType := range provider.entityTypes { + if entityType == consumer.entityType { + if provider.fetchID < consumer.fetchID || o.isInDependencyChain(consumer, provider.fetchID, allFetches) { + if provider.providesData == nil || o.treeContainsAllFields(provider.providesData, consumer.providesData) { + return true + } + } + } + } + } + + for _, provider := range allFetches { + if provider.fetchID == consumer.fetchID { + continue + } + if provider.entityType != consumer.entityType { + continue + } + if !o.executesBefore(provider, consumer, allFetches) { + continue + } + if objectProvidesAllFields(provider.providesData, consumer.providesData) { + return true + } + } + + // Slow path: compute union of all ancestor providers and check + union := o.collectAncestorUnion(consumer, allFetches, rootFieldProviders) + if union != nil && objectProvidesAllFields(union, consumer.providesData) { + return true + } + + return false +} + +// hasValidConsumer checks if there's a later fetch that can benefit from this fetch's L1 data. +// A fetch is a valid writer if: +// 1. It individually covers a later consumer's fields, OR +// 2. It contributes to a union of providers that covers a later consumer's fields. +func (o *optimizeL1Cache) hasValidConsumer(provider *entityFetchInfo, allFetches []*entityFetchInfo, rootFieldProviders []*rootFieldProviderInfo) bool { + for _, consumer := range allFetches { + if consumer.fetchID == provider.fetchID { + continue + } + if consumer.entityType != provider.entityType { + continue + } + if !o.executesBefore(provider, consumer, allFetches) { + continue + } + + // Fast path: this provider alone covers consumer + if objectProvidesAllFields(provider.providesData, consumer.providesData) { + return true + } + + // Slow path: check if the union of all providers before consumer + // (including this provider and root field providers) covers consumer. + union := o.collectAncestorUnion(consumer, allFetches, rootFieldProviders) + if union != nil && objectProvidesAllFields(union, consumer.providesData) { + return true + } + } + + return false +} + +// executesBefore returns true if a executes before b based on dependencies +func (o *optimizeL1Cache) executesBefore(a, b *entityFetchInfo, allFetches []*entityFetchInfo) bool { + // Direct dependency check: b depends on a + if slices.Contains(b.dependsOn, a.fetchID) { + return true + } + + // Transitive dependency check: b depends on something that depends on a + return o.isInDependencyChain(b, a.fetchID, allFetches) +} + +// isInDependencyChain checks if targetID is anywhere in the dependency chain of ef +func (o *optimizeL1Cache) isInDependencyChain(ef *entityFetchInfo, targetID int, allFetches []*entityFetchInfo) bool { + visited := make(map[int]bool) + return o.isInDependencyChainRecursive(ef.dependsOn, targetID, allFetches, visited) +} + +func (o *optimizeL1Cache) isInDependencyChainRecursive(dependsOn []int, targetID int, allFetches []*entityFetchInfo, visited map[int]bool) bool { + for _, depID := range dependsOn { + if depID == targetID { + return true + } + if visited[depID] { + continue + } + visited[depID] = true + + // Find the fetch with this ID and check its dependencies + for _, fetch := range allFetches { + if fetch.fetchID == depID { + if o.isInDependencyChainRecursive(fetch.dependsOn, targetID, allFetches, visited) { + return true + } + break + } + } + } + return false +} + +// setUseL1Cache sets the UseL1Cache flag on the appropriate caching configuration +func (o *optimizeL1Cache) setUseL1Cache(fetch resolve.Fetch, value bool) { + switch f := fetch.(type) { + case *resolve.SingleFetch: + f.Caching.UseL1Cache = value + case *resolve.EntityFetch: + f.Caching.UseL1Cache = value + case *resolve.BatchEntityFetch: + f.Caching.UseL1Cache = value + } +} + +// objectProvidesAllFields recursively checks if provider object has all fields that consumer needs. +// This validates the entire field tree, not just top-level fields. +func objectProvidesAllFields(provider, consumer *resolve.Object) bool { + if consumer == nil { + return true // Consumer needs nothing + } + if provider == nil { + return len(consumer.Fields) == 0 // Provider has nothing, consumer must need nothing + } + + // Check each consumer field exists in provider + for _, consumerField := range consumer.Fields { + providerField := findFieldByName(provider.Fields, consumerField.Name) + if providerField == nil { + return false // Consumer needs field that provider doesn't have + } + + // Recursively check nested fields + if !nodeProvidesAllFields(providerField.Value, consumerField.Value) { + return false + } + } + + return true +} + +// findFieldByName finds a field by name in a slice of fields +func findFieldByName(fields []*resolve.Field, name []byte) *resolve.Field { + for _, field := range fields { + if bytes.Equal(field.Name, name) { + return field + } + } + return nil +} + +// nodeProvidesAllFields recursively checks if provider node has all fields that consumer node needs. +// Handles Object, Array, and scalar types. +func nodeProvidesAllFields(provider, consumer resolve.Node) bool { + if consumer == nil { + return true + } + if provider == nil { + return false + } + + switch consumerNode := consumer.(type) { + case *resolve.Object: + providerObj, ok := provider.(*resolve.Object) + if !ok { + return false // Type mismatch + } + return objectProvidesAllFields(providerObj, consumerNode) + + case *resolve.Array: + providerArr, ok := provider.(*resolve.Array) + if !ok { + return false // Type mismatch + } + // Check the array item type + return nodeProvidesAllFields(providerArr.Item, consumerNode.Item) + + default: + // Scalar types (String, Int, Float, Boolean, etc.) - if provider has the field, it's sufficient + return true + } +} + +// treeContainsAllFields searches the provider tree for any object that provides all fields the target needs. +// This is used for root field providers where entities may be nested anywhere in the response tree. +func (o *optimizeL1Cache) treeContainsAllFields(tree *resolve.Object, target *resolve.Object) bool { + if target == nil || len(target.Fields) == 0 { + return true // Consumer needs nothing + } + if tree == nil { + return false // Provider has nothing + } + + // Check if this object provides all fields + if objectProvidesAllFields(tree, target) { + return true + } + + // Recursively check nested objects in the tree + for _, field := range tree.Fields { + if o.nodeContainsAllFields(field.Value, target) { + return true + } + } + return false +} + +// nodeContainsAllFields recursively searches a node for an object that provides all target fields. +func (o *optimizeL1Cache) nodeContainsAllFields(node resolve.Node, target *resolve.Object) bool { + if node == nil { + return false + } + + switch n := node.(type) { + case *resolve.Object: + return o.treeContainsAllFields(n, target) + case *resolve.Array: + return o.nodeContainsAllFields(n.Item, target) + } + return false +} + +// unionObjects merges the fields of two Objects into a new Object containing +// all fields from both. For fields present in both, nested Objects are merged +// recursively; other types take the first value. +func unionObjects(a, b *resolve.Object) *resolve.Object { + if a == nil { + return b + } + if b == nil { + return a + } + + // Start with a copy of a's fields + merged := make([]*resolve.Field, 0, len(a.Fields)+len(b.Fields)) + merged = append(merged, a.Fields...) + + // Add fields from b that aren't in a (or merge nested objects) + for _, bf := range b.Fields { + existing := findFieldByName(merged, bf.Name) + if existing == nil { + merged = append(merged, bf) + } else { + // Field exists in both — merge nested objects recursively + existingObj, existingIsObj := existing.Value.(*resolve.Object) + bObj, bIsObj := bf.Value.(*resolve.Object) + if existingIsObj && bIsObj { + existing.Value = unionObjects(existingObj, bObj) + } + // For non-object values (scalars, arrays), keep existing + } + } + + return &resolve.Object{Fields: merged} +} + +// collectAncestorUnion computes the union of ProvidesData fields from all +// ancestor providers of the same entity type that execute before the consumer. +// Includes both entity fetches and root field providers. +func (o *optimizeL1Cache) collectAncestorUnion( + consumer *entityFetchInfo, + allFetches []*entityFetchInfo, + rootFieldProviders []*rootFieldProviderInfo, +) *resolve.Object { + var union *resolve.Object + + // Collect from root field providers + for _, provider := range rootFieldProviders { + for _, entityType := range provider.entityTypes { + if entityType != consumer.entityType { + continue + } + if provider.fetchID < consumer.fetchID || o.isInDependencyChain(consumer, provider.fetchID, allFetches) { + if provider.providesData != nil { + // For root fields, find the nested entity object in the tree + entityObj := o.findEntityObjectInTree(provider.providesData, consumer.providesData) + if entityObj != nil { + union = unionObjects(union, entityObj) + } + } + } + } + } + + // Collect from entity fetches + for _, provider := range allFetches { + if provider.fetchID == consumer.fetchID { + continue + } + if provider.entityType != consumer.entityType { + continue + } + if !o.executesBefore(provider, consumer, allFetches) { + continue + } + if provider.providesData != nil { + union = unionObjects(union, provider.providesData) + } + } + + return union +} + +// findEntityObjectInTree searches a root field's ProvidesData tree for an +// Object that could provide entity fields. Returns the first Object whose +// fields overlap with the target entity's fields. +func (o *optimizeL1Cache) findEntityObjectInTree(tree, target *resolve.Object) *resolve.Object { + if tree == nil || target == nil { + return nil + } + // Check if this object has any of the target fields + if objectProvidesAllFields(tree, target) { + return tree + } + // Check if this object has at least one target field (partial match for union) + for _, tf := range target.Fields { + if findFieldByName(tree.Fields, tf.Name) != nil { + return tree + } + } + // Search nested objects + for _, field := range tree.Fields { + switch n := field.Value.(type) { + case *resolve.Object: + if found := o.findEntityObjectInTree(n, target); found != nil { + return found + } + case *resolve.Array: + if item, ok := n.Item.(*resolve.Object); ok { + if found := o.findEntityObjectInTree(item, target); found != nil { + return found + } + } + } + } + return nil +} diff --git a/v2/pkg/engine/postprocess/optimize_l1_cache_test.go b/v2/pkg/engine/postprocess/optimize_l1_cache_test.go new file mode 100644 index 0000000000..b41ce56d16 --- /dev/null +++ b/v2/pkg/engine/postprocess/optimize_l1_cache_test.go @@ -0,0 +1,1118 @@ +package postprocess + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/resolve" +) + +// makeObject creates a resolve.Object with the given field names (all as scalars) +func makeObject(fieldNames ...string) *resolve.Object { + fields := make([]*resolve.Field, len(fieldNames)) + for i, name := range fieldNames { + fields[i] = &resolve.Field{Name: []byte(name), Value: &resolve.String{}} + } + return &resolve.Object{Fields: fields} +} + +// Helper function to create a simple entity fetch with given fields +func makeEntityFetch(fetchID int, entityType string, fieldNames []string, dependsOnIDs []int) *resolve.EntityFetch { + fields := make([]*resolve.Field, len(fieldNames)) + for i, name := range fieldNames { + fields[i] = &resolve.Field{Name: []byte(name)} + } + return &resolve.EntityFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: fetchID, + DependsOnFetchIDs: dependsOnIDs, + }, + Info: &resolve.FetchInfo{ + RootFields: []resolve.GraphCoordinate{ + {TypeName: entityType, FieldName: "field"}, + }, + ProvidesData: &resolve.Object{ + Fields: fields, + }, + }, + Caching: resolve.FetchCacheConfiguration{ + UseL1Cache: true, // Default value + }, + } +} + +// Helper function to create a batch entity fetch with given fields +func makeBatchEntityFetch(fetchID int, entityType string, fieldNames []string, dependsOnIDs []int) *resolve.BatchEntityFetch { + fields := make([]*resolve.Field, len(fieldNames)) + for i, name := range fieldNames { + fields[i] = &resolve.Field{Name: []byte(name)} + } + return &resolve.BatchEntityFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: fetchID, + DependsOnFetchIDs: dependsOnIDs, + }, + Info: &resolve.FetchInfo{ + RootFields: []resolve.GraphCoordinate{ + {TypeName: entityType, FieldName: "field"}, + }, + ProvidesData: &resolve.Object{ + Fields: fields, + }, + }, + Caching: resolve.FetchCacheConfiguration{ + UseL1Cache: true, // Default value + }, + } +} + +// Helper function to create a root field fetch with L1 entity cache templates +// providesData describes the full response tree of the root field +func makeRootFetchWithL1Templates(fetchID int, dependsOnIDs []int, entityTypes []string, providesData *resolve.Object) *resolve.SingleFetch { + templates := make(map[string]resolve.CacheKeyTemplate) + for _, et := range entityTypes { + templates["users:"+et] = &resolve.EntityQueryCacheKeyTemplate{} + } + return &resolve.SingleFetch{ + FetchDependencies: resolve.FetchDependencies{ + FetchID: fetchID, + DependsOnFetchIDs: dependsOnIDs, + }, + Info: &resolve.FetchInfo{ + RootFields: []resolve.GraphCoordinate{ + {TypeName: "Query", FieldName: "users"}, + }, + ProvidesData: providesData, + }, + FetchConfiguration: resolve.FetchConfiguration{ + RequiresEntityFetch: false, + RequiresEntityBatchFetch: false, + Caching: resolve.FetchCacheConfiguration{ + RootFieldL1EntityCacheKeyTemplates: templates, + }, + }, + } +} + +func getUseL1Cache(fetch resolve.Fetch) bool { + switch f := fetch.(type) { + case *resolve.SingleFetch: + return f.Caching.UseL1Cache + case *resolve.EntityFetch: + return f.Caching.UseL1Cache + case *resolve.BatchEntityFetch: + return f.Caching.UseL1Cache + } + return false +} + +func TestOptimizeL1Cache_SingleEntityFetch_NoProvider_NoConsumer(t *testing.T) { + // Single entity fetch with no prior fetches and no subsequent fetches + // Should have UseL1Cache = false (cannot benefit from L1) + processor := &optimizeL1Cache{} + + entityFetch := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + input := resolve.Sequence( + resolve.Single(entityFetch), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, false, getUseL1Cache(entityFetch), "single entity fetch with no provider/consumer should have UseL1Cache=false") +} + +func TestOptimizeL1Cache_TwoEntityFetches_SameType_SameFields(t *testing.T) { + // Two entity fetches with same type and same fields + // First can write for second (as provider), second can read from first (as consumer) + // Both should have UseL1Cache = true + processor := &optimizeL1Cache{} + + fetch1 := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetch2 := makeEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + + input := resolve.Sequence( + resolve.Single(fetch1), + resolve.Single(fetch2), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetch1), "first fetch should have UseL1Cache=true (can write for second)") + assert.Equal(t, true, getUseL1Cache(fetch2), "second fetch should have UseL1Cache=true (can read from first)") +} + +func TestOptimizeL1Cache_TwoEntityFetches_DifferentTypes(t *testing.T) { + // Two entity fetches with different types + // Neither can help the other + processor := &optimizeL1Cache{} + + fetch1 := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetch2 := makeEntityFetch(2, "Product", []string{"id", "title"}, []int{1}) + + input := resolve.Sequence( + resolve.Single(fetch1), + resolve.Single(fetch2), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, false, getUseL1Cache(fetch1), "first fetch should have UseL1Cache=false (different type from second)") + assert.Equal(t, false, getUseL1Cache(fetch2), "second fetch should have UseL1Cache=false (different type from first)") +} + +func TestOptimizeL1Cache_ProviderHasSuperset(t *testing.T) { + // First fetch provides superset of fields, second needs subset + // First can write for second, second can read from first + processor := &optimizeL1Cache{} + + fetch1 := makeEntityFetch(1, "User", []string{"id", "name", "email"}, nil) + fetch2 := makeEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + + input := resolve.Sequence( + resolve.Single(fetch1), + resolve.Single(fetch2), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetch1), "first fetch should have UseL1Cache=true (superset provider)") + assert.Equal(t, true, getUseL1Cache(fetch2), "second fetch should have UseL1Cache=true (subset consumer)") +} + +func TestOptimizeL1Cache_ProviderHasSubset(t *testing.T) { + // First fetch provides subset of fields, second needs superset + // First cannot write useful data for second + processor := &optimizeL1Cache{} + + fetch1 := makeEntityFetch(1, "User", []string{"id"}, nil) + fetch2 := makeEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + + input := resolve.Sequence( + resolve.Single(fetch1), + resolve.Single(fetch2), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, false, getUseL1Cache(fetch1), "first fetch should have UseL1Cache=false (subset cannot help superset)") + assert.Equal(t, false, getUseL1Cache(fetch2), "second fetch should have UseL1Cache=false (cannot read from first)") +} + +func TestOptimizeL1Cache_ThreeFetchChain_AllSameFields(t *testing.T) { + // Chain A→B→C, all same type, same fields + // All three should be enabled: + // - A: can write for B and C + // - B: can read from A, can write for C + // - C: can read from A or B + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"id", "name"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should have UseL1Cache=true (can write for B and C)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should have UseL1Cache=true (can read from A, write for C)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should have UseL1Cache=true (can read from A or B)") +} + +func TestOptimizeL1Cache_ThreeFetchChain_IncreasingFields(t *testing.T) { + // Chain A→B→C where: + // - A provides {id} + // - B needs {id, name} + // - C needs {id, name} + // + // A alone doesn't cover B or C. But A contributes {id} to the union + // that covers C (union of A+B = {id, name}). With union-based optimization, + // A is enabled as a writer because it participates in the chain. + // B covers C directly. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"id"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"id", "name"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should have UseL1Cache=true (contributes to union covering C)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should have UseL1Cache=true (can write for C)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should have UseL1Cache=true (can read from B or union)") +} + +func TestOptimizeL1Cache_ThreeFetchChain_DecreasingFields(t *testing.T) { + // Chain A→B→C where: + // - A provides {id, name, email} + // - B needs {id, name} + // - C needs {id} + // + // All can help each other + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"id", "name", "email"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"id"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should have UseL1Cache=true (can write for B and C)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should have UseL1Cache=true (can read from A, write for C)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should have UseL1Cache=true (can read from A or B)") +} + +func TestOptimizeL1Cache_ParallelFetches_SameType(t *testing.T) { + // Two parallel fetches with same type + // They execute in parallel, so neither can read from the other + // (no dependency relationship) + processor := &optimizeL1Cache{} + + fetch1 := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetch2 := makeEntityFetch(2, "User", []string{"id", "name"}, nil) + + input := resolve.Sequence( + resolve.Parallel( + resolve.Single(fetch1), + resolve.Single(fetch2), + ), + ) + + processor.ProcessFetchTree(input) + + // Neither can help the other since they run in parallel (no dependency) + assert.Equal(t, false, getUseL1Cache(fetch1), "first parallel fetch should have UseL1Cache=false") + assert.Equal(t, false, getUseL1Cache(fetch2), "second parallel fetch should have UseL1Cache=false") +} + +func TestOptimizeL1Cache_ParallelThenSequential(t *testing.T) { + // Two parallel fetches followed by a sequential fetch that depends on both + processor := &optimizeL1Cache{} + + fetch1 := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetch2 := makeEntityFetch(2, "Product", []string{"id", "title"}, nil) + fetch3 := makeEntityFetch(3, "User", []string{"id", "name"}, []int{1, 2}) + + input := resolve.Sequence( + resolve.Parallel( + resolve.Single(fetch1), + resolve.Single(fetch2), + ), + resolve.Single(fetch3), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetch1), "fetch1 should have UseL1Cache=true (can write for fetch3)") + assert.Equal(t, false, getUseL1Cache(fetch2), "fetch2 should have UseL1Cache=false (different type)") + assert.Equal(t, true, getUseL1Cache(fetch3), "fetch3 should have UseL1Cache=true (can read from fetch1)") +} + +func TestOptimizeL1Cache_RootFetchWithL1Templates_HasConsumer(t *testing.T) { + // Root field fetch with L1 entity cache templates for User type + // Followed by entity fetch for User + // Root fetch provides {id, name} and entity fetch needs {id, name} + // Root fetch should have UseL1Cache=true because it can write for entity fetch + processor := &optimizeL1Cache{} + + // Root field provides User with {id, name} + rootProvidesData := makeObject("id", "name") + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + entityFetch := makeEntityFetch(1, "User", []string{"id", "name"}, []int{0}) + + input := resolve.Sequence( + resolve.Single(rootFetch), + resolve.Single(entityFetch), + ) + + processor.ProcessFetchTree(input) + + // Root fetch can write for entity fetch (provides all fields consumer needs) + assert.Equal(t, true, getUseL1Cache(rootFetch), "root fetch should have UseL1Cache=true (can write for User entity fetch)") + // Entity fetch can read from root field's L1 cache population + assert.Equal(t, true, getUseL1Cache(entityFetch), "entity fetch should have UseL1Cache=true (root field provides User)") +} + +func TestOptimizeL1Cache_RootFetchWithL1Templates_NoConsumer(t *testing.T) { + // Root field fetch with L1 entity cache templates for User type + // No subsequent entity fetch for User type + // Root fetch should have UseL1Cache=false because no one can benefit + processor := &optimizeL1Cache{} + + rootProvidesData := makeObject("id", "name") + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + + input := resolve.Sequence( + resolve.Single(rootFetch), + ) + + processor.ProcessFetchTree(input) + + // No entity fetch can read from root field's L1 cache population + assert.Equal(t, false, getUseL1Cache(rootFetch), "root fetch should have UseL1Cache=false (no User entity fetch to benefit)") +} + +func TestOptimizeL1Cache_RootFetchWithL1Templates_DifferentTypeConsumer(t *testing.T) { + // Root field fetch with L1 entity cache templates for User type + // But subsequent entity fetch is for Product type (different) + // Root fetch should have UseL1Cache=false because the entity fetch cannot benefit + processor := &optimizeL1Cache{} + + rootProvidesData := makeObject("id", "name") + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + entityFetch := makeEntityFetch(1, "Product", []string{"id", "title"}, []int{0}) + + input := resolve.Sequence( + resolve.Single(rootFetch), + resolve.Single(entityFetch), + ) + + processor.ProcessFetchTree(input) + + // Root fetch provides User, but entity fetch needs Product + assert.Equal(t, false, getUseL1Cache(rootFetch), "root fetch should have UseL1Cache=false (no matching entity type)") + assert.Equal(t, false, getUseL1Cache(entityFetch), "entity fetch should have UseL1Cache=false (root provides different type)") +} + +func TestOptimizeL1Cache_RootFetchWithL1Templates_ProvidesMissingFields(t *testing.T) { + // Root field provides {id, name} but entity fetch needs {id, name, email} + // Root fetch should have UseL1Cache=false because it doesn't provide all fields + // This is critical: we should NOT populate L1 with incomplete data + processor := &optimizeL1Cache{} + + // Root field provides User with {id, name} only + rootProvidesData := makeObject("id", "name") + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + // Entity fetch needs {id, name, email} - email is missing from root field + entityFetch := makeEntityFetch(1, "User", []string{"id", "name", "email"}, []int{0}) + + input := resolve.Sequence( + resolve.Single(rootFetch), + resolve.Single(entityFetch), + ) + + processor.ProcessFetchTree(input) + + // Root fetch should NOT use L1 because it doesn't provide all fields consumer needs + assert.Equal(t, false, getUseL1Cache(rootFetch), + "root fetch should have UseL1Cache=false (doesn't provide email field consumer needs)") + // Entity fetch cannot read from root field (missing fields) + assert.Equal(t, false, getUseL1Cache(entityFetch), + "entity fetch should have UseL1Cache=false (root field doesn't provide email)") +} + +func TestOptimizeL1Cache_RootFetchWithL1Templates_ProvidesSuperset(t *testing.T) { + // Root field provides {id, name, email} and entity fetch needs {id, name} + // Root fetch should have UseL1Cache=true because it provides more than needed + processor := &optimizeL1Cache{} + + // Root field provides User with {id, name, email} + rootProvidesData := makeObject("id", "name", "email") + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + // Entity fetch needs {id, name} - subset of what root field provides + entityFetch := makeEntityFetch(1, "User", []string{"id", "name"}, []int{0}) + + input := resolve.Sequence( + resolve.Single(rootFetch), + resolve.Single(entityFetch), + ) + + processor.ProcessFetchTree(input) + + // Root fetch should use L1 because it provides all fields (and more) consumer needs + assert.Equal(t, true, getUseL1Cache(rootFetch), + "root fetch should have UseL1Cache=true (provides superset of consumer's fields)") + // Entity fetch can read from root field + assert.Equal(t, true, getUseL1Cache(entityFetch), + "entity fetch should have UseL1Cache=true (root field provides all needed fields)") +} + +func TestOptimizeL1Cache_RootFetchWithL1Templates_NestedEntityFields(t *testing.T) { + // Root field returns a nested structure: Query.products -> [Product] -> author: User + // The User entity is nested inside the Product response + // Entity fetch for User should be able to read from root field's L1 cache + processor := &optimizeL1Cache{} + + // Root field provides: { products: [{ id, name, author: { id, username } }] } + // The User entity is at the "author" path with fields {id, username} + rootProvidesData := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("products"), Value: &resolve.Array{ + Item: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.String{}}, + {Name: []byte("name"), Value: &resolve.String{}}, + {Name: []byte("author"), Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.String{}}, + {Name: []byte("username"), Value: &resolve.String{}}, + }, + }}, + }, + }, + }}, + }, + } + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + // Entity fetch needs User with {id, username} + entityFetch := makeEntityFetch(1, "User", []string{"id", "username"}, []int{0}) + + input := resolve.Sequence( + resolve.Single(rootFetch), + resolve.Single(entityFetch), + ) + + processor.ProcessFetchTree(input) + + // Root fetch provides User nested at products[].author with all needed fields + assert.Equal(t, true, getUseL1Cache(rootFetch), + "root fetch should have UseL1Cache=true (nested User has all fields consumer needs)") + // Entity fetch can read from root field's nested User + assert.Equal(t, true, getUseL1Cache(entityFetch), + "entity fetch should have UseL1Cache=true (root field provides nested User)") +} + +func TestOptimizeL1Cache_RootFetchWithL1Templates_NestedEntityMissingFields(t *testing.T) { + // Root field returns nested User but missing fields + // Root field provides: { products: [{ author: { id } }] } (missing username) + // Entity fetch for User needs {id, username} + processor := &optimizeL1Cache{} + + rootProvidesData := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("products"), Value: &resolve.Array{ + Item: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.String{}}, + {Name: []byte("author"), Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id"), Value: &resolve.String{}}, + // Missing username! + }, + }}, + }, + }, + }}, + }, + } + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + // Entity fetch needs User with {id, username} + entityFetch := makeEntityFetch(1, "User", []string{"id", "username"}, []int{0}) + + input := resolve.Sequence( + resolve.Single(rootFetch), + resolve.Single(entityFetch), + ) + + processor.ProcessFetchTree(input) + + // Root fetch provides User at products[].author but missing username + assert.Equal(t, false, getUseL1Cache(rootFetch), + "root fetch should have UseL1Cache=false (nested User missing username)") + // Entity fetch cannot read from root field + assert.Equal(t, false, getUseL1Cache(entityFetch), + "entity fetch should have UseL1Cache=false (root field's User missing username)") +} + +func TestOptimizeL1Cache_BatchEntityFetch(t *testing.T) { + // Test with BatchEntityFetch type + processor := &optimizeL1Cache{} + + fetch1 := makeBatchEntityFetch(1, "User", []string{"id", "name"}, nil) + fetch2 := makeBatchEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + + input := resolve.Sequence( + resolve.Single(fetch1), + resolve.Single(fetch2), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetch1), "first batch fetch should have UseL1Cache=true") + assert.Equal(t, true, getUseL1Cache(fetch2), "second batch fetch should have UseL1Cache=true") +} + +func TestOptimizeL1Cache_MixedEntityAndBatchFetch(t *testing.T) { + // Mix of EntityFetch and BatchEntityFetch + processor := &optimizeL1Cache{} + + fetch1 := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetch2 := makeBatchEntityFetch(2, "User", []string{"id"}, []int{1}) + + input := resolve.Sequence( + resolve.Single(fetch1), + resolve.Single(fetch2), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetch1), "entity fetch should have UseL1Cache=true (can write for batch)") + assert.Equal(t, true, getUseL1Cache(fetch2), "batch fetch should have UseL1Cache=true (can read from entity)") +} + +func TestOptimizeL1Cache_DisabledProcessor(t *testing.T) { + // When processor is disabled, it should not modify any flags + processor := &optimizeL1Cache{disable: true} + + fetch := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetch.Caching.UseL1Cache = true // Set to true initially + + input := resolve.Sequence( + resolve.Single(fetch), + ) + + processor.ProcessFetchTree(input) + + // Should remain unchanged (true) since processor is disabled + assert.Equal(t, true, getUseL1Cache(fetch), "disabled processor should not change UseL1Cache flag") +} + +func TestOptimizeL1Cache_TransitiveDependencies(t *testing.T) { + // Test transitive dependencies: A→B→C where C needs same type as A + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"id", "name"}, nil) + fetchB := makeEntityFetch(2, "Product", []string{"id", "title"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"id", "name"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + // C transitively depends on A (through B), so A can help C + assert.Equal(t, true, getUseL1Cache(fetchA), "A should have UseL1Cache=true (can write for C)") + assert.Equal(t, false, getUseL1Cache(fetchB), "B should have UseL1Cache=false (different type)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should have UseL1Cache=true (can read from A)") +} + +func TestOptimizeL1Cache_NilRoot(t *testing.T) { + // Test nil root handling + processor := &optimizeL1Cache{} + processor.ProcessFetchTree(nil) // Should not panic +} + +func TestOptimizeL1Cache_EmptyTree(t *testing.T) { + // Test empty tree handling + processor := &optimizeL1Cache{} + input := resolve.Sequence() + processor.ProcessFetchTree(input) // Should not panic +} + +func TestObjectProvidesAllFields(t *testing.T) { + t.Run("nil consumer", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + }, + } + assert.True(t, objectProvidesAllFields(provider, nil)) + }) + + t.Run("nil provider with empty consumer", func(t *testing.T) { + consumer := &resolve.Object{Fields: []*resolve.Field{}} + assert.True(t, objectProvidesAllFields(nil, consumer)) + }) + + t.Run("nil provider with non-empty consumer", func(t *testing.T) { + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + }, + } + assert.False(t, objectProvidesAllFields(nil, consumer)) + }) + + t.Run("provider has all consumer fields", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, + {Name: []byte("email")}, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, + }, + } + assert.True(t, objectProvidesAllFields(provider, consumer)) + }) + + t.Run("provider equals consumer fields", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, + }, + } + assert.True(t, objectProvidesAllFields(provider, consumer)) + }) + + t.Run("provider missing consumer field", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, + }, + } + assert.False(t, objectProvidesAllFields(provider, consumer)) + }) + + t.Run("nested object - provider has all nested fields", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("address"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("street")}, + {Name: []byte("city")}, + {Name: []byte("country")}, + }, + }, + }, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("address"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("street")}, + {Name: []byte("city")}, + }, + }, + }, + }, + } + assert.True(t, objectProvidesAllFields(provider, consumer)) + }) + + t.Run("nested object - provider missing nested field", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("address"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("street")}, + }, + }, + }, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("address"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("street")}, + {Name: []byte("city")}, // Provider doesn't have this + }, + }, + }, + }, + } + assert.False(t, objectProvidesAllFields(provider, consumer)) + }) + + t.Run("array of objects - provider has all fields", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("friends"), + Value: &resolve.Array{ + Item: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, + {Name: []byte("email")}, + }, + }, + }, + }, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("friends"), + Value: &resolve.Array{ + Item: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, + }, + }, + }, + }, + }, + } + assert.True(t, objectProvidesAllFields(provider, consumer)) + }) + + t.Run("array of objects - provider missing nested field", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("friends"), + Value: &resolve.Array{ + Item: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + }, + }, + }, + }, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + { + Name: []byte("friends"), + Value: &resolve.Array{ + Item: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("id")}, + {Name: []byte("name")}, // Provider doesn't have this in array item + }, + }, + }, + }, + }, + } + assert.False(t, objectProvidesAllFields(provider, consumer)) + }) + + t.Run("deeply nested objects", func(t *testing.T) { + provider := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("user"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("profile"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("bio")}, + {Name: []byte("avatar")}, + }, + }, + }, + }, + }, + }, + }, + } + consumer := &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("user"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + { + Name: []byte("profile"), + Value: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("bio")}, + }, + }, + }, + }, + }, + }, + }, + } + assert.True(t, objectProvidesAllFields(provider, consumer)) + }) +} + +// ============================================================================= +// UNION-BASED L1 OPTIMIZATION TESTS +// +// These tests verify that the optimizer computes the UNION of ancestor providers' +// ProvidesData fields. Currently, hasValidProvider checks each provider individually. +// With the union fix, it should check if the combined fields of all prior providers +// cover the consumer's needs. +// ============================================================================= + +func TestOptimizeL1Cache_Union_BasicDisjointFields(t *testing.T) { + // A={name}, B={email}, C needs {name, email} + // Neither A nor B individually covers C. + // Union: {name, email} covers C. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"name"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"email"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"name", "email"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (contributes name to union covering C)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should be true (contributes email to union covering C)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (union of A+B covers {name, email})") +} + +func TestOptimizeL1Cache_Union_InsufficientUnion(t *testing.T) { + // A={name}, B={email}, C needs {name, phone} + // Union: {name, email} does NOT cover {name, phone} — missing phone. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"name"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"email"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"name", "phone"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, false, getUseL1Cache(fetchA), "A should be false (union still can't cover C)") + assert.Equal(t, false, getUseL1Cache(fetchB), "B should be false (union still can't cover C)") + assert.Equal(t, false, getUseL1Cache(fetchC), "C should be false (union {name,email} missing phone)") +} + +func TestOptimizeL1Cache_Union_OverlappingFields(t *testing.T) { + // A={name, id}, B={id, email}, C needs {name, email} + // A has name but not email. B has email but not name. + // Union: {name, id, email} covers C. + // Overlap: both have id. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"name", "id"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"id", "email"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"name", "email"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (contributes name to union)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should be true (contributes email to union)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (union covers {name, email})") +} + +func TestOptimizeL1Cache_Union_FourFetchChain(t *testing.T) { + // A={a}, B={b}, C={c}, D needs {a, b, c} + // No single ancestor covers D. Union of A+B+C = {a,b,c} covers D. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"a"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"b"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"c"}, []int{2}) + fetchD := makeEntityFetch(4, "User", []string{"a", "b", "c"}, []int{3}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + resolve.Single(fetchD), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (contributes a)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should be true (contributes b)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (contributes c)") + assert.Equal(t, true, getUseL1Cache(fetchD), "D should be true (union covers {a,b,c})") +} + +func TestOptimizeL1Cache_Union_MiddleFetchRedundant(t *testing.T) { + // A={name, email}, B={phone}, C needs {name, email} + // A alone covers C. B's {phone} is not needed by C. + // With union-based optimization, B is still enabled because it + // participates in the ancestor chain and the union covers C. + // This is a benign false positive — the cost is just a cheap L1 write. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"name", "email"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"phone"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"name", "email"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (covers C directly)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should be true (participates in chain; benign false positive)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (A covers it)") +} + +func TestOptimizeL1Cache_Union_MiddleFetchEssential(t *testing.T) { + // A={name}, B={email}, C needs {name, email} + // B is essential: without B, union = {name} doesn't cover C. + // B should be true because it contributes to the union. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"name"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"email"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"name", "email"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (essential for union)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should be true (essential for union)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (union covers it)") +} + +func TestOptimizeL1Cache_Union_RootFieldPlusEntity(t *testing.T) { + // Root field provides {name} for User, entity fetch A provides {email}, + // consumer C needs {name, email}. + // Root alone doesn't cover C. A alone doesn't cover C. + // Union: {name, email} covers C. + processor := &optimizeL1Cache{} + + rootProvidesData := &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("users"), Value: &resolve.Array{ + Item: &resolve.Object{ + Fields: []*resolve.Field{ + {Name: []byte("name"), Value: &resolve.String{}}, + }, + }, + }}, + }, + } + rootFetch := makeRootFetchWithL1Templates(0, nil, []string{"User"}, rootProvidesData) + fetchA := makeEntityFetch(1, "User", []string{"email"}, []int{0}) + fetchC := makeEntityFetch(2, "User", []string{"name", "email"}, []int{1}) + + input := resolve.Sequence( + resolve.Single(rootFetch), + resolve.Single(fetchA), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + assert.Equal(t, true, getUseL1Cache(rootFetch), "root should be true (contributes name to union)") + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (contributes email to union)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (union of root+A covers {name, email})") +} + +func TestOptimizeL1Cache_Union_IncreasingFieldsRevisited(t *testing.T) { + // Revisits the existing ThreeFetchChain_IncreasingFields scenario: + // A={id}, B={id, name}, C={id, name} + // Currently A=false. With union: A should be true because A's {id} + // contributes to the union {id, name} that covers C. + // (This is the same as the existing test but with the union expectation.) + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"id"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"id", "name"}, []int{1}) + fetchC := makeEntityFetch(3, "User", []string{"id", "name"}, []int{2}) + + input := resolve.Sequence( + resolve.Single(fetchA), + resolve.Single(fetchB), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + // With union: A contributes {id} to union for C. + // B alone covers C, so A's contribution is redundant. But A should + // still be enabled because its write to L1 accumulates data that + // downstream fetches can use. + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (contributes to union covering C)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should be true (covers C directly)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (B or union covers it)") +} + +func TestOptimizeL1Cache_Union_ParallelProvidersCannotUnion(t *testing.T) { + // A and B run in parallel (no dependency between them). + // C depends on both. C needs {name, email}. + // A={name}, B={email}. + // Even though A+B union covers C, parallel fetches write to L1 concurrently. + // Phase 1 L1 check runs before Phase 2 HTTP, so parallel L1 writes from + // sibling fetches aren't visible to each other. But C runs AFTER both + // A and B complete, so C CAN read the union of A+B from L1. + processor := &optimizeL1Cache{} + + fetchA := makeEntityFetch(1, "User", []string{"name"}, nil) + fetchB := makeEntityFetch(2, "User", []string{"email"}, nil) + fetchC := makeEntityFetch(3, "User", []string{"name", "email"}, []int{1, 2}) + + input := resolve.Sequence( + resolve.Parallel( + resolve.Single(fetchA), + resolve.Single(fetchB), + ), + resolve.Single(fetchC), + ) + + processor.ProcessFetchTree(input) + + // A and B are parallel but both execute before C (C depends on both). + // Union of A+B = {name, email} covers C. + assert.Equal(t, true, getUseL1Cache(fetchA), "A should be true (contributes name for C)") + assert.Equal(t, true, getUseL1Cache(fetchB), "B should be true (contributes email for C)") + assert.Equal(t, true, getUseL1Cache(fetchC), "C should be true (union of parallel A+B covers it)") +} diff --git a/v2/pkg/engine/postprocess/postprocess.go b/v2/pkg/engine/postprocess/postprocess.go index a98f9f16a5..a9f427a1a0 100644 --- a/v2/pkg/engine/postprocess/postprocess.go +++ b/v2/pkg/engine/postprocess/postprocess.go @@ -38,6 +38,7 @@ type processorOptions struct { disableExtractFetches bool disableCreateParallelNodes bool disableAddMissingNestedDependencies bool + disableOptimizeL1Cache bool collectDataSourceInfo bool } @@ -92,6 +93,12 @@ func DisableAddMissingNestedDependencies() ProcessorOption { } } +func DisableOptimizeL1Cache() ProcessorOption { + return func(o *processorOptions) { + o.disableOptimizeL1Cache = true + } +} + func NewProcessor(options ...ProcessorOption) *Processor { opts := &processorOptions{} for _, o := range options { @@ -124,6 +131,11 @@ func NewProcessor(options ...ProcessorOption) *Processor { &createParallelNodes{ disable: opts.disableCreateParallelNodes, }, + // optimizeL1Cache must run after createConcreteSingleFetchTypes as it needs to see + // EntityFetch and BatchEntityFetch types, not just SingleFetch with flags + &optimizeL1Cache{ + disable: opts.disableOptimizeL1Cache, + }, }, processResponseTree: []ResponseTreeProcessor{ &mergeFields{ diff --git a/v2/pkg/engine/resolve/CLAUDE.md b/v2/pkg/engine/resolve/CLAUDE.md new file mode 100644 index 0000000000..0ba01e45f4 --- /dev/null +++ b/v2/pkg/engine/resolve/CLAUDE.md @@ -0,0 +1,880 @@ +# Resolve Package Reference + +The `resolve` package is the execution core of the GraphQL engine. It takes a planned `GraphQLResponse` (response plan tree + fetch tree), executes subgraph fetches, and renders the final JSON response. Entity caching (L1/L2) is integrated directly into the fetch execution flow. + +## Architecture Overview + +Three components work together: + +| Component | File | Responsibility | +|-----------|------|---------------| +| **Resolver** | `resolve.go` | Orchestration, concurrency, arena pools, subscriptions | +| **Loader** | `loader.go` | Fetch execution, caching, result merging | +| **Resolvable** | `resolvable.go` | Response data, two-pass rendering, error handling | + +**End-to-end flow:** +```text +Resolver.ResolveGraphQLResponse(ctx, response, writer) + 1. Inbound singleflight check — followers reuse leader's bytes verbatim + 2. Acquire concurrency semaphore + 3. Create Loader + Resolvable from arena pool + 4. Resolvable.Init(ctx, nil, operationType) + 4. Loader.LoadGraphQLResponseData(ctx, response, resolvable) + └─ Walk fetch tree: sequence/parallel/single + └─ For each fetch: cache check → subgraph request → merge result + 5. Resolvable.Resolve(ctx, response.Data, response.Fetches, responseBuf) + └─ Two-pass walk: validate+collect errors, then render JSON + 6. Release resolve arena, then writer.Write(responseBuf.Bytes()) + └─ Releasing first frees ~50KB during the slow client I/O +``` + +## Resolver (resolve.go) + +Resolver is a single-threaded event loop for subscriptions and an orchestrator for query/mutation resolution. + +### Key Fields +```go +type Resolver struct { + ctx context.Context + options ResolverOptions + maxConcurrency chan struct{} // Semaphore (buffered channel, default 32) + resolveArenaPool *arena.Pool // Arena for Loader & Resolvable + responseBufferPool *arena.Pool // Arena for response buffering + subgraphRequestSingleFlight *SubgraphRequestSingleFlight + inboundRequestSingleFlight *InboundRequestSingleFlight + triggers map[uint64]*trigger // Subscription triggers + events chan subscriptionEvent // Subscription event loop +} +``` + +### Entry Points + +**ResolveGraphQLResponse** — standard resolution: +```go +func (r *Resolver) ResolveGraphQLResponse(ctx *Context, response *GraphQLResponse, writer io.Writer) (*GraphQLResolveInfo, error) +``` +Uses two separate arenas (resolve + response buffer). The resolve arena is freed early before I/O. Inbound deduplication: leader executes, followers wait and reuse buffered response. Followers receive the leader's shared state (e.g. propagated headers) via `Context.SetDeduplicationData` if configured. + +Inbound dedup requires `ctx.Request.ID` and `ctx.VariablesHash` to be populated by the caller. The execution engine populates them via `WithInboundRequestDeduplication()`. + +**ArenaResolveGraphQLResponse** — Deprecated. Thin wrapper that delegates to `ResolveGraphQLResponse`. Kept for backwards compatibility. + +**ResolveGraphQLSubscription** — long-lived subscription: +```go +func (r *Resolver) ResolveGraphQLSubscription(ctx *Context, subscription *GraphQLSubscription, writer SubscriptionResponseWriter) error +``` + +### ResolverOptions + +Key fields on `ResolverOptions`: +- `MaxConcurrency` — semaphore size (default 32, ~50KB per concurrent resolve) +- `Caches map[string]LoaderCache` — named L2 cache instances +- `EntityCacheConfigs` — subgraph → entity type → invalidation config (for extension-based invalidation) +- `PropagateSubgraphErrors`, `SubgraphErrorPropagationMode` — error handling +- `ResolvableOptions` — Apollo compatibility flags +- `SubscriptionHeartbeatInterval` — heartbeat interval (default 5s) + +## Loader (loader.go) + +The Loader executes fetches and merges results into the Resolvable's data. Caching is embedded in the fetch execution flow. + +### Key Fields +```go +type Loader struct { + resolvable *Resolvable + ctx *Context + caches map[string]LoaderCache // Named L2 cache instances + l1Cache map[string]*astjson.Value // Per-request entity cache (key → *astjson.Value on jsonArena). Main-thread only; plain map, not sync.Map. + jsonArena arena.Arena // NOT thread-safe, main thread only + parser astjson.Parser // Reusable, main thread only; scratch slabs amortize across requests + singleFlight *SubgraphRequestSingleFlight + enableMutationL2CachePopulation bool // Set per-mutation, inherited by entity fetches + entityCacheConfigs map[string]map[string]*EntityCacheInvalidationConfig +} +``` + +- `l1Cache` stores `*astjson.Value` pointing into `l.jsonArena` directly. + Both writes and reads StructuralCopy (see "Entity L1 Representation" below), + so there is no separate byte-backed entry type. +- `transformEntries` and `transforms` are reusable slabs for ephemeral Transforms, + resliced to `[:0]` before each use to amortize allocation. +- `parser` is a Loader-owned `astjson.Parser` used exclusively from the main thread + to parse bulk L2 responses onto `l.jsonArena`. + Its scratch slabs are retained across requests to amortize cost. +- There is no `goroutineArenas` field anymore — + L2 parsing is now serialized on the main thread via `bulkL2Lookup`, + so goroutines do not allocate JSON at all. + +### Fetch Tree Execution + +`LoadGraphQLResponseData` is the entry point. It dispatches on the fetch tree: + +```go +func (l *Loader) resolveFetchNode(node *FetchTreeNode) error { + switch node.Kind { + case FetchTreeNodeKindSingle: return l.resolveSingle(node.Item) + case FetchTreeNodeKindSequence: return l.resolveSerial(node.ChildNodes) + case FetchTreeNodeKindParallel: return l.resolveParallel(node.ChildNodes) + } +} +``` + +### Sequential Execution (resolveSerial) + +Each fetch waits for the previous one to complete: +```go +for i := range nodes { + err := l.resolveFetchNode(nodes[i]) +} +``` + +### Parallel Execution (resolveParallel) — Phases + +All cache logic runs on the main thread. +Goroutines exist only for subgraph HTTP fetches. +The model is: + +**Phase 1: Prepare + L1 Check (Main Thread)** +- `prepareCacheKeys()` — generate L1 and L2 cache keys for each fetch +- `tryL1CacheLoad()` — check `l1Cache` (plain map) for entity hits; + every hit StructuralCopies the stored `*astjson.Value` onto `l.jsonArena`, + applying a passthrough denormalize Transform when aliases are present +- If L1 complete hit → set `cacheSkipFetch = true`, + skip L2 and goroutine + +**Phase 1.5: @requestScoped Injection (Main Thread)** +- `tryRequestScopedInjection()` for each not-yet-skipped fetch +- When injection satisfies the fetch → set `fetchSkipped = true`, + skip L2 and goroutine, + record LoadSkipped and `cacheTraceRequestScopedHits` + +**Phase 2L2: Bulk L2 Lookup (Main Thread)** — see "Bulk L2 Lookup" below +- `bulkL2Lookup()` — group L2-eligible fetches by cache instance, + one bulk `cache.Get` per instance, + parse results verbatim on `l.parser` → `l.jsonArena`, + distribute parsed values back to per-fetch `l2CacheKeys[].FromCache`, + run `applyEntityFetchL2Results` / `applyRootFetchL2Results` + to decide `cacheSkipFetch` per fetch, + accumulate analytics and cache trace attachments + +**Phase 2HTTP: Parallel HTTP Fetches (Goroutines via errgroup)** +- `loadFetchHTTP()` for fetches not already skipped by L1, request-scoped, or L2 +- Goroutines do HTTP only — + no cache Gets, no parsing, no arena allocation. + The byte body is returned to the main thread for parsing in Phase 4. + +**Phase 3: Merge Analytics (Main Thread)** +- Merge per-result `l2AnalyticsEvents`, `l2EntitySources`, `l2FetchTimings`, + `l2ErrorEvents`, `l2CacheOpErrors` into the collector. + These slices now only contain write-side / HTTP events; + L2 reads are already accumulated by `bulkL2Lookup` in Phase 2L2. + +**Phase 3.5: Retry @requestScoped Injection (Main Thread)** +- Rerun `tryRequestScopedInjection()` for hints that became satisfiable after + sibling fetches produced the hinted data. + +**Phase 4: Merge Results (Main Thread)** +- `mergeResult()` — parse response JSON on `l.jsonArena`, + merge into Resolvable data tree +- `callOnFinished()` — invoke LoaderHooks +- `populateL1Cache()` / `updateL2Cache()` — write caches using StructuralCopy + (L1) / `MarshalToWithTransform` (L2) +- `exportRequestScopedFields()` — populate request-scoped L1 for sibling fetches + +**Why main-thread cache work?** +L1 is a plain map read and written only on the main thread — +check on the main thread to skip goroutine work early. +L2 parsing is now also main-thread: +a single bulk Get per cache instance replaces N parallel per-fetch Gets, +the parser and arena are reused, +and the goroutine-arena pool (formerly needed to avoid racing on `l.jsonArena`) +is gone entirely. +Goroutines shrink to what actually benefits from parallelism — subgraph HTTP. + +### Bulk L2 Lookup + +`bulkL2Lookup(ctx, nodes, results)` is the main-thread entry point that replaced +per-fetch goroutine L2 reads. +It runs between Phase 1.5 and the HTTP-fetch goroutine launch. + +Flow: + +1. **Group by cache instance.** Walk `results`, collect each fetch's + `l2CacheKeys[].Keys` into a `planEntry{cache, keys, owners}` keyed by + `LoaderCache` identity. + Fetches that are already skipped (L1 complete, @requestScoped) are excluded. +2. **One bulk `cache.Get` per plan.** For each `planEntry`, + issue a single `plan.cache.Get(ctx, plan.keys)`. + Timing is measured once per bulk Get and attributed to every fetch in the plan + (via `l2FetchTimings` with the bulk duration). +3. **Parse verbatim on `l.parser` / `l.jsonArena`.** + Each returned `*CacheEntry` is parsed into an `*astjson.Value` on the + Loader's own arena via `l.parseL2Bytes`. + No denormalize Transform is applied at parse time — + the denormalize Transform is applied later at the materialization site (`applyEntityFetchL2Results` / + `applyRootFetchL2Results`) using `StructuralCopyWithTransform`, + so that the cache-shape value remains available for the writeback merge in `updateL2Cache`. +4. **Distribute results back.** `populateFromCacheBulk` walks each fetch's + `l2CacheKeys[]` and attaches the parsed values to `FromCache` (and + candidate slices for multi-candidate resolution). +5. **Decide `cacheSkipFetch`.** `applyEntityFetchL2Results` / + `applyRootFetchL2Results` run validation against `ProvidesData` and + set `cacheSkipFetch` for fetches whose L2 hits cover all items. + +**Failure semantics — documented behavior change.** +The old per-fetch goroutine path isolated cache errors: a `Get` failure on one +fetch affected only that fetch. +Under `bulkL2Lookup`, a single `plan.cache.Get` now serves every fetch +whose `l2CacheKeys` route to the same cache instance — +if that bulk Get returns an error, +**all fetches in the batch fall back to subgraph**. +Each affected fetch is marked `cacheMustBeUpdated = true`, +its `cacheTraceL2GetError` is set, +and a `CacheOperationError` is recorded per fetch in `l2CacheOpErrors`. +This is considered acceptable because production cache backends rarely fail partially; +the win is removing a goroutine per fetch and a per-goroutine arena per batch. + +### Result Merging + +After a fetch completes, `mergeResult` does: +1. Check for errors in subgraph response +2. Handle auth/rate-limit rejections +3. Parse response JSON into arena-allocated values +4. Merge into items using `astjson.MergeValuesWithPath` +5. For batch entities: map response items back to original items via `batchStats` +6. Run cache invalidation (mutations, extensions) +7. Populate L1 and L2 caches + +### LoaderHooks + +```go +type LoaderHooks interface { + OnLoad(ctx context.Context, ds DataSourceInfo) context.Context + OnFinished(ctx context.Context, ds DataSourceInfo, info *ResponseInfo) +} +``` +Called before/after each fetch. `OnLoad` returns a context passed to `OnFinished`. Not called when fetch is skipped (null parent, auth rejection). + +### DataSource Interface + +```go +type DataSource interface { + Load(ctx context.Context, headers http.Header, input []byte) (data []byte, err error) + LoadWithFiles(ctx context.Context, headers http.Header, input []byte, files []*httpclient.FileUpload) (data []byte, err error) +} +``` + +## Resolvable (resolvable.go) + +Holds the response data and renders it to JSON using a two-pass tree walk. + +### Key Fields +```go +type Resolvable struct { + data *astjson.Value // Root response object (arena-allocated) + errors *astjson.Value // Errors array (lazily initialized) + astjsonArena arena.Arena // Shared with Loader, NOT thread-safe + print bool // false=pre-walk, true=print-walk + out io.Writer // Output for print pass + path []fastjsonext.PathElement // Current JSON path + depth int + operationType ast.OperationType + + // Entity cache analytics (set during print phase) + currentEntityAnalytics *ObjectCacheAnalytics + currentEntityTypeName string + currentEntitySource FieldSource +} +``` + +### Two-Pass Walk + +**Pass 1 (pre-walk)**: `print = false` +- Traverse response plan tree, validate types +- Check field authorization +- Collect errors (null bubbling for non-nullable fields) +- Do NOT write output + +**Pass 2 (print-walk)**: `print = true` +- Traverse again, write JSON to output +- Record entity cache analytics during rendering +- Hash field values for staleness detection + +### walkObject (core method) + +```text +1. Navigate to object in JSON: value = parent.Get(obj.Path...) +2. Null check: if nil and non-nullable → error with null bubbling +3. Type validation: check __typename against PossibleTypes +4. Entity analytics: extract key fields, record entity source (print phase only) +5. Walk all fields recursively: walkNode(field.Value, value) +6. Field authorization: skip unauthorized fields +``` + +### Error Handling Modes + +- **ErrorBehaviorPropagate** (default): null bubbles up to nearest nullable parent +- **ErrorBehaviorNull**: field becomes null even if non-nullable +- **ErrorBehaviorHalt**: stop all execution on first error + +## Response Plan Tree (Node Types) + +The planner produces a tree of Node types describing the expected response shape. + +### GraphQLResponse + +```go +type GraphQLResponse struct { + Data *Object // Response plan tree root + Fetches *FetchTreeNode // Fetch execution tree + Info *GraphQLResponseInfo + DataSources []DataSourceInfo +} +``` + +### Node Types + +| Type | Fields | Purpose | +|------|--------|---------| +| `Object` | Path, Fields, Nullable, PossibleTypes, CacheAnalytics | Object with named fields | +| `Field` | Name, OriginalName, Value (Node), CacheArgs, OnTypeNames, Info | Named field in an object | +| `Array` | Path, Nullable, Item (Node), SkipItem | List of items | +| `String` | Path, Nullable, IsObjectID | String scalar | +| `Scalar` | Path, Nullable | Custom scalar (raw JSON) | +| `Boolean`, `Integer`, `Float`, `BigInt` | Path, Nullable | Typed scalars | +| `Enum` | Path, Nullable, TypeName, Values | Enumeration | +| `Null`, `EmptyObject`, `EmptyArray` | — | Constant nodes | +| `StaticString` | Path, Value | Constant string value | + +### Field +```go +type Field struct { + Name []byte // Output name (may be alias) + OriginalName []byte // Schema name (nil if Name IS original) + Value Node // Nested response node + CacheArgs []CacheFieldArg // Field arguments for cache key suffix (xxhash) + OnTypeNames [][]byte // Fragment type conditions + Info *FieldInfo // Metadata (type names, authorization, source tracking) +} +``` + +## Fetch Tree + +The planner produces a separate tree for fetch execution. + +### FetchTreeNode +```go +type FetchTreeNode struct { + Kind FetchTreeNodeKind // Single | Sequence | Parallel + Item *FetchItem // For Single nodes + ChildNodes []*FetchTreeNode // For Sequence/Parallel nodes + Trigger *FetchTreeNode // For subscription triggers +} +``` + +### Fetch Types + +| Type | Use Case | Key Fields | +|------|----------|------------| +| `SingleFetch` | Root fields, standalone queries | InputTemplate, DataSource, Caching | +| `EntityFetch` | Nested entity (single object) | EntityInput (Header, Item, Footer) | +| `BatchEntityFetch` | Nested entity (array) | BatchInput (Header, Items[], Separator, Footer) | + +All fetch types carry `FetchCacheConfiguration` and `FetchInfo` (data source name, provides data, root fields). + +### FetchCacheConfiguration +```go +type FetchCacheConfiguration struct { + Enabled bool // L2 enabled for this fetch + CacheName string // Cache instance name + TTL time.Duration // Cache entry lifetime + CacheKeyTemplate CacheKeyTemplate // Key generation template + IncludeSubgraphHeaderPrefix bool // Prefix with header hash + RootFieldL1EntityCacheKeyTemplates map[string]CacheKeyTemplate // Entity L1 keys for root fields + EnablePartialCacheLoad bool // Fetch only missing entities + UseL1Cache bool // L1 enabled (set by postprocessor) + ShadowMode bool // Never serve cached data + MutationEntityImpactConfig *MutationEntityImpactConfig + EnableMutationL2CachePopulation bool // Mutations populate L2 + HashAnalyticsKeys bool // Hash vs raw in analytics + KeyFields []KeyField // @key fields for analytics +} +``` + +## Entity Caching + +### Architecture + +| Cache | Storage | Scope | Key Fields | Thread Safety | +|-------|---------|-------|------------|---------------| +| **L1** | Plain `map[string]*astjson.Value` in Loader | Single request | `@key` only | Main-thread only — no locking required | +| **L2** | External (`LoaderCache`) | Cross-request | `@key` only | Main-thread bulk Get + per-result write-side accumulation | + +**Key principle**: Both L1 and L2 use only `@key` fields for stable entity identity. + +### LoaderCache Interface +```go +type LoaderCache interface { + Get(ctx context.Context, keys []string) ([]*CacheEntry, error) + Set(ctx context.Context, entries []*CacheEntry, ttl time.Duration) error + Delete(ctx context.Context, keys []string) error +} + +type CacheEntry struct { + Key string + Value []byte // JSON-encoded entity + RemainingTTL time.Duration // TTL from cache (0 = unknown) +} +``` + +### Cache Key Generation + +**Entity keys** (via `EntityQueryCacheKeyTemplate`): +```json +{"__typename":"User","key":{"id":"123"}} +``` + +**Root field keys** (via `RootQueryCacheKeyTemplate`): +```json +{"__typename":"Query","field":"topProducts","args":{"first":5}} +``` + +**Key transformations** (applied in order): +1. Subgraph header hash prefix: `{headerHash}:{key}` (when `IncludeSubgraphHeaderPrefix = true`) +2. `L2CacheKeyInterceptor`: custom transform (e.g., tenant isolation) + +**Entity field argument-aware keys**: Fields with arguments get xxhash suffix appended, so different argument values produce different cache entries. + +### Cache Flow (Integrated into Loader Phases) + +**Sequential (tryCacheLoad):** +```text +prepareCacheKeys() → tryL1CacheLoad() → tryL2CacheLoad() → fetch → populateL1Cache() + updateL2Cache() +``` +The sequential path still uses `tryL2CacheLoad` because there is no batch to bulk over. +It parses on `l.parser` / `l.jsonArena` just like `bulkL2Lookup`. + +**Parallel (resolveParallel):** +```text +Phase 1 (main): prepareCacheKeys + tryL1CacheLoad for all fetches +Phase 1.5 (main): tryRequestScopedInjection (skip fetches whose data is already in requestScopedL1) +Phase 2L2 (main): bulkL2Lookup — one cache.Get per cache instance, + parse verbatim on l.parser / l.jsonArena, + distribute results, decide cacheSkipFetch, attach cache trace +Phase 2HTTP (goroutines): loadFetchHTTP for remaining fetches — HTTP only, + no cache work, no JSON parsing +Phase 3 (main): merge per-result analytics (write-side + HTTP) into the collector +Phase 3.5 (main): retry tryRequestScopedInjection for late-satisfied hints +Phase 4 (main): mergeResult + populateL1Cache + updateL2Cache + exportRequestScopedFields +``` + +### Entity L1 Representation + +Entity L1 is pointer-backed via `*astjson.Value`. +Storage is always on `l.jsonArena`, +all reads and writes happen on the main thread, +and isolation from the response tree is guaranteed by always StructuralCopying on both sides of the cache. + +**StructuralCopy semantics**: `l.parser.StructuralCopy` clones container nodes (objects, arrays) +on the arena while aliasing leaf nodes (strings, numbers, bools, nulls) from the source. +This is safe because all values within a request share the same arena lifetime. +Strings are always eagerly decoded during parsing (no lazy mutation), +making aliased leaf values safe for concurrent reads. + +**Writes** (`populateL1Cache` + root-field promotion): +- L1 writes use `l.structuralCopyNormalizedPassthrough(value, fetchInfo)` — + renames aliases to schema names but keeps ALL source fields + (including @key fields not in ProvidesData). + The passthrough behavior preserves field accumulation across fetches. +- With no alias / arg normalization → `l.parser.StructuralCopy(l.jsonArena, value)` +- With normalization needed → `l.parser.StructuralCopyWithTransform(l.jsonArena, value, xform)` + where `xform` is built ephemeral with `Transform.Passthrough = true` +- Merging an incoming value into an existing L1 entry uses the + **working-copy-and-swap** pattern: + StructuralCopy the existing entry into a working copy, + run `astjson.MergeValues(l.jsonArena, working, freshIncoming)` against the working copy, + and `l1Cache.Store(key, working)` on success or `l1Cache.Store(key, freshIncoming)` on failure. + The live entry pointer is never mutated in place, + so a partial-mutation failure inside `MergeValues` cannot corrupt sibling L1 keys. + +**Reads** (`tryL1CacheLoad` + `populateFromCache`): +- L1 reads use `l.structuralCopyDenormalizedPassthrough(stored, fetchInfo)` — + restores aliases but keeps all accumulated fields from prior fetches. +- With no aliases → `l.parser.StructuralCopy(l.jsonArena, stored)` returns a fresh, + mutable value owned by the current request arena. +- With aliases → `l.parser.StructuralCopyWithTransform(l.jsonArena, stored, xform)` + re-applies aliases via an ephemeral passthrough Transform while producing an independent copy. +- Readers can freely mutate the returned value (merge into items, re-wrap, etc.) + without affecting the cached entry. + +**L2 writes** still use non-passthrough `l.structuralCopyNormalized` (projects to ProvidesData +fields only) since L2 entries must be minimal and self-contained. + +StructuralCopy on the same arena is cheap — +a single tree walk with leaf aliasing, no byte round-trip, no parser invocation. +It gives a stronger isolation guarantee than the former byte-backed design +(which parsed on every read) and removes an entire class of arena-lifetime bugs +that used to require the goroutine-arena pool to paper over. + +### Copy Budget + +The minimum StructuralCopy count for each data flow, +verified by adversarial mutation tests in `loader_cache_copy_invariant_test.go` +and baseline benchmarks in `loader_cache_copy_bench_test.go` / `loader_noncaching_bench_test.go`. +Any PR that changes this budget must update both the tests and this table. + +| Flow | Writes | Reads | Merge-into-response | +|------|--------|-------|---------------------| +| L1 write (`populateL1Cache`) | 1 (`structuralCopyNormalizedPassthrough`) | — | — | +| L1 read + merge (`tryL1CacheLoad` + `populateFromCache`) | — | 1 (`structuralCopyDenormalizedPassthrough`) | — | +| L2 write (`updateL2Cache`) | 1 (`MarshalToWithTransform` — byte-level, no Value copy) | — | — | +| L2 read + merge (`bulkL2Lookup` + `applyEntityFetchL2Results`) | — | 1 parse + 1 `structuralCopyDenormalized` per entity | — | +| Full L1 cache hit merge (`mergeResult` cacheSkipFetch, loader.go:1472) | — | (1 above) | 1 `StructuralCopy` per entity before `MergeValues` into response item | +| Partial-cache L1 merge (`mergeResult` partialCache, loader.go:1491) | — | (1 above) | 1 `StructuralCopy` per cached item before `MergeValues` | +| Batch L2 cache hit splice (`mergeBatchCacheHit`, loader.go:1220) | — | (L2 above) | 1 `StructuralCopy` per entity before `SetArrayItem` | +| Partial batch response interleave (`mergeBatchPartialResponse`, loader.go:1372) | — | (L2 above) | 1 `StructuralCopy` per cached entity before `SetArrayItem` | +| Entity L1 merge-into-existing working-copy-and-swap (`loader_cache.go:1647`, `:3110`) | 1 `StructuralCopy` of existing entry before in-place `MergeValues` | — | — | +| @requestScoped coordinate L1 inject/export | 1 per hint via `structuralCopyNormalized` / `structuralCopyDenormalized` | — | — | +| Non-caching fetch | — | — | **0** — one `ParseBytesWithArena` + `MergeValuesWithPath`, no copy | + +**Why the response-tree merge copies are load-bearing**: +`astjson.MergeValues(dst, src)` aliases nested container nodes from `src` into `dst`. +Without a StructuralCopy isolating `src`, mutating a nested field under `dst` +(e.g., a subsequent fetch merging into the same response tree, +or the L1 merge-into-existing path writing back) corrupts the underlying cache entry. +Adversarial tests in `loader_cache_copy_invariant_test.go` verify each site by +mutating `mergedValue.Get("profile")` and asserting `FromCache` remains intact — +with any of the 4 copies removed, the `profile` nested container gets corrupted. + +**Why working-copy-and-swap is load-bearing**: +`MergeValues` is non-atomic on failure. A partial mutation of a live L1 entry +would corrupt every sibling L1 key pointing at the same `*Value`. +Copy-merge-store is the only safe pattern. + +**Absolute floor**: isolation between cache and response tree requires at least +one copy at the write boundary + one at the read boundary + one at the merge +boundary (because the read copy must survive `MergeValues` aliasing into the +response tree, which is a longer-lived writable structure than the cache entry). + +**Root-field L1 promotion** (`populateL1CacheForRootFieldEntities`): +When a root-field fetch returns entities that have `RootFieldL1EntityCacheKeyTemplates`, +the loader promotes the entities into `l1Cache` under their entity cache keys +so a later entity fetch can short-circuit. +Promotion derives the entity-shaped sub-`Object` from `singleFetch.Info.ProvidesData` +via `batchEntityValidationObject(providesData, fieldPath)`, +builds a normalize Transform once per path group, +and stores a `StructuralCopyWithTransform`-ed entity on `l.jsonArena`. +If `singleFetch.Info.ProvidesData` is nil — typically because the planner ran with +`DisableFetchProvidesData = true` — promotion is silently skipped rather than +storing response-shape (aliased) values that would corrupt subsequent entity L1 +reads. Production planners always populate `ProvidesData`, so this guard is +defense-in-depth against test/programmatic fetch construction. + +### ProvidesData and Validation + +`FetchInfo.ProvidesData` describes what fields a fetch provides. Used by: +- `validateItemHasRequiredData()` — check if cached entity has all required fields +- `buildNormalizeTransformForFetch()` / `buildDenormalizeTransformForFetch()` — + derive per-fetch `astjson.Transform` descriptors from the `*Object` tree. + The normalize Transform strips aliases and appends CacheArgs hash suffixes; + the denormalize Transform is the inverse. + Transforms are now ephemeral — built and consumed inline at each cache operation + site via `l.structuralCopyNormalized()` / `l.structuralCopyDenormalized()` + (and their passthrough variants for L1). + The Loader has reusable `transformEntries []astjson.TransformEntry` and + `transforms []astjson.Transform` slabs that are resliced to `[:0]` before each use. + Driven by the astjson APIs + (`StructuralCopyWithTransform`, `MarshalToWithTransform`, `ParseBytesWithTransform`). + `Transform.Passthrough` — when true, source fields not listed in Entries or Forced + are copied verbatim (no rename, no projection). + Used by L1 writes/reads to preserve all entity fields while still renaming aliased fields. +- `shallowCopyProvidedFields()` — copy only required fields for shadow comparisons and request-scoped injection + +**Critical**: For nested entity fetches, `ProvidesData` must contain entity fields (`id`, `username`), NOT the parent field (`author`). + +**Union-based L1 optimization**: The postprocessor (`optimize_l1_cache.go`) computes the +UNION of ancestor providers' ProvidesData fields when checking if a fetch can read from L1. +If no single provider covers the consumer, +the union of all prior providers (same entity type, in dependency chain) is checked. +This enables L1 for fetches whose required fields are spread across multiple prior fetches. + +**Request-scoped Transforms**: a Transform's OutputKey for any field with `CacheArgs` +depends on `l.ctx.Variables` and `l.ctx.RemapVariables`, +both of which are per-request state. +The same `*Field` on the same shared planner `*Object` therefore produces +different OutputKey suffixes in different requests. +Transforms are valid only for the request that built them +and MUST be ephemeral — +never cached on `*Object`, the plan tree, the `Resolver`, or anywhere else outliving a request. +Within one fetch, `cacheFieldName(field)` is deterministic, +so building Transforms once at the top of `prepareCacheKeys` and reusing for the rest of +the cache flow is sound. + +### Cache Invalidation + +**Extension-based** (`processExtensionsCacheInvalidation`): +Subgraphs return invalidation keys in response extensions: +```json +{"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}} +``` +Optimization: skips delete if the same key is being written by `updateL2Cache`. + +**Mutation-based** (`MutationCacheInvalidationConfiguration`): +After mutation completes, delete L2 entry for the returned entity. + +**Subscription-based** (`SubscriptionEntityPopulationConfiguration`): +- Populate mode: write entity data to L2 on each subscription event +- Invalidate mode (`EnableInvalidationOnKeyOnly`): delete L2 entry when subscription provides only @key fields + +### Smart Cache Key Backfill (Root Field EntityKeyMappings) + +When `EntityKeyMappings` produces multiple L2 keys on read and some miss, +`updateL2Cache` makes precise per-key write decisions via `cacheKeysToExactRootFieldEntityEntries`. + +Two independent write decisions per mapping: + +1. **Requested key** (`shouldWriteRequestedKey`): the key rendered from request arguments. + Written when it matches the rendered key (backfill) or on the fetch path (refresh). + On skip-fetch, only written when `fromCacheNeedsWriteback`. +2. **Rendered key** (`shouldWriteRenderedKey`): the key rendered from final entity data. + On the fetch path, always written — the subgraph is the source of truth. + On the skip-fetch path, only written for genuinely new keys (missing or derived), + not existing cached keys that would be redundantly rewritten. + +This means a value mismatch (request asked for `email:a@` but entity has `email:b@`) writes +the `b@` key as a derived entry while correctly skipping the unproven `a@` key. + +`hasMissingRequestedKeys` replaces the old `needsKeyBackfill` boolean with per-entity precision. +`cacheMustBeUpdated` is set optimistically before merge; exact filtering happens in `updateL2Cache`. + +### Partial Cache Loading + +- **Default** (`EnablePartialCacheLoad = false`): any cache miss → refetch ALL entities in batch +- **Enabled** (`EnablePartialCacheLoad = true`): only fetch missing entities, serve cached ones directly + +### Shadow Mode + +L2 reads and writes happen normally, but cached data is **never served**. Fresh data is always fetched from the subgraph and compared against the cached value. Used for staleness detection via `ShadowComparisonEvent`. L1 cache works normally (not affected by shadow mode). + +### Cache Analytics + +Enable via `ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true`. After execution, call `ctx.GetCacheStats()` to get `CacheAnalyticsSnapshot`. + +**CacheAnalyticsSnapshot** contains: +- `L1Reads`, `L2Reads` — `[]CacheKeyEvent` (hit/miss/partial-hit per key) +- `L1Writes`, `L2Writes` — `[]CacheWriteEvent` (key, size, TTL, WriteReason for EntityKeyMappings writes) +- `FetchTimings` — `[]FetchTimingEvent` (duration, HTTP status, response size, TTFB) +- `ErrorEvents` — `[]SubgraphErrorEvent` +- `FieldHashes` — `[]EntityFieldHash` (xxhash of field values for staleness) +- `EntityTypes` — `[]EntityTypeInfo` (count and unique keys per type) +- `ShadowComparisons` — `[]ShadowComparisonEvent` (cached vs fresh comparison) +- `MutationEvents` — `[]MutationEvent` (mutation impact on cached entities) + +**Convenience methods**: `L1HitRate()`, `L2HitRate()`, `L1HitCount()`, `L2HitCount()`, `CachedBytesServed()`, `EventsByEntityType()`. + +**Thread safety**: L2 read events are accumulated by `bulkL2Lookup` on the main thread. +Write-side and HTTP events (`l2AnalyticsEvents`, `l2FetchTimings`, `l2ErrorEvents`, +`l2CacheOpErrors`, `l2EntitySources`) are accumulated per-result and merged into the +collector on the main thread after `g.Wait()` via `MergeL2Events()`, +`MergeL2FetchTimings()`, `MergeL2Errors()`, `MergeL2CacheOpErrors()`, and +`MergeEntitySources()`. + +## Configuration Types + +### Runtime Options (set per-request on Context) +```go +type CachingOptions struct { + EnableL1Cache bool // Per-request entity cache + EnableL2Cache bool // External cross-request cache + EnableCacheAnalytics bool // Detailed event tracking + L2CacheKeyInterceptor L2CacheKeyInterceptor // Custom key transform +} + +type L2CacheKeyInterceptor func(ctx context.Context, key string, info L2CacheKeyInterceptorInfo) string +type L2CacheKeyInterceptorInfo struct { + SubgraphName string + CacheName string +} +``` + +### Plan-Time Configuration (in `plan/federation_metadata.go`) + +Set per-subgraph via `SubgraphCachingConfig`: + +| Type | Controls | +|------|----------| +| `EntityCacheConfiguration` | L2 caching for entity types (TypeName, CacheName, TTL, etc.) | +| `RootFieldCacheConfiguration` | L2 caching for root fields (TypeName, FieldName, EntityKeyMappings) | +| `MutationFieldCacheConfiguration` | Whether mutations populate L2 | +| `MutationCacheInvalidationConfiguration` | Which mutations delete L2 entries | +| `SubscriptionEntityPopulationConfiguration` | How subscriptions populate/invalidate L2 | + +## Thread Safety Model + +The model is intentionally simple: +**main thread parses, merges, and runs all cache logic; +goroutines do HTTP only.** + +| Context | Operations | Safety Mechanism | +|---------|-----------|-----------------| +| Main thread | Arena allocation, parsing, L1 cache ops, bulk L2 Get + parse + distribute, result merging, two-pass rendering | Single-threaded | +| Goroutines (Phase 2HTTP) | Subgraph HTTP calls (byte body only) | No shared arena state; each goroutine returns a `[]byte` to its `*result` for main-thread parsing in Phase 4 | +| Analytics merge | Per-result write-side slices → collector | Main thread merge after `g.Wait()` (L2 read events are already accumulated on the main thread in Phase 2L2) | +| L1 cache | Read/write entity values | Plain map, main-thread only; values are pointer-stable because every write StructuralCopies first | + +**Rule**: Never allocate on `jsonArena` from a goroutine. +HTTP goroutines must hand their response body back as `[]byte` for main-thread parsing. + +## Arena Allocation + +- Resolver owns `resolveArenaPool` and `responseBufferPool` +- All `*astjson.Value` nodes live on the shared arena (no GC pressure) +- Arena is NOT thread-safe → only main thread allocates +- **Early release pattern** (`ResolveGraphQLResponse`): resolve arena freed before I/O, response arena freed after write +- Never store heap-allocated `*Value` in arena-owned containers (GC can't trace into arena noscan memory) +- All parsed L2 values now live on `l.jsonArena` directly. + There are no goroutine arenas and no cross-arena references in the response tree, + so the old "MergeValues creates cross-arena references, arenas must outlive rendering" + lifetime caveat no longer applies. + +## Key Files + +| File | Purpose | +|------|---------| +| `resolve.go` | Resolver: orchestration, concurrency, subscriptions | +| `loader.go` | Loader: fetch execution, parallel phases, result merging | +| `resolvable.go` | Resolvable: two-pass walk, JSON rendering | +| `loader_cache.go` | L1/L2 cache operations, LoaderCache interface, prepareCacheKeys, tryL1/L2CacheLoad, populateL1Cache, updateL2Cache | +| `loader_cache_transform.go` | StructuralCopy helpers: structuralCopyNormalized/Denormalized (+ passthrough variants), structuralCopyProjected, normalize/denormalize/project Transform builders | +| `caching.go` | CacheKeyTemplate, EntityQueryCacheKeyTemplate, RootQueryCacheKeyTemplate | +| `cache_analytics.go` | CacheAnalyticsCollector, CacheAnalyticsSnapshot, all event types | +| `extensions_cache_invalidation.go` | processExtensionsCacheInvalidation | +| `fetch.go` | Fetch types (SingleFetch, EntityFetch, BatchEntityFetch), FetchCacheConfiguration | +| `fetchtree.go` | FetchTreeNode tree structure | +| `node_object.go` | Object, Field node types | +| `node_array.go` | Array node type | +| `node.go` | Node interface, NodeKind constants | +| `context.go` | Context, CachingOptions, ExecutionOptions | +| `datasource.go` | DataSource, SubscriptionDataSource interfaces | +| `response.go` | GraphQLResponse, GraphQLResponseInfo | + +## Testing Patterns + +### Unit Test Setup +```go +ctrl := gomock.NewController(t) +defer ctrl.Finish() + +ds := NewMockDataSource(ctrl) +ds.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers http.Header, input []byte) ([]byte, error) { + return []byte(`{"data":{...}}`), nil + }).Times(1) + +loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + +// REQUIRED: Disable singleFlight for unit tests +ctx := NewContext(context.Background()) +ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true +ctx.ExecutionOptions.Caching = CachingOptions{EnableL1Cache: true, EnableL2Cache: true} + +// REQUIRED: Always use arena +ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) +resolvable := NewResolvable(ar, ResolvableOptions{}) +resolvable.Init(ctx, nil, ast.OperationTypeQuery) + +err := loader.LoadGraphQLResponseData(ctx, response, resolvable) +out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) +``` + +### Exact Assertions + +**IMPORTANT**: Always use exact assertions. Never use vague comparisons. + +```go +// GOOD: exact values +assert.Equal(t, 3, hitCount, "should have exactly 3 L1 hits") +assert.Equal(t, int64(12), stats.L1Hits, "should have exactly 12 L1 hits") +assert.Equal(t, 2, accountsCalls, "should call accounts subgraph exactly twice") + +// BAD: hides regressions +assert.GreaterOrEqual(t, hitCount, 1) // DON'T DO THIS +assert.Greater(t, stats.L1Hits, int64(0)) // DON'T DO THIS +``` + +### Snapshot Comments + +Every event line in a `CacheAnalyticsSnapshot` assertion MUST have a brief comment explaining **why** that event occurred: + +```go +// GOOD: explains the "why" +L2Reads: []resolve.CacheKeyEvent{ + {CacheKey: keyUser, Kind: resolve.CacheKeyMiss, ...}, // First request, L2 empty + {CacheKey: keyUser, Kind: resolve.CacheKeyHit, ...}, // Populated by Request 1 +}, + +// BAD: restates the field value +{CacheKey: keyUser, Kind: resolve.CacheKeyMiss, ...}, // this is a miss +``` + +### Cache Log Rule + +Every `defaultCache.ClearLog()` MUST be followed by `defaultCache.GetLog()` with full assertions BEFORE the next `ClearLog()` or end of test. Never clear a log without verifying its contents. + +### Caching Test / AC Sync Rule + +**When modifying or adding caching-related tests**, you MUST also update `docs/entity-caching/ENTITY_CACHING_ACCEPTANCE_CRITERIA.md` (from the repo root). Every AC must link to its covering tests with relative paths, line numbers, and test names. This applies to: +- New caching tests (add test links to the relevant AC) +- Changes to existing caching tests that affect which ACs are covered +- New ACs (must have at least one test link) + +### Run Tests +```bash +go test -run "TestL1Cache" ./v2/pkg/engine/resolve/... -v +go test -run "TestFederationCaching" ./execution/engine/... -v +go test -race ./v2/pkg/engine/resolve/... -v +``` + +## astjson Quick Reference + +```go +// Create values on arena +astjson.ObjectValue(arena) +astjson.ArrayValue(arena) +astjson.StringValue(arena, string) +astjson.StringValueBytes(arena, []byte) +astjson.NumberValue(arena, string) +astjson.TrueValue(arena) +astjson.FalseValue(arena) +astjson.NullValue // Global constant (not a function) + +// Navigate +value.Get(keys...) // Navigate nested path +value.GetArray() // Get array items +value.GetStringBytes() // Get string as []byte +value.Type() // TypeNull, TypeTrue, TypeObject, etc. + +// Mutate +value.Set(arena, key, val) // Set object field +value.SetArrayItem(arena, idx, val) // Set array item + +// Serialize +value.MarshalTo([]byte) // Append JSON to buffer + +// Copy (methods on astjson.Parser) +parser.StructuralCopy(arena, value) // Clone containers, alias leaves +parser.StructuralCopyWithTransform(arena, value, xform) // Clone + rename/project fields + +// Transform +astjson.Transform{ + Entries []TransformEntry // Field rename/project rules + Forced []TransformEntry // Always-included fields + Passthrough bool // true = copy unlisted fields verbatim (L1); + // false = project to listed fields only (L2) +} +``` + +**String handling**: `Value.stringRaw` and `Value.stringHasEscapes` are removed. +Strings are always eagerly decoded during parsing. +`ensureDecodedString()` and the public `EnsureDecoded()` are removed. +`Value.stringNeedsEscape` is kept for `MarshalTo` optimization. diff --git a/v2/pkg/engine/resolve/arena_thread_safety_bench_test.go b/v2/pkg/engine/resolve/arena_thread_safety_bench_test.go new file mode 100644 index 0000000000..28433a12a9 --- /dev/null +++ b/v2/pkg/engine/resolve/arena_thread_safety_bench_test.go @@ -0,0 +1,98 @@ +package resolve + +import ( + "strconv" + "sync" + "testing" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +// cacheLoadAllocs simulates the allocation pattern of tryL2CacheLoad: +// parse cached JSON bytes, create wrapper objects, allocate slices. +func cacheLoadAllocs(a arena.Arena) { + // 1. extractCacheKeysStrings: allocate slice + string bytes + keys := arena.AllocateSlice[string](a, 0, 4) + for range 4 { + buf := arena.AllocateSlice[byte](a, 0, 64) + buf = arena.SliceAppend(a, buf, []byte("cache:entity:Product:id:prod-1234")...) + keys = arena.SliceAppend(a, keys, string(buf)) + } + _ = keys + + // 2. populateFromCache: parse JSON bytes + v, _ := astjson.ParseBytesWithArena(a, []byte(`{"__typename":"Product","id":"prod-1234","name":"Test Product","price":29.99}`)) + + // 3. EntityMergePath wrapping: create wrapper objects + obj := astjson.ObjectValue(a) + obj.Set(a, "product", v) + outer := astjson.ObjectValue(a) + outer.Set(a, "data", obj) + + // 4. denormalize via DeepCopyWithTransform: create new object tree + result := astjson.ObjectValue(a) + result.Set(a, "productName", v.Get("name")) + result.Set(a, "productPrice", v.Get("price")) +} + +// BenchmarkConcurrentArena measures Option A: single arena wrapped with NewConcurrentArena. +// All goroutines allocate from the same mutex-protected arena. +func BenchmarkConcurrentArena(b *testing.B) { + for _, goroutines := range []int{1, 4, 8, 16} { + b.Run(goroutineName(goroutines), func(b *testing.B) { + a := arena.NewConcurrentArena(arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024))) + b.ResetTimer() + for b.Loop() { + var wg sync.WaitGroup + for range goroutines { + wg.Go(func() { + cacheLoadAllocs(a) + }) + } + wg.Wait() + a.Reset() + } + }) + } +} + +// BenchmarkPerGoroutineArena measures Option B: each goroutine gets its own arena from sync.Pool. +// Zero lock contention on allocations. +func BenchmarkPerGoroutineArena(b *testing.B) { + pool := sync.Pool{ + New: func() any { + return arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + }, + } + + for _, goroutines := range []int{1, 4, 8, 16} { + b.Run(goroutineName(goroutines), func(b *testing.B) { + b.ResetTimer() + for b.Loop() { + arenas := make([]arena.Arena, goroutines) + var wg sync.WaitGroup + for i := range goroutines { + ga := pool.Get().(arena.Arena) + arenas[i] = ga + wg.Go(func() { + cacheLoadAllocs(ga) + }) + } + wg.Wait() + for _, ga := range arenas { + ga.Reset() + pool.Put(ga) + } + } + }) + } +} + +func goroutineName(n int) string { + return "goroutines=" + stringFromInt(n) +} + +func stringFromInt(n int) string { + return strconv.Itoa(n) +} diff --git a/v2/pkg/engine/resolve/arena_thread_safety_gc_test.go b/v2/pkg/engine/resolve/arena_thread_safety_gc_test.go new file mode 100644 index 0000000000..e6c01772f7 --- /dev/null +++ b/v2/pkg/engine/resolve/arena_thread_safety_gc_test.go @@ -0,0 +1,178 @@ +package resolve + +import ( + "runtime" + "runtime/debug" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +// TestCrossArenaMergeValuesCreatesShallowReferences proves that MergeValues +// links *Value pointers from the source arena into the target arena's tree +// without deep-copying. Resetting the source arena makes the merged values stale. +// +// This is the foundational invariant for AC-THREAD-04: goroutine arenas that +// hold FromCache values must NOT be released before the response is fully rendered. +func TestCrossArenaMergeValuesCreatesShallowReferences(t *testing.T) { + old := debug.SetGCPercent(1) + defer debug.SetGCPercent(old) + + mainArena := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + goroutineArena := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + + // Parse entity data on the "goroutine" arena (simulates populateFromCache) + fromCache, err := astjson.ParseBytesWithArena(goroutineArena, []byte(`{"id":"prod-1","name":"Widget"}`)) + require.NoError(t, err) + + // Parse the target item on the main arena (simulates the response tree) + item, err := astjson.ParseBytesWithArena(mainArena, []byte(`{"id":"prod-1"}`)) + require.NoError(t, err) + + // Merge: this splices FromCache nodes into item's object tree + merged, err := astjson.MergeValues(mainArena, item, fromCache) + require.NoError(t, err) + + // Verify merged result contains data from both arenas + mergedJSON := string(merged.MarshalTo(nil)) + assert.Equal(t, `{"id":"prod-1","name":"Widget"}`, mergedJSON) + + // Force GC to stress-test pointer validity — goroutine arena is still alive + runtime.GC() + runtime.GC() + + // Values should still be valid since goroutine arena hasn't been reset + postGCJSON := string(merged.MarshalTo(nil)) + assert.Equal(t, mergedJSON, postGCJSON, + "merged values should survive GC when goroutine arena is still alive") + + // Now reset the goroutine arena — simulates premature release + goroutineArena.Reset() + + // Overwrite the freed memory with different data + _, _ = astjson.ParseBytesWithArena(goroutineArena, []byte(`{"id":"STALE","name":"CORRUPTED"}`)) + + // The merged tree still holds pointers into the (now overwritten) goroutine arena. + // This proves MergeValues is shallow — accessing the stale data may panic or + // return corrupted values. + staleOrPanicked := func() (result string, panicked bool) { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + return string(merged.MarshalTo(nil)), false + } + staleJSON, panicked := staleOrPanicked() + assert.True(t, panicked || staleJSON != mergedJSON, + "merged values should be stale or inaccessible after goroutine arena reset — "+ + "this proves MergeValues creates cross-arena shallow references") + + runtime.KeepAlive(mainArena) + runtime.KeepAlive(goroutineArena) +} + +// TestGoroutineArenaLifetimeWithDeferredRelease verifies the correct pattern: +// goroutine arenas survive through the full resolve lifecycle and are only +// released in Free(). This matches the Loader.goroutineArenas design. +func TestGoroutineArenaLifetimeWithDeferredRelease(t *testing.T) { + old := debug.SetGCPercent(1) + defer debug.SetGCPercent(old) + + mainArena := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + + // Simulate multiple goroutines, each with their own arena + const numGoroutines = 4 + goroutineArenas := make([]arena.Arena, numGoroutines) + fromCacheValues := make([]*astjson.Value, numGoroutines) + + for i := range numGoroutines { + goroutineArenas[i] = arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + var err error + fromCacheValues[i], err = astjson.ParseBytesWithArena( + goroutineArenas[i], + []byte(`{"id":"prod-`+stringFromInt(i+1)+`","name":"Product `+stringFromInt(i+1)+`"}`), + ) + require.NoError(t, err) + } + + // Phase 4: merge all FromCache values into main arena tree + items := make([]*astjson.Value, numGoroutines) + for i := range numGoroutines { + items[i], _ = astjson.ParseBytesWithArena(mainArena, []byte(`{"id":"prod-`+stringFromInt(i+1)+`"}`)) + merged, err := astjson.MergeValues(mainArena, items[i], fromCacheValues[i]) + require.NoError(t, err) + items[i] = merged + } + + // GC pressure — all arenas still alive + runtime.GC() + runtime.GC() + + // Verify all merged values are still valid (simulates response rendering) + for i := range numGoroutines { + json := string(items[i].MarshalTo(nil)) + expected := `{"id":"prod-` + stringFromInt(i+1) + `","name":"Product ` + stringFromInt(i+1) + `"}` + assert.Equal(t, expected, json, + "merged value %d should be readable with goroutine arenas alive", i) + } + + // Now release goroutine arenas (simulates Loader.Free()) + for _, a := range goroutineArenas { + a.Reset() + } + + runtime.KeepAlive(mainArena) + runtime.KeepAlive(goroutineArenas) +} + +// Benchmark_CrossArenaGCSafety exercises the goroutine arena pattern under GC +// pressure. Each iteration creates goroutine arenas, merges values, renders the +// result, then releases. runtime.GC() between iterations maximizes pressure on +// any dangling pointers. +func Benchmark_CrossArenaGCSafety(b *testing.B) { + old := debug.SetGCPercent(1) + defer debug.SetGCPercent(old) + + entityJSON := []byte(`{"__typename":"Product","id":"prod-1","name":"Widget","price":9.99}`) + itemJSON := []byte(`{"__typename":"Product","id":"prod-1"}`) + + b.ResetTimer() + for b.Loop() { + mainArena := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + goroutineArena := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + + // Simulate goroutine: parse cached entity + fromCache, err := astjson.ParseBytesWithArena(goroutineArena, entityJSON) + if err != nil { + b.Fatal(err) + } + + // Simulate Phase 4: merge into response tree + item, err := astjson.ParseBytesWithArena(mainArena, itemJSON) + if err != nil { + b.Fatal(err) + } + merged, err := astjson.MergeValues(mainArena, item, fromCache) + if err != nil { + b.Fatal(err) + } + + // Simulate response rendering + buf := merged.MarshalTo(nil) + if len(buf) == 0 { + b.Fatal("empty output") + } + + // Release (correct order: goroutine arena after rendering) + goroutineArena.Reset() + mainArena.Reset() + + // GC pressure between iterations + runtime.GC() + } +} diff --git a/v2/pkg/engine/resolve/batch_entity_cache_test.go b/v2/pkg/engine/resolve/batch_entity_cache_test.go new file mode 100644 index 0000000000..79164536ea --- /dev/null +++ b/v2/pkg/engine/resolve/batch_entity_cache_test.go @@ -0,0 +1,858 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// Helpers to build batch entity cache test fixtures. +// These mirror the integration test scenario: products(upcs: ["top-1","top-2","top-3"]) +// with EntityKeyMappings using ArgumentIsEntityKey=true. + +func newBatchProductsCacheKeyTemplate() *RootQueryCacheKeyTemplate { + return NewRootQueryCacheKeyTemplate( + []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}, + Args: []FieldArgument{ + { + Name: "upcs", + Variable: &ContextVariable{ + Path: []string{"upcs"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + ) +} + +func newBatchProductsProvidesData() *Object { + return &Object{ + Fields: []*Field{ + {Name: []byte("upc"), Value: &Scalar{Path: []string{"upc"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}, Nullable: false}}, + }, + } +} + +func newBatchProductsResponse(rootDS DataSource, cacheKeyTemplate CacheKeyTemplate, providesData *Object) *GraphQLResponse { + var rootProvidesData *Object + if providesData != nil { + rootProvidesData = &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Item: &Object{ + Fields: providesData.Fields, + }, + }, + }, + }, + } + } + + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + // No MergePath for root field fetches - data is merged at root level + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"{ products(upcs: $upcs) { upc name price } }"}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "products"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: rootProvidesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Path: []string{"products"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("upc"), Value: &String{Path: []string{"upc"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + }, + } +} + +// TestBatchEntityCache_AllMissThenAllHit mirrors the integration test +// TestBatchEntityCacheLookup_FullFetch_AllMiss + TestBatchEntityCacheLookup_FullFetch_AllHit. +// Verifies the complete batch entity cache lifecycle at the resolve layer: +// 1. First request: all L2 misses → subgraph fetch → entities written to L2 individually +// 2. Second request: all L2 hits → no subgraph call → entities served from cache +func TestBatchEntityCache_AllMissThenAllHit(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + + // First request: subgraph returns 3 products + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`), nil + }).Times(1) // Only called once across both requests + + response := newBatchProductsResponse( + rootDS, + newBatchProductsCacheKeyTemplate(), + newBatchProductsProvidesData(), + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1","top-2","top-3"]}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + + // Request 1: cold cache → fetch from subgraph, write entities to L2 + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out1 := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, out1) + + // Cache log: 1 batch get (3 misses) + 1 batch set (3 entries) + log := cache.GetLog() + require.Equal(t, 2, len(log)) + assert.Equal(t, "get", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Hit: false}, + }, log[0].Items) + assert.Equal(t, "set", log[1].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, TTL: 30 * time.Second}, + }, log[1].Items) + cache.ClearLog() + + // Verify each entity was stored individually + assert.Equal(t, `{"upc":"top-1","name":"Trilby","price":11}`, string(cache.GetValue(`{"__typename":"Product","key":{"upc":"top-1"}}`))) + assert.Equal(t, `{"upc":"top-2","name":"Fedora","price":22}`, string(cache.GetValue(`{"__typename":"Product","key":{"upc":"top-2"}}`))) + assert.Equal(t, `{"upc":"top-3","name":"Boater","price":33}`, string(cache.GetValue(`{"__typename":"Product","key":{"upc":"top-3"}}`))) + + // Request 2: warm cache → all hits, no subgraph call + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + ctx2 := NewContext(context.Background()) + ctx2.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1","top-2","top-3"]}`)) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL1Cache = false + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + loader2 := &Loader{caches: map[string]LoaderCache{"default": cache}} + err = loader2.LoadGraphQLResponseData(ctx2, response, resolvable2) + require.NoError(t, err) + + out2 := fastjsonext.PrintGraphQLResponse(resolvable2.data, resolvable2.errors) + assert.Equal(t, out1, out2) + + // Cache log: 1 batch get (3 hits), no set + log2 := cache.GetLog() + require.Equal(t, 1, len(log2)) + assert.Equal(t, "get", log2[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Hit: true}, + }, log2[0].Items) +} + +// TestBatchEntityCache_PartialHitFetchesMissing mirrors +// TestBatchEntityCacheLookup_PartialFetch_SomeCached. +// Verifies that when partial batch loading is enabled, only missing entities +// are fetched from the subgraph while cached entities are served from L2. +func TestBatchEntityCache_PartialHitFetchesMissing(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + + // Seed cache with 2 of 3 products + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Value: []byte(`{"upc":"top-1","name":"Trilby","price":11}`)}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Value: []byte(`{"upc":"top-2","name":"Fedora","price":22}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Subgraph should only be called for the missing product (top-3) + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"products":[{"upc":"top-3","name":"Boater","price":33}]}}`), nil + }).Times(1) + + tmpl := newBatchProductsCacheKeyTemplate() + provides := newBatchProductsProvidesData() + + var rootProvidesData *Object + if provides != nil { + rootProvidesData = &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Item: &Object{ + Fields: provides.Fields, + }, + }, + }, + }, + } + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + // No MergePath for root field fetches - data is merged at root level + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: tmpl, + EnablePartialCacheLoad: true, + PartialBatchLoad: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"{ products(upcs: $upcs) { upc name price } }"}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "products"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: rootProvidesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Path: []string{"products"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("upc"), Value: &String{Path: []string{"upc"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1","top-2","top-3"]}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, out) + + // Cache log: 1 get (2 hits, 1 miss) + 1 set (missing entity written) + log := cache.GetLog() + require.Equal(t, 2, len(log)) + assert.Equal(t, "get", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Hit: false}, + }, log[0].Items) + assert.Equal(t, "set", log[1].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, TTL: 30 * time.Second}, + }, log[1].Items) +} + +// TestMultiCandidateCacheValue_MergeCandidatesForWiderProjection exercises +// resolveMultiCandidateCacheValue's merge logic directly. +// Scenario: two EntityKeyMappings produce two cache entries for the same entity. +// Candidate A has {id, name}, candidate B has {id, email}. The request needs +// {id, name, email}. Neither candidate alone validates, but merging them does. +func TestMultiCandidateCacheValue_MergeCandidatesForWiderProjection(t *testing.T) { + cache := NewFakeLoaderCache() + + // Seed cache with two entries for same user via different key mappings + idKey := `{"__typename":"User","key":{"id":"u1"}}` + emailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"id":"u1","name":"Alice"}`), RemainingTTL: 20 * time.Second}, + {Key: emailKey, Value: []byte(`{"id":"u1","email":"a@example.com"}`), RemainingTTL: 10 * time.Second}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + ctrl := gomock.NewController(t) + // Subgraph should NOT be called — merged candidates satisfy the request + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + // ProvidesData requires all three fields: id, name, email + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + {Name: []byte("email"), Value: &Scalar{Path: []string{"email"}, Nullable: false}}, + }, + } + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email"}), + providesData, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + // Merged result should contain all three fields + assert.Equal(t, `{"data":{"user":{"id":"u1","name":"Alice","email":"a@example.com"}}}`, out) + + // Cache log: 1 get (both keys hit) + 1 set (writeback of merged value) + log := cache.GetLog() + require.GreaterOrEqual(t, len(log), 1) + assert.Equal(t, "get", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"User","key":{"id":"u1"}}`, Hit: true}, + {Key: `{"__typename":"User","key":{"email":"a@example.com"}}`, Hit: true}, + }, log[0].Items) +} + +// TestBatchEntityCache_NegativeCacheHit exercises the negative cache path in +// applyRootFetchL2Results (loader_cache.go ~line 1170-1194). +// When the L2 cache holds a null sentinel for an entity and NegativeCacheTTL > 0, +// the entity is served as null from the negative cache without calling the subgraph. +func TestBatchEntityCache_NegativeCacheHit(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + + // Seed cache: top-1 → real data, top-2 → null sentinel, top-3 → real data + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Value: []byte(`{"upc":"top-1","name":"Trilby","price":11}`)}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Value: []byte(`null`)}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Value: []byte(`{"upc":"top-3","name":"Boater","price":33}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Subgraph should NOT be called — all entities are cache hits (including negative) + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + tmpl := newBatchProductsCacheKeyTemplate() + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + NegativeCacheTTL: 10 * time.Second, + CacheKeyTemplate: tmpl, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"{ products(upcs: $upcs) { upc name price } }"}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "products"}}, + OperationType: ast.OperationTypeQuery, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Path: []string{"products"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("upc"), Value: &String{Path: []string{"upc"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1","top-2","top-3"]}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + // top-1 and top-3 have real data; top-2 is null from negative cache + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},null,{"upc":"top-3","name":"Boater","price":33}]}}`, out) + + // Cache log: 1 batch get (3 hits including negative), no set (nothing new to write) + log := cache.GetLog() + require.Equal(t, 1, len(log)) + assert.Equal(t, "get", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: true}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Hit: true}, + }, log[0].Items) // All 3 are cache hits (including null sentinel) +} + +// TestBatchEntityCache_AnalyticsTracking exercises the analytics event recording +// in applyRootFetchL2Results (loader_cache.go ~lines 1150-1156 for misses, +// 1232-1242 for hits). Verifies that CacheKeyHit and CacheKeyMiss events are +// correctly recorded when analytics is enabled. +func TestBatchEntityCache_AnalyticsTracking(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + + // Seed cache with 2 of 3 products (top-1 and top-3 cached, top-2 missing) + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Value: []byte(`{"upc":"top-1","name":"Trilby","price":11}`)}, + {Key: `{"__typename":"Product","key":{"upc":"top-3"}}`, Value: []byte(`{"upc":"top-3","name":"Boater","price":33}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Subgraph called once for the missing product (top-2) + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`), nil + }).Times(1) + + tmpl := newBatchProductsCacheKeyTemplate() + provides := newBatchProductsProvidesData() + + response := newBatchProductsResponse(rootDS, tmpl, provides) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1","top-2","top-3"]}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22},{"upc":"top-3","name":"Boater","price":33}]}}`, out) + + // Verify analytics: 2 L2 hits (top-1, top-3) + 1 L2 miss (top-2) + stats := ctx.GetCacheStats() + require.Equal(t, 3, len(stats.L2Reads)) + assert.Equal(t, CacheKeyEvent{ + CacheKey: `{"__typename":"Product","key":{"upc":"top-1"}}`, + EntityType: "Query", // Root field fetch uses the root type name + Kind: CacheKeyHit, // top-1 was seeded in L2 cache + DataSource: "products", + ByteSize: len(`{"upc":"top-1","name":"Trilby","price":11}`), + CacheAgeMs: stats.L2Reads[0].CacheAgeMs, // dynamic, just preserve actual + }, stats.L2Reads[0]) + assert.Equal(t, CacheKeyEvent{ + CacheKey: `{"__typename":"Product","key":{"upc":"top-2"}}`, + EntityType: "Query", // Root field fetch uses the root type name + Kind: CacheKeyMiss, // top-2 was not in L2 cache + DataSource: "products", + ByteSize: 0, + }, stats.L2Reads[1]) + assert.Equal(t, CacheKeyEvent{ + CacheKey: `{"__typename":"Product","key":{"upc":"top-3"}}`, + EntityType: "Query", // Root field fetch uses the root type name + Kind: CacheKeyHit, // top-3 was seeded in L2 cache + DataSource: "products", + ByteSize: len(`{"upc":"top-3","name":"Boater","price":33}`), + CacheAgeMs: stats.L2Reads[2].CacheAgeMs, // dynamic, just preserve actual + }, stats.L2Reads[2]) +} + +// TestUpdateL2Cache_MutationSkipsWithoutFlag exercises the early return in +// updateL2Cache (loader_cache.go ~lines 1479-1482). +// When the operation is a mutation and enableMutationL2CachePopulation is false, +// updateL2Cache must return immediately without writing to the L2 cache. +func TestUpdateL2Cache_MutationSkipsWithoutFlag(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + + // Subgraph returns a product (mutation result) + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"createProduct":{"upc":"new-1","name":"NewHat","price":99}}}`), nil + }).Times(1) + + tmpl := NewRootQueryCacheKeyTemplate( + []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Mutation", FieldName: "createProduct"}, + Args: []FieldArgument{ + { + Name: "upc", + Variable: &ContextVariable{ + Path: []string{"upc"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + nil, + ) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeMutation}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: tmpl, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"mutation { createProduct(upc: $upc) { upc name price } }"}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Mutation", FieldName: "createProduct"}}, + OperationType: ast.OperationTypeMutation, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "mutation"), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("createProduct"), + Value: &Object{ + Path: []string{"createProduct"}, + Fields: []*Field{ + {Name: []byte("upc"), Value: &String{Path: []string{"upc"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upc":"new-1"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeMutation) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"createProduct":{"upc":"new-1","name":"NewHat","price":99}}}`, out) + + // Cache log: no set operations — mutation without enableMutationL2CachePopulation + // skips L2 cache writes entirely + log := cache.GetLog() + for _, entry := range log { + assert.NotEqual(t, "set", entry.Operation, "mutation without enableMutationL2CachePopulation should not write to L2 cache") + } + + // Verify cache is empty — nothing was stored + assert.Nil(t, cache.GetValue(`{"__typename":"Mutation","field":"createProduct","args":{"upc":"new-1"}}`)) +} + +// TestBatchEntityCache_TracingEnabled exercises the tracing code paths in +// applyRootFetchL2Results and updateL2Cache that record cache trace data +// (L2 miss/hit counts, duration, keys) when TracingOptions.Enable is true. +func TestBatchEntityCache_TracingEnabled(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22}]}}`), nil + }).Times(1) + + response := newBatchProductsResponse( + rootDS, + newBatchProductsCacheKeyTemplate(), + newBatchProductsProvidesData(), + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1","top-2"]}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + // Enable tracing to exercise tracing branches in applyRootFetchL2Results + updateL2Cache + ctx.TracingOptions = TraceOptions{ + Enable: true, + EnablePredictableDebugTimings: true, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11},{"upc":"top-2","name":"Fedora","price":22}]}}`, out) + + // Cache log: 1 get (2 misses) + 1 set (2 entries) + log := cache.GetLog() + require.Equal(t, 2, len(log)) + assert.Equal(t, "get", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + {Key: `{"__typename":"Product","key":{"upc":"top-2"}}`, Hit: false}, + }, log[0].Items) + assert.Equal(t, "set", log[1].Operation) +} + +// TestBatchEntityCache_L2DisabledSkipsCache exercises the L2 disabled early return +// in tryCacheLoad. When EnableL2Cache is false, no cache operations should occur. +func TestBatchEntityCache_L2DisabledSkipsCache(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + // Seed cache - but it should never be read since L2 is disabled + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"upc":"top-1"}}`, Value: []byte(`{"upc":"top-1","name":"Trilby","price":11}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11}]}}`), nil + }).Times(1) + + response := newBatchProductsResponse( + rootDS, + newBatchProductsCacheKeyTemplate(), + newBatchProductsProvidesData(), + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1"]}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = false // L2 disabled + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11}]}}`, out) + + // No cache operations should have occurred + assert.Equal(t, 0, len(cache.GetLog())) +} + +// TestBatchEntityCache_KeyInterceptorApplied exercises the L2CacheKeyInterceptor +// path. When an interceptor is set, it transforms the cache keys before L2 read/write. +func TestBatchEntityCache_KeyInterceptorApplied(t *testing.T) { + ctrl := gomock.NewController(t) + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11}]}}`), nil + }).Times(1) + + response := newBatchProductsResponse( + rootDS, + newBatchProductsCacheKeyTemplate(), + newBatchProductsProvidesData(), + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"upcs":["top-1"]}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + // Interceptor prepends "tenant42:" to every cache key + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(ctx context.Context, key string, info L2CacheKeyInterceptorInfo) string { + return "tenant42:" + key + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"upc":"top-1","name":"Trilby","price":11}]}}`, out) + + // Cache key should have been transformed by the interceptor + log := cache.GetLog() + require.GreaterOrEqual(t, len(log), 1) + // The get operation should use the intercepted key + assert.Equal(t, "get", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `tenant42:{"__typename":"Product","key":{"upc":"top-1"}}`, Hit: false}, + }, log[0].Items) +} diff --git a/v2/pkg/engine/resolve/cache_analytics.go b/v2/pkg/engine/resolve/cache_analytics.go new file mode 100644 index 0000000000..6599ae3446 --- /dev/null +++ b/v2/pkg/engine/resolve/cache_analytics.go @@ -0,0 +1,1086 @@ +package resolve + +import ( + "slices" + "strings" + "sync" + "time" + "unicode/utf8" + + "github.com/cespare/xxhash/v2" + + "github.com/wundergraph/astjson" +) + +// CacheLevel indicates whether a cache operation targets L1 or L2. +type CacheLevel uint8 + +const ( + CacheLevelL1 CacheLevel = iota + 1 + CacheLevelL2 +) + +// CacheKeyEventKind classifies the result of a cache key lookup. +type CacheKeyEventKind uint8 + +const ( + CacheKeyHit CacheKeyEventKind = iota + 1 + CacheKeyMiss // Key not found or value nil + CacheKeyPartialHit // Key found but missing required fields +) + +// FieldSource indicates where the data for an entity came from. +type FieldSource uint8 + +const ( + FieldSourceSubgraph FieldSource = iota // Default: data came from subgraph fetch + FieldSourceL1 // Data came from L1 (per-request) cache + FieldSourceL2 // Data came from L2 (external) cache + FieldSourceShadowCached // Cached value saved during shadow comparison +) + +// CacheOperationSource identifies what triggered a cache operation. +type CacheOperationSource string + +const ( + CacheSourceQuery CacheOperationSource = "query" + CacheSourceMutation CacheOperationSource = "mutation" + CacheSourceSubscription CacheOperationSource = "subscription" +) + +// CacheKeyEvent records a single cache key lookup result. +type CacheKeyEvent struct { + CacheKey string + EntityType string + Kind CacheKeyEventKind + DataSource string + ByteSize int + CacheAgeMs int64 // age of cached entry in ms (L2 hits only, 0 = unknown) + Shadow bool // true if this event occurred in shadow mode +} + +// CacheWriteEvent records a single cache write operation. +type CacheWriteEvent struct { + CacheKey string + EntityType string + ByteSize int + DataSource string + CacheLevel CacheLevel + TTL time.Duration + Shadow bool // true if this write occurred in shadow mode + Source CacheOperationSource // what triggered this write (query/mutation/subscription) + WriteReason CacheWriteReason // why this write occurred (refresh/backfill/derived, empty for non-EntityKeyMappings) +} + +// FetchTimingEvent records the duration of a subgraph fetch or cache lookup. +type FetchTimingEvent struct { + DataSource string // subgraph name + EntityType string // entity type (empty for root fetches) + DurationMs int64 // time spent on this operation in milliseconds + Source FieldSource // what handled this: Subgraph (fetch), L2 (cache GET) + ItemCount int // number of entities in this fetch/lookup + IsEntityFetch bool // true for _entities, false for root field + HTTPStatusCode int // HTTP status code from subgraph response (0 for cache hits) + ResponseBytes int // response body size in bytes (0 for cache hits) + TTFBMs int64 // time to first byte in milliseconds (0 when unavailable) +} + +// SubgraphErrorEvent records a subgraph error for analytics. +type SubgraphErrorEvent struct { + DataSource string // subgraph name + EntityType string // entity type (empty for root fetches) + Message string // error message (truncated for safety) + Code string // error code from errors[0].extensions.code (empty if not present) +} + +// EntityFieldHash stores an xxhash of a scalar field value on an entity type, +// along with the entity's key data and the source of the data. +type EntityFieldHash struct { + EntityType string + FieldName string + FieldHash uint64 // xxhash of the non-key field value + KeyRaw string // raw key JSON e.g. {"id":"1234"} (when HashKeys=false) + KeyHash uint64 // xxhash of key JSON (when HashKeys=true) + Source FieldSource // where the entity data came from (L1/L2/Subgraph) +} + +// EntityTypeInfo holds the entity type name and its instance count. +type EntityTypeInfo struct { + TypeName string + Count int + UniqueKeys int // number of distinct entity keys +} + +// entityCount is an internal type for accumulating entity counts. +type entityCount struct { + typeName string + count int + uniqueKeys map[string]struct{} // set of seen entity key JSONs +} + +// entitySourceRecord records where each entity's data came from. +type entitySourceRecord struct { + entityType string + keyJSON string + source FieldSource +} + +// ShadowComparisonEvent records a comparison between cached and fresh data in shadow mode. +type ShadowComparisonEvent struct { + CacheKey string // cache key for correlation + EntityType string // entity type name + IsFresh bool // true if ProvidesData fields match between cached and fresh + CachedHash uint64 // xxhash of extracted ProvidesData fields from cached value + FreshHash uint64 // xxhash of extracted ProvidesData fields from fresh value + CachedBytes int // byte size of cached ProvidesData fields + FreshBytes int // byte size of fresh ProvidesData fields + DataSource string // which subgraph provided the data (e.g. "accounts") + CacheAgeMs int64 // how old the cached entry was in milliseconds (0 = unknown) + ConfiguredTTL time.Duration // TTL configured for this entity type +} + +// MutationEvent records that a mutation returned a cacheable entity. +// Recorded during mutation execution by proactively comparing the mutation response +// with the L2 cached value for the same entity. +type MutationEvent struct { + MutationRootField string // e.g., "updateUsername" + EntityType string // e.g., "User" + EntityCacheKey string // display key e.g. {"__typename":"User","key":{"id":"1234"}} + HadCachedValue bool // true if L2 had a cached value for this entity + IsStale bool // true if cached value differs from mutation response (always false when HadCachedValue=false) + CachedHash uint64 // xxhash of cached ProvidesData fields (0 when HadCachedValue=false) + FreshHash uint64 // xxhash of mutation response ProvidesData fields + CachedBytes int // 0 when HadCachedValue=false + FreshBytes int + Source CacheOperationSource // what triggered this event (query/mutation/subscription) +} + +// CacheOperationError records a cache operation (Get/Set/Delete) that returned an error. +// Cache errors are non-fatal (the engine falls back to subgraph fetch), but tracking them +// in analytics allows operators to detect cache infrastructure issues. +type CacheOperationError struct { + Operation string // "get", "set", "set_negative", or "delete" + CacheName string // named cache instance + EntityType string // entity type (empty for root fetches) + DataSource string // subgraph name + Message string // error message (truncated for safety) + ItemCount int // number of keys involved in the failed operation +} + +// HeaderImpactEvent records a fresh fetch that wrote to L2 cache with header-prefixed keys. +// A cross-request consumer can aggregate these events: when the same BaseKey appears with +// different HeaderHash values but identical ResponseHash values, the forwarded headers +// do not affect the subgraph response, and IncludeSubgraphHeaderPrefix can be disabled. +type HeaderImpactEvent struct { + BaseKey string // cache key WITHOUT header prefix (stable identity for grouping) + HeaderHash uint64 // hash of forwarded headers for this subgraph + ResponseHash uint64 // xxhash of the response value bytes written to L2 + EntityType string // entity type (e.g., "User") or "Query" for root fields + DataSource string // subgraph name +} + +// CacheAnalyticsCollector accumulates cache analytics events during request execution. +// All methods are designed to be called from a single goroutine (main thread) except +// where noted. L2 events from goroutines are accumulated on per-result slices and +// merged on the main thread via MergeL2Events. +type CacheAnalyticsCollector struct { + l1KeyEvents []CacheKeyEvent + l2KeyEvents []CacheKeyEvent + writeEvents []CacheWriteEvent + fieldHashes []EntityFieldHash // flat slice (was: nested maps) + entityCounts []entityCount // simple type→count (was: map) + entitySources []entitySourceRecord // records where each entity's data came from + fetchTimings []FetchTimingEvent // main thread timings + errorEvents []SubgraphErrorEvent // main thread errors + shadowComparisons []ShadowComparisonEvent // shadow mode staleness comparison events + mutationEvents []MutationEvent // mutation entity impact events + headerImpactEvents []HeaderImpactEvent // header impact events for L2 writes with header prefix + cacheOpErrors []CacheOperationError // cache operation errors (main thread) + xxh *xxhash.Digest +} + +// NewCacheAnalyticsCollector creates a new collector with pre-allocated slices. +func NewCacheAnalyticsCollector() *CacheAnalyticsCollector { + return &CacheAnalyticsCollector{ + l1KeyEvents: make([]CacheKeyEvent, 0, 16), + l2KeyEvents: make([]CacheKeyEvent, 0, 16), + writeEvents: make([]CacheWriteEvent, 0, 8), + fieldHashes: make([]EntityFieldHash, 0, 32), + entityCounts: make([]entityCount, 0, 4), + entitySources: make([]entitySourceRecord, 0, 16), + fetchTimings: make([]FetchTimingEvent, 0, 8), + errorEvents: make([]SubgraphErrorEvent, 0, 4), + xxh: xxhash.New(), + } +} + +// cacheAnalyticsPool recycles collectors across requests. The allocator profile +// on the cache demo showed ~5 GB / 8% of all allocations originating from +// `NewCacheAnalyticsCollector` alone (fresh collector per request × 13K rps), +// driving GC pressure that shows up in the p99 tail. Pooling retains the +// pre-allocated slice capacities across requests; `ResetForReuse` truncates +// them without releasing the backing arrays. +var cacheAnalyticsPool = sync.Pool{ + New: func() any { return NewCacheAnalyticsCollector() }, +} + +// AcquireCacheAnalyticsCollector returns a collector ready for reuse. The +// returned collector must be released via ReleaseCacheAnalyticsCollector once +// the caller is done (typically via Context.Free()). +func AcquireCacheAnalyticsCollector() *CacheAnalyticsCollector { + c := cacheAnalyticsPool.Get().(*CacheAnalyticsCollector) + c.ResetForReuse() + return c +} + +// ReleaseCacheAnalyticsCollector returns the collector to the pool. Safe to +// call with nil. +func ReleaseCacheAnalyticsCollector(c *CacheAnalyticsCollector) { + if c == nil { + return + } + cacheAnalyticsPool.Put(c) +} + +// ResetForReuse clears the collector's accumulated events while retaining the +// backing array capacities. Safe to call on a collector that was never used. +func (c *CacheAnalyticsCollector) ResetForReuse() { + c.l1KeyEvents = c.l1KeyEvents[:0] + c.l2KeyEvents = c.l2KeyEvents[:0] + c.writeEvents = c.writeEvents[:0] + c.fieldHashes = c.fieldHashes[:0] + c.entityCounts = c.entityCounts[:0] + c.entitySources = c.entitySources[:0] + c.fetchTimings = c.fetchTimings[:0] + c.errorEvents = c.errorEvents[:0] + c.shadowComparisons = c.shadowComparisons[:0] + c.mutationEvents = c.mutationEvents[:0] + c.headerImpactEvents = c.headerImpactEvents[:0] + c.cacheOpErrors = c.cacheOpErrors[:0] + if c.xxh != nil { + c.xxh.Reset() + } +} + +// RecordL1KeyEvent records an L1 cache key lookup event. Main thread only. +func (c *CacheAnalyticsCollector) RecordL1KeyEvent(kind CacheKeyEventKind, entityType, cacheKey, dataSource string, byteSize int) { + c.l1KeyEvents = append(c.l1KeyEvents, CacheKeyEvent{ + CacheKey: cacheKey, + EntityType: entityType, + Kind: kind, + DataSource: dataSource, + ByteSize: byteSize, + }) +} + +// RecordL2KeyEvent records an L2 cache key lookup event. Main thread only. +// It is exported for external consumers such as cosmo router; this repository +// has no production caller. If cosmo no longer uses it, internalize it in the next breaking window. +// Use MergeL2Events to merge events collected on per-result slices from goroutines. +func (c *CacheAnalyticsCollector) RecordL2KeyEvent(kind CacheKeyEventKind, entityType, cacheKey, dataSource string, byteSize int) { + c.l2KeyEvents = append(c.l2KeyEvents, CacheKeyEvent{ + CacheKey: cacheKey, + EntityType: entityType, + Kind: kind, + DataSource: dataSource, + ByteSize: byteSize, + }) +} + +// MergeL2Events merges L2 events collected on a per-result slice (from goroutines) +// into the collector. Must be called on the main thread. +func (c *CacheAnalyticsCollector) MergeL2Events(events []CacheKeyEvent) { + c.l2KeyEvents = append(c.l2KeyEvents, events...) +} + +// RecordWrite records a cache write event. Main thread only. +func (c *CacheAnalyticsCollector) RecordWrite(event CacheWriteEvent) { + c.writeEvents = append(c.writeEvents, event) +} + +// HashFieldValue computes an xxhash of the given field value bytes and records it +// as an EntityFieldHash with entity key and source information. +func (c *CacheAnalyticsCollector) HashFieldValue(entityType, fieldName string, valueBytes []byte, keyRaw string, keyHash uint64, source FieldSource) { + c.xxh.Reset() + _, _ = c.xxh.Write(valueBytes) + hash := c.xxh.Sum64() + + c.fieldHashes = append(c.fieldHashes, EntityFieldHash{ + EntityType: entityType, + FieldName: fieldName, + FieldHash: hash, + KeyRaw: keyRaw, + KeyHash: keyHash, + Source: source, + }) +} + +// IncrementEntityCount increments the instance count for the given entity type. +// If keyJSON is non-empty, it is tracked for unique key counting. +func (c *CacheAnalyticsCollector) IncrementEntityCount(typeName string, keyJSON string) { + for i := range c.entityCounts { + if c.entityCounts[i].typeName == typeName { + c.entityCounts[i].count++ + if keyJSON != "" { + if c.entityCounts[i].uniqueKeys == nil { + c.entityCounts[i].uniqueKeys = make(map[string]struct{}, 4) + } + c.entityCounts[i].uniqueKeys[keyJSON] = struct{}{} + } + return + } + } + var keys map[string]struct{} + if keyJSON != "" { + keys = map[string]struct{}{keyJSON: {}} + } + c.entityCounts = append(c.entityCounts, entityCount{typeName: typeName, count: 1, uniqueKeys: keys}) +} + +// RecordEntitySource records the source of data for a specific entity instance. +// Main thread only. +func (c *CacheAnalyticsCollector) RecordEntitySource(entityType, keyJSON string, source FieldSource) { + c.entitySources = append(c.entitySources, entitySourceRecord{ + entityType: entityType, + keyJSON: keyJSON, + source: source, + }) +} + +// MergeEntitySources merges entity source records collected in goroutines +// into the collector. Must be called on the main thread. +func (c *CacheAnalyticsCollector) MergeEntitySources(sources []entitySourceRecord) { + c.entitySources = append(c.entitySources, sources...) +} + +// RecordFetchTiming records a fetch timing event. Main thread only. +// It is exported for external consumers such as cosmo router; this repository +// has no production caller. If cosmo no longer uses it, internalize it in the next breaking window. +func (c *CacheAnalyticsCollector) RecordFetchTiming(event FetchTimingEvent) { + c.fetchTimings = append(c.fetchTimings, event) +} + +// MergeL2FetchTimings merges fetch timing events collected in goroutines into the collector. +// Must be called on the main thread. +func (c *CacheAnalyticsCollector) MergeL2FetchTimings(timings []FetchTimingEvent) { + c.fetchTimings = append(c.fetchTimings, timings...) +} + +// RecordError records a subgraph error event. Main thread only. +func (c *CacheAnalyticsCollector) RecordError(event SubgraphErrorEvent) { + c.errorEvents = append(c.errorEvents, event) +} + +// MergeL2Errors merges error events collected in goroutines into the collector. +// Must be called on the main thread. +func (c *CacheAnalyticsCollector) MergeL2Errors(events []SubgraphErrorEvent) { + c.errorEvents = append(c.errorEvents, events...) +} + +// RecordShadowComparison records a shadow mode comparison between cached and fresh data. +// Main thread only. +func (c *CacheAnalyticsCollector) RecordShadowComparison(event ShadowComparisonEvent) { + c.shadowComparisons = append(c.shadowComparisons, event) +} + +// RecordMutationEvent records a mutation entity impact event. Main thread only. +func (c *CacheAnalyticsCollector) RecordMutationEvent(event MutationEvent) { + c.mutationEvents = append(c.mutationEvents, event) +} + +// RecordHeaderImpactEvent records a header impact event. Main thread only. +func (c *CacheAnalyticsCollector) RecordHeaderImpactEvent(event HeaderImpactEvent) { + c.headerImpactEvents = append(c.headerImpactEvents, event) +} + +// RecordCacheOperationError records a cache operation error. Main thread only. +func (c *CacheAnalyticsCollector) RecordCacheOperationError(event CacheOperationError) { + c.cacheOpErrors = append(c.cacheOpErrors, event) +} + +// MergeL2CacheOpErrors merges cache operation errors collected in goroutines into the collector. +// Must be called on the main thread. +func (c *CacheAnalyticsCollector) MergeL2CacheOpErrors(events []CacheOperationError) { + c.cacheOpErrors = append(c.cacheOpErrors, events...) +} + +// EntitySource returns the source for a given entity instance. +// Returns FieldSourceSubgraph if no record is found (the default). +func (c *CacheAnalyticsCollector) EntitySource(entityType, keyJSON string) FieldSource { + for i := len(c.entitySources) - 1; i >= 0; i-- { + if c.entitySources[i].entityType == entityType && c.entitySources[i].keyJSON == keyJSON { + return c.entitySources[i].source + } + } + return FieldSourceSubgraph +} + +// Snapshot produces a read-only CacheAnalyticsSnapshot from the collected data. +// Duplicate events (same cache key appearing multiple times due to entity batch positions) +// are consolidated: consumers see one event per unique (CacheKey, Kind) for reads, +// one per CacheKey for writes, and one per CacheKey for shadow comparisons. +func (c *CacheAnalyticsCollector) Snapshot() CacheAnalyticsSnapshot { + // Clone slices whose backing arrays are otherwise shared with the pooled + // collector. Snapshot() is called right before ReleaseCacheAnalyticsCollector, + // and the next request's ResetForReuse + Record* calls would overwrite the + // caller's view of these slices. The deduplicate* helpers already allocate. + snap := CacheAnalyticsSnapshot{ + L1Reads: deduplicateKeyEvents(c.l1KeyEvents), + L2Reads: deduplicateKeyEvents(c.l2KeyEvents), + FieldHashes: slices.Clone(c.fieldHashes), + FetchTimings: slices.Clone(c.fetchTimings), + ErrorEvents: slices.Clone(c.errorEvents), + ShadowComparisons: deduplicateShadowComparisons(c.shadowComparisons), + MutationEvents: slices.Clone(c.mutationEvents), + HeaderImpactEvents: deduplicateHeaderImpactEvents(c.headerImpactEvents), + CacheOpErrors: slices.Clone(c.cacheOpErrors), + } + + // Split write events into L1 and L2, then deduplicate each + for _, we := range c.writeEvents { + switch we.CacheLevel { + case CacheLevelL1: + snap.L1Writes = append(snap.L1Writes, we) + case CacheLevelL2: + snap.L2Writes = append(snap.L2Writes, we) + } + } + snap.L1Writes = deduplicateWriteEvents(snap.L1Writes) + snap.L2Writes = deduplicateWriteEvents(snap.L2Writes) + + // Build EntityTypes slice from entityCounts + if len(c.entityCounts) > 0 { + snap.EntityTypes = make([]EntityTypeInfo, len(c.entityCounts)) + for i, ec := range c.entityCounts { + snap.EntityTypes[i] = EntityTypeInfo{ + TypeName: ec.typeName, + Count: ec.count, + UniqueKeys: len(ec.uniqueKeys), + } + } + } + + return snap +} + +// deduplicateKeyEvents removes duplicate cache key events, keeping the first +// occurrence for each (CacheKey, Kind) pair. This consolidates events where the +// same entity key appears multiple times in a batch (e.g., User 1234 referenced +// by two different reviews). +func deduplicateKeyEvents(events []CacheKeyEvent) []CacheKeyEvent { + if len(events) == 0 { + return events + } + type dedupKey struct { + cacheKey string + kind CacheKeyEventKind + } + seen := make(map[dedupKey]struct{}, len(events)) + out := make([]CacheKeyEvent, 0, len(events)) + for _, ev := range events { + k := dedupKey{cacheKey: ev.CacheKey, kind: ev.Kind} + if _, ok := seen[k]; ok { + continue + } + seen[k] = struct{}{} + out = append(out, ev) + } + return out +} + +// deduplicateWriteEvents removes duplicate cache write events, keeping the first +// occurrence for each CacheKey. Within a single cache level, the same key written +// multiple times (from batch positions referencing the same entity) is one operation. +func deduplicateWriteEvents(events []CacheWriteEvent) []CacheWriteEvent { + if len(events) == 0 { + return events + } + seen := make(map[string]struct{}, len(events)) + out := make([]CacheWriteEvent, 0, len(events)) + for _, ev := range events { + if _, ok := seen[ev.CacheKey]; ok { + continue + } + seen[ev.CacheKey] = struct{}{} + out = append(out, ev) + } + return out +} + +// deduplicateShadowComparisons removes duplicate shadow comparison events, +// keeping the first occurrence for each CacheKey. +func deduplicateShadowComparisons(events []ShadowComparisonEvent) []ShadowComparisonEvent { + if len(events) == 0 { + return events + } + seen := make(map[string]struct{}, len(events)) + out := make([]ShadowComparisonEvent, 0, len(events)) + for _, ev := range events { + if _, ok := seen[ev.CacheKey]; ok { + continue + } + seen[ev.CacheKey] = struct{}{} + out = append(out, ev) + } + return out +} + +// deduplicateHeaderImpactEvents removes duplicate header impact events, +// keeping the first occurrence for each unique event identity. +func deduplicateHeaderImpactEvents(events []HeaderImpactEvent) []HeaderImpactEvent { + if len(events) == 0 { + return events + } + seen := make(map[HeaderImpactEvent]struct{}, len(events)) + out := make([]HeaderImpactEvent, 0, len(events)) + for _, ev := range events { + if _, ok := seen[ev]; ok { + continue + } + seen[ev] = struct{}{} + out = append(out, ev) + } + return out +} + +// CacheAnalyticsSnapshot is a read-only snapshot of cache analytics data. +// Requires EnableCacheAnalytics to be set; returns empty when disabled. +type CacheAnalyticsSnapshot struct { + // Cache read events (nil when analytics disabled) + L1Reads []CacheKeyEvent + L2Reads []CacheKeyEvent + + // Cache write events, split by level + L1Writes []CacheWriteEvent + L2Writes []CacheWriteEvent + + // Fetch timing events + FetchTimings []FetchTimingEvent + + // Subgraph error events + ErrorEvents []SubgraphErrorEvent + + // Field value hashes: flat slice of EntityFieldHash + FieldHashes []EntityFieldHash + + // Entity tracking: type + count inline + EntityTypes []EntityTypeInfo + + // Shadow mode comparison events + ShadowComparisons []ShadowComparisonEvent + + // Mutation entity impact events + MutationEvents []MutationEvent + + // Header impact events (L2 writes with header-prefixed keys) + HeaderImpactEvents []HeaderImpactEvent + + // Cache operation errors (Get/Set/Delete failures) + CacheOpErrors []CacheOperationError +} + +// L1HitRate returns the L1 cache hit rate as a float64 in [0, 1]. +// Returns 0 if there are no L1 events. +func (s *CacheAnalyticsSnapshot) L1HitRate() float64 { + var hits, total int64 + for _, ev := range s.L1Reads { + total++ + if ev.Kind == CacheKeyHit { + hits++ + } + } + if total == 0 { + return 0 + } + return float64(hits) / float64(total) +} + +// L2HitRate returns the L2 cache hit rate as a float64 in [0, 1]. +// Returns 0 if there are no L2 events. +func (s *CacheAnalyticsSnapshot) L2HitRate() float64 { + var hits, total int64 + for _, ev := range s.L2Reads { + total++ + if ev.Kind == CacheKeyHit { + hits++ + } + } + if total == 0 { + return 0 + } + return float64(hits) / float64(total) +} + +// CachedBytesServed returns the total bytes served from cache (L1 + L2 hits). +func (s *CacheAnalyticsSnapshot) CachedBytesServed() int64 { + var total int64 + for _, ev := range s.L1Reads { + if ev.Kind == CacheKeyHit { + total += int64(ev.ByteSize) + } + } + for _, ev := range s.L2Reads { + if ev.Kind == CacheKeyHit { + total += int64(ev.ByteSize) + } + } + return total +} + +// EntityTypeCacheStats holds per-entity-type cache statistics. +type EntityTypeCacheStats struct { + L1Hits int64 + L1Misses int64 + L2Hits int64 + L2Misses int64 + PartialHits int64 + BytesServed int64 + BytesWritten int64 +} + +// EventsByEntityType returns cache statistics grouped by entity type. +func (s *CacheAnalyticsSnapshot) EventsByEntityType() map[string]EntityTypeCacheStats { + result := make(map[string]EntityTypeCacheStats) + for _, ev := range s.L1Reads { + stats := result[ev.EntityType] + switch ev.Kind { + case CacheKeyHit: + stats.L1Hits++ + stats.BytesServed += int64(ev.ByteSize) + case CacheKeyMiss: + stats.L1Misses++ + case CacheKeyPartialHit: + stats.L1Misses++ + stats.PartialHits++ + } + result[ev.EntityType] = stats + } + for _, ev := range s.L2Reads { + stats := result[ev.EntityType] + switch ev.Kind { + case CacheKeyHit: + stats.L2Hits++ + stats.BytesServed += int64(ev.ByteSize) + case CacheKeyMiss: + stats.L2Misses++ + case CacheKeyPartialHit: + stats.L2Misses++ + stats.PartialHits++ + } + result[ev.EntityType] = stats + } + for _, ev := range s.L1Writes { + stats := result[ev.EntityType] + stats.BytesWritten += int64(ev.ByteSize) + result[ev.EntityType] = stats + } + for _, ev := range s.L2Writes { + stats := result[ev.EntityType] + stats.BytesWritten += int64(ev.ByteSize) + result[ev.EntityType] = stats + } + return result +} + +// DataSourceCacheStats holds per-data-source cache statistics. +type DataSourceCacheStats struct { + L1Hits int64 + L1Misses int64 + L2Hits int64 + L2Misses int64 + BytesServed int64 + BytesWritten int64 +} + +// EventsByDataSource returns cache statistics grouped by data source name. +func (s *CacheAnalyticsSnapshot) EventsByDataSource() map[string]DataSourceCacheStats { + result := make(map[string]DataSourceCacheStats) + for _, ev := range s.L1Reads { + stats := result[ev.DataSource] + switch ev.Kind { + case CacheKeyHit: + stats.L1Hits++ + stats.BytesServed += int64(ev.ByteSize) + case CacheKeyMiss, CacheKeyPartialHit: + stats.L1Misses++ + } + result[ev.DataSource] = stats + } + for _, ev := range s.L2Reads { + stats := result[ev.DataSource] + switch ev.Kind { + case CacheKeyHit: + stats.L2Hits++ + stats.BytesServed += int64(ev.ByteSize) + case CacheKeyMiss, CacheKeyPartialHit: + stats.L2Misses++ + } + result[ev.DataSource] = stats + } + for _, ev := range s.L1Writes { + stats := result[ev.DataSource] + stats.BytesWritten += int64(ev.ByteSize) + result[ev.DataSource] = stats + } + for _, ev := range s.L2Writes { + stats := result[ev.DataSource] + stats.BytesWritten += int64(ev.ByteSize) + result[ev.DataSource] = stats + } + return result +} + +// L1HitCount returns the number of L1 cache hits. +func (s *CacheAnalyticsSnapshot) L1HitCount() int64 { + var count int64 + for _, ev := range s.L1Reads { + if ev.Kind == CacheKeyHit { + count++ + } + } + return count +} + +// L2HitCount returns the number of L2 cache hits. +func (s *CacheAnalyticsSnapshot) L2HitCount() int64 { + var count int64 + for _, ev := range s.L2Reads { + if ev.Kind == CacheKeyHit { + count++ + } + } + return count +} + +// PartialHitRate returns the fraction of cache lookups that were partial hits. +// Returns 0 if there are no cache events. +func (s *CacheAnalyticsSnapshot) PartialHitRate() float64 { + var partialHits, total int64 + for _, ev := range s.L1Reads { + total++ + if ev.Kind == CacheKeyPartialHit { + partialHits++ + } + } + for _, ev := range s.L2Reads { + total++ + if ev.Kind == CacheKeyPartialHit { + partialHits++ + } + } + if total == 0 { + return 0 + } + return float64(partialHits) / float64(total) +} + +// ErrorsByDataSource returns error counts grouped by data source name. +func (s *CacheAnalyticsSnapshot) ErrorsByDataSource() map[string]int { + if len(s.ErrorEvents) == 0 { + return nil + } + result := make(map[string]int, len(s.ErrorEvents)) + for _, ev := range s.ErrorEvents { + result[ev.DataSource]++ + } + return result +} + +// ErrorRate returns the fraction of subgraph fetches that resulted in errors. +// Denominator is total subgraph fetches (FieldSourceSubgraph timings) + errors. +// Returns 0 if there are no fetches or errors. +func (s *CacheAnalyticsSnapshot) ErrorRate() float64 { + errorCount := int64(len(s.ErrorEvents)) + if errorCount == 0 { + return 0 + } + var subgraphFetches int64 + for _, ft := range s.FetchTimings { + if ft.Source == FieldSourceSubgraph { + subgraphFetches++ + } + } + total := subgraphFetches + errorCount + if total == 0 { + return 0 + } + return float64(errorCount) / float64(total) +} + +// AvgFetchDurationMs returns the average fetch duration in milliseconds for the given data source. +// Only considers subgraph fetches (not cache lookups). Returns 0 if no fetches recorded. +func (s *CacheAnalyticsSnapshot) AvgFetchDurationMs(dataSource string) int64 { + var total, count int64 + for _, ft := range s.FetchTimings { + if ft.DataSource == dataSource && ft.Source == FieldSourceSubgraph { + total += ft.DurationMs + count++ + } + } + if count == 0 { + return 0 + } + return total / count +} + +// TotalTimeSavedMs estimates total time saved by cache hits in milliseconds. +// For each data source, multiplies the average fetch duration by the number of cache hits. +func (s *CacheAnalyticsSnapshot) TotalTimeSavedMs() int64 { + // Compute average fetch duration per datasource + type dsStats struct { + totalDuration int64 + fetchCount int64 + hitCount int64 + } + dss := make(map[string]*dsStats) + for _, ft := range s.FetchTimings { + ds, ok := dss[ft.DataSource] + if !ok { + ds = &dsStats{} + dss[ft.DataSource] = ds + } + if ft.Source == FieldSourceSubgraph { + ds.totalDuration += ft.DurationMs + ds.fetchCount++ + } + } + // Count cache hits per datasource from key events + for _, ev := range s.L1Reads { + if ev.Kind == CacheKeyHit { + ds, ok := dss[ev.DataSource] + if !ok { + ds = &dsStats{} + dss[ev.DataSource] = ds + } + ds.hitCount++ + } + } + for _, ev := range s.L2Reads { + if ev.Kind == CacheKeyHit { + ds, ok := dss[ev.DataSource] + if !ok { + ds = &dsStats{} + dss[ev.DataSource] = ds + } + ds.hitCount++ + } + } + var totalSaved int64 + for _, ds := range dss { + if ds.fetchCount > 0 && ds.hitCount > 0 { + avgDuration := ds.totalDuration / ds.fetchCount + totalSaved += avgDuration * ds.hitCount + } + } + return totalSaved +} + +// AvgCacheAgeMs returns the average cache age in milliseconds for L2 hits of the given entity type. +// Only considers L2 hits with known age (CacheAgeMs > 0). Returns 0 if no data available. +// If entityType is empty, returns the average across all entity types. +func (s *CacheAnalyticsSnapshot) AvgCacheAgeMs(entityType string) int64 { + var total, count int64 + for _, ev := range s.L2Reads { + if ev.Kind == CacheKeyHit && ev.CacheAgeMs > 0 { + if entityType == "" || ev.EntityType == entityType { + total += ev.CacheAgeMs + count++ + } + } + } + if count == 0 { + return 0 + } + return total / count +} + +// MaxCacheAgeMs returns the maximum cache age in milliseconds across all L2 hits. +// Returns 0 if no L2 hits with known age exist. +func (s *CacheAnalyticsSnapshot) MaxCacheAgeMs() int64 { + var maxAge int64 + for _, ev := range s.L2Reads { + if ev.Kind == CacheKeyHit && ev.CacheAgeMs > maxAge { + maxAge = ev.CacheAgeMs + } + } + return maxAge +} + +// ShadowFreshnessRate returns the fraction of shadow cache hits where the cached data +// matched the fresh data (ProvidesData fields were identical). +// Returns 0.0 if there are no shadow comparisons. +func (s *CacheAnalyticsSnapshot) ShadowFreshnessRate() float64 { + if len(s.ShadowComparisons) == 0 { + return 0 + } + var fresh int64 + for _, sc := range s.ShadowComparisons { + if sc.IsFresh { + fresh++ + } + } + return float64(fresh) / float64(len(s.ShadowComparisons)) +} + +// ShadowStaleCount returns the number of shadow comparisons where cached data was stale. +func (s *CacheAnalyticsSnapshot) ShadowStaleCount() int64 { + var count int64 + for _, sc := range s.ShadowComparisons { + if !sc.IsFresh { + count++ + } + } + return count +} + +// ShadowFreshnessRateByEntityType returns per-entity-type freshness rates. +// Returns nil if there are no shadow comparisons. +func (s *CacheAnalyticsSnapshot) ShadowFreshnessRateByEntityType() map[string]float64 { + if len(s.ShadowComparisons) == 0 { + return nil + } + type counts struct { + fresh int64 + total int64 + } + byType := make(map[string]*counts) + for _, sc := range s.ShadowComparisons { + c, ok := byType[sc.EntityType] + if !ok { + c = &counts{} + byType[sc.EntityType] = c + } + c.total++ + if sc.IsFresh { + c.fresh++ + } + } + result := make(map[string]float64, len(byType)) + for typeName, c := range byType { + result[typeName] = float64(c.fresh) / float64(c.total) + } + return result +} + +// computeCacheAgeMs computes cache age in milliseconds from remaining TTL and original TTL. +// Returns 0 if either value is zero or if the computed age would be negative. +func computeCacheAgeMs(remainingTTL, originalTTL time.Duration) int64 { + if remainingTTL <= 0 || originalTTL <= 0 { + return 0 + } + age := originalTTL - remainingTTL + if age <= 0 { + return 0 + } + return age.Milliseconds() +} + +// truncateErrorMessage truncates an error message to maxLen bytes for analytics safety. +func truncateErrorMessage(msg string, maxLen int) string { + if len(msg) <= maxLen { + return msg + } + for maxLen > 0 && !utf8.RuneStart(msg[maxLen]) { + maxLen-- + } + return msg[:maxLen] +} + +// buildEntityKeyJSON builds a compact JSON key from an entity's key field values. +// For @key(fields: "id") and value={"id":"1234","name":"Alice"}: +// +// returns {"id":"1234"} +// +// For @key(fields: "id address { city }") and value={"id":"1234","address":{"city":"NYC","street":"Main"}}: +// +// returns {"id":"1234","address":{"city":"NYC"}} (only key fields, not street) +func buildEntityKeyJSON(value *astjson.Value, keyFields []KeyField) []byte { + if len(keyFields) == 0 { + return nil + } + buf := make([]byte, 0, 64) + buf = appendKeyFieldsJSON(buf, value, keyFields) + return buf +} + +func appendKeyFieldsJSON(buf []byte, value *astjson.Value, keyFields []KeyField) []byte { + buf = append(buf, '{') + first := true + for _, kf := range keyFields { + fieldValue := value.Get(kf.Name) + if fieldValue == nil { + continue + } + if !first { + buf = append(buf, ',') + } + first = false + buf = append(buf, '"') + buf = append(buf, kf.Name...) + buf = append(buf, '"', ':') + if len(kf.Children) > 0 { + // Nested key: recursively extract only key fields + buf = appendKeyFieldsJSON(buf, fieldValue, kf.Children) + } else { + // Scalar key: marshal the value directly + buf = fieldValue.MarshalTo(buf) + } + } + buf = append(buf, '}') + return buf +} + +// walkCachedResponseForSources walks a cached JSON value to find entity instances +// and accumulates their source records on a per-result slice (goroutine-safe). +func walkCachedResponseForSources(value *astjson.Value, keyFields []KeyField, entityType string, source FieldSource, out *[]entitySourceRecord) { + if value == nil { + return + } + switch value.Type() { + case astjson.TypeArray: + for _, item := range value.GetArray() { + walkCachedResponseForSources(item, keyFields, entityType, source, out) + } + case astjson.TypeObject: + keyJSON := buildEntityKeyJSON(value, keyFields) + if len(keyJSON) > 0 { + *out = append(*out, entitySourceRecord{ + entityType: entityType, + keyJSON: string(keyJSON), + source: source, + }) + } + } +} + +// ParseKeyFields parses a selection set string into a structured KeyField tree. +// "id" → [{Name:"id"}] +// "id address { city country }" → [{Name:"id"}, {Name:"address", Children:[{Name:"city"}, {Name:"country"}]}] +func ParseKeyFields(selectionSet string) []KeyField { + words := strings.Fields(selectionSet) + fields, _ := parseKeyFieldsFromTokens(words, 0) + return fields +} + +func parseKeyFieldsFromTokens(tokens []string, pos int) ([]KeyField, int) { + var fields []KeyField + for pos < len(tokens) { + token := tokens[pos] + if token == "}" { + return fields, pos + 1 + } + if token == "{" { + pos++ + continue + } + kf := KeyField{Name: token} + pos++ + // Check if next token is "{" — nested fields + if pos < len(tokens) && tokens[pos] == "{" { + pos++ // skip "{" + kf.Children, pos = parseKeyFieldsFromTokens(tokens, pos) + } + fields = append(fields, kf) + } + return fields, pos +} diff --git a/v2/pkg/engine/resolve/cache_analytics_test.go b/v2/pkg/engine/resolve/cache_analytics_test.go new file mode 100644 index 0000000000..cd0d25d02d --- /dev/null +++ b/v2/pkg/engine/resolve/cache_analytics_test.go @@ -0,0 +1,2090 @@ +package resolve + +import ( + "bytes" + "context" + "slices" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// ============================================================================= +// Unit Tests for CacheAnalyticsCollector +// ============================================================================= + +// TestCacheAnalyticsCollector_RecordEvents verifies that L1/L2 key events are +// recorded with correct fields. Without this, cache analytics could silently +// drop or misattribute events. +func TestCacheAnalyticsCollector_RecordEvents(t *testing.T) { + t.Run("L1 and L2 key events are recorded with exact counts", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordL1KeyEvent(CacheKeyHit, "User", "key1", "accounts", 128) + c.RecordL1KeyEvent(CacheKeyMiss, "User", "key2", "accounts", 0) + c.RecordL1KeyEvent(CacheKeyHit, "Product", "key3", "products", 256) + + c.RecordL2KeyEvent(CacheKeyHit, "User", "key4", "accounts", 512) + c.RecordL2KeyEvent(CacheKeyMiss, "Product", "key5", "products", 0) + + snap := c.Snapshot() + + // L1: 3 events recorded (2 hits + 1 miss), L2: 2 events (1 hit + 1 miss) + assert.Equal(t, 3, len(snap.L1Reads)) + assert.Equal(t, 2, len(snap.L2Reads)) + + // Verify specific events + assert.Equal(t, CacheKeyHit, snap.L1Reads[0].Kind) + assert.Equal(t, "User", snap.L1Reads[0].EntityType) + assert.Equal(t, "key1", snap.L1Reads[0].CacheKey) + assert.Equal(t, "accounts", snap.L1Reads[0].DataSource) + assert.Equal(t, 128, snap.L1Reads[0].ByteSize) + + assert.Equal(t, CacheKeyMiss, snap.L1Reads[1].Kind) + assert.Equal(t, 0, snap.L1Reads[1].ByteSize) + }) + + t.Run("partial hits count as misses in summary", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordL2KeyEvent(CacheKeyPartialHit, "User", "key1", "accounts", 0) + c.RecordL2KeyEvent(CacheKeyHit, "User", "key2", "accounts", 100) + + snap := c.Snapshot() + + assert.Equal(t, 2, len(snap.L2Reads)) + assert.Equal(t, CacheKeyPartialHit, snap.L2Reads[0].Kind) + assert.Equal(t, CacheKeyHit, snap.L2Reads[1].Kind) + }) +} + +// TestCacheAnalyticsCollector_MergeL2Events verifies that L2 events accumulated +// in goroutines merge correctly into the collector on the main thread. +func TestCacheAnalyticsCollector_MergeL2Events(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Simulate events from goroutine 1 + events1 := []CacheKeyEvent{ + {CacheKey: "key1", EntityType: "User", Kind: CacheKeyHit, DataSource: "accounts", ByteSize: 100}, + {CacheKey: "key2", EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts", ByteSize: 0}, + } + // Simulate events from goroutine 2 + events2 := []CacheKeyEvent{ + {CacheKey: "key3", EntityType: "Product", Kind: CacheKeyHit, DataSource: "products", ByteSize: 200}, + } + + c.MergeL2Events(events1) + c.MergeL2Events(events2) + + snap := c.Snapshot() + // 2 events from goroutine 1 + 1 from goroutine 2 + assert.Equal(t, 3, len(snap.L2Reads)) + + // Count hits and misses from events + var l2Hits, l2Misses int + for _, ev := range snap.L2Reads { + switch ev.Kind { + case CacheKeyHit: + l2Hits++ + case CacheKeyMiss: + l2Misses++ + } + } + assert.Equal(t, 2, l2Hits) + assert.Equal(t, 1, l2Misses) +} + +// TestCacheAnalyticsCollector_WriteEvents verifies that L1/L2 write events +// are partitioned correctly and carry TTL and size metadata. +func TestCacheAnalyticsCollector_WriteEvents(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordWrite(CacheWriteEvent{CacheKey: "key1", EntityType: "User", ByteSize: 128, DataSource: "accounts", CacheLevel: CacheLevelL1, Source: CacheSourceQuery}) + c.RecordWrite(CacheWriteEvent{CacheKey: "key2", EntityType: "User", ByteSize: 256, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) + c.RecordWrite(CacheWriteEvent{CacheKey: "key3", EntityType: "Product", ByteSize: 512, DataSource: "products", CacheLevel: CacheLevelL2, TTL: 60 * time.Second, Source: CacheSourceQuery}) + + snap := c.Snapshot() + // 1 L1 write, 2 L2 writes + assert.Equal(t, 1, len(snap.L1Writes)) + assert.Equal(t, 2, len(snap.L2Writes)) + + assert.Equal(t, time.Duration(0), snap.L1Writes[0].TTL) + assert.Equal(t, 128, snap.L1Writes[0].ByteSize) + assert.Equal(t, "User", snap.L1Writes[0].EntityType) + + assert.Equal(t, 30*time.Second, snap.L2Writes[0].TTL) + assert.Equal(t, 256, snap.L2Writes[0].ByteSize) + + assert.Equal(t, "Product", snap.L2Writes[1].EntityType) + assert.Equal(t, 512, snap.L2Writes[1].ByteSize) +} + +// TestCacheAnalyticsCollector_FieldHashing verifies xxhash-based field value +// hashing for staleness detection. Same input must produce identical hashes, +// different input must produce different hashes. +func TestCacheAnalyticsCollector_FieldHashing(t *testing.T) { + t.Run("same input produces same hash", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceSubgraph) + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceSubgraph) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.FieldHashes)) + assert.Equal(t, snap.FieldHashes[0].FieldHash, snap.FieldHashes[1].FieldHash, "same input = same hash") + assert.Equal(t, "User", snap.FieldHashes[0].EntityType) + assert.Equal(t, "name", snap.FieldHashes[0].FieldName) + assert.Equal(t, `{"id":"1"}`, snap.FieldHashes[0].KeyRaw) + assert.Equal(t, FieldSourceSubgraph, snap.FieldHashes[0].Source) + }) + + t.Run("different input produces different hash", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceSubgraph) + c.HashFieldValue("User", "name", []byte(`"Bob"`), `{"id":"2"}`, 0, FieldSourceSubgraph) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.FieldHashes)) + assert.NotEqual(t, snap.FieldHashes[0].FieldHash, snap.FieldHashes[1].FieldHash, "different input = different hash") + }) + + t.Run("hashed keys mode", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.HashFieldValue("User", "name", []byte(`"Alice"`), "", 12345, FieldSourceL1) + + snap := c.Snapshot() + assert.Equal(t, 1, len(snap.FieldHashes)) + assert.Equal(t, "", snap.FieldHashes[0].KeyRaw) + assert.Equal(t, uint64(12345), snap.FieldHashes[0].KeyHash) + assert.Equal(t, FieldSourceL1, snap.FieldHashes[0].Source) + }) + + t.Run("field source tracking", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceSubgraph) + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceL1) + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceL2) + + snap := c.Snapshot() + assert.Equal(t, 3, len(snap.FieldHashes)) + assert.Equal(t, FieldSourceSubgraph, snap.FieldHashes[0].Source) + assert.Equal(t, FieldSourceL1, snap.FieldHashes[1].Source) + assert.Equal(t, FieldSourceL2, snap.FieldHashes[2].Source) + }) +} + +// TestCacheAnalyticsCollector_EntityCounts verifies per-type entity instance +// counting and unique key tracking. Duplicate keys should increment count +// but not unique keys. +func TestCacheAnalyticsCollector_EntityCounts(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.IncrementEntityCount("User", `{"id":"1"}`) + c.IncrementEntityCount("User", `{"id":"2"}`) + c.IncrementEntityCount("User", `{"id":"1"}`) // duplicate key + c.IncrementEntityCount("Product", `{"upc":"top-1"}`) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.EntityTypes)) + + // Find counts by type + var userCount, productCount int + for _, et := range snap.EntityTypes { + switch et.TypeName { + case "User": + userCount = et.Count + case "Product": + productCount = et.Count + } + } + // User: 3 instances (id:1 twice + id:2), Product: 1 instance + assert.Equal(t, 3, userCount) + assert.Equal(t, 1, productCount) + + // Verify unique keys + var userUniqueKeys, productUniqueKeys int + for _, et := range snap.EntityTypes { + switch et.TypeName { + case "User": + userUniqueKeys = et.UniqueKeys + case "Product": + productUniqueKeys = et.UniqueKeys + } + } + // User: 2 unique keys (id:1, id:2), Product: 1 unique key + assert.Equal(t, 2, userUniqueKeys) + assert.Equal(t, 1, productUniqueKeys) +} + +// TestCacheAnalyticsCollector_EntitySourceTracking verifies that the source +// (subgraph, L1, L2) of each entity is recorded and retrievable by type+key. +func TestCacheAnalyticsCollector_EntitySourceTracking(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordEntitySource("User", `{"id":"1"}`, FieldSourceSubgraph) + c.RecordEntitySource("User", `{"id":"2"}`, FieldSourceL1) + c.RecordEntitySource("Product", `{"upc":"top-1"}`, FieldSourceL2) + + assert.Equal(t, FieldSourceSubgraph, c.EntitySource("User", `{"id":"1"}`)) + assert.Equal(t, FieldSourceL1, c.EntitySource("User", `{"id":"2"}`)) + assert.Equal(t, FieldSourceL2, c.EntitySource("Product", `{"upc":"top-1"}`)) + // Unknown entity defaults to Subgraph source + assert.Equal(t, FieldSourceSubgraph, c.EntitySource("Unknown", `{"id":"99"}`)) +} + +// TestCacheAnalyticsCollector_MergeEntitySources verifies that entity source +// records from goroutines merge into the main thread collector. +func TestCacheAnalyticsCollector_MergeEntitySources(t *testing.T) { + c := NewCacheAnalyticsCollector() + + sources := []entitySourceRecord{ + {entityType: "User", keyJSON: `{"id":"1"}`, source: FieldSourceL2}, + {entityType: "User", keyJSON: `{"id":"2"}`, source: FieldSourceL2}, + } + + c.MergeEntitySources(sources) + + assert.Equal(t, FieldSourceL2, c.EntitySource("User", `{"id":"1"}`)) + assert.Equal(t, FieldSourceL2, c.EntitySource("User", `{"id":"2"}`)) +} + +// TestCacheAnalyticsCollector_SnapshotDerivedMetrics verifies computed metrics +// (hit rates, bytes served, entity/datasource breakdowns) derived from raw events. +func TestCacheAnalyticsCollector_SnapshotDerivedMetrics(t *testing.T) { + t.Run("hit rates", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // 3 L1 hits, 1 L1 miss = 75% hit rate + c.RecordL1KeyEvent(CacheKeyHit, "User", "k1", "ds", 100) + c.RecordL1KeyEvent(CacheKeyHit, "User", "k2", "ds", 100) + c.RecordL1KeyEvent(CacheKeyHit, "User", "k3", "ds", 100) + c.RecordL1KeyEvent(CacheKeyMiss, "User", "k4", "ds", 0) + + // 1 L2 hit, 1 L2 miss = 50% hit rate + c.RecordL2KeyEvent(CacheKeyHit, "User", "k5", "ds", 200) + c.RecordL2KeyEvent(CacheKeyMiss, "User", "k6", "ds", 0) + + snap := c.Snapshot() + + // 3 L1 hits / 4 total = 0.75, 1 L2 hit / 2 total = 0.5 + assert.Equal(t, 0.75, snap.L1HitRate()) + assert.Equal(t, 0.5, snap.L2HitRate()) + }) + + t.Run("zero events returns zero hit rate", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Equal(t, float64(0), snap.L1HitRate()) + assert.Equal(t, float64(0), snap.L2HitRate()) + }) + + t.Run("cached bytes served", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordL1KeyEvent(CacheKeyHit, "User", "k1", "ds", 100) + c.RecordL1KeyEvent(CacheKeyHit, "User", "k2", "ds", 200) + c.RecordL1KeyEvent(CacheKeyMiss, "User", "k3", "ds", 0) + c.RecordL2KeyEvent(CacheKeyHit, "User", "k4", "ds", 300) + c.RecordL2KeyEvent(CacheKeyMiss, "User", "k5", "ds", 0) + + snap := c.Snapshot() + // 100 + 200 (L1 hits) + 300 (L2 hit) = 600 + assert.Equal(t, int64(600), snap.CachedBytesServed()) + }) + + t.Run("events by entity type", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordL1KeyEvent(CacheKeyHit, "User", "k1", "ds", 100) + c.RecordL1KeyEvent(CacheKeyMiss, "User", "k2", "ds", 0) + c.RecordL1KeyEvent(CacheKeyHit, "Product", "k3", "ds", 200) + c.RecordL2KeyEvent(CacheKeyHit, "User", "k4", "ds", 300) + c.RecordWrite(CacheWriteEvent{CacheKey: "k5", EntityType: "User", ByteSize: 150, DataSource: "ds", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) + + snap := c.Snapshot() + byEntity := snap.EventsByEntityType() + + assert.Equal(t, int64(1), byEntity["User"].L1Hits) + assert.Equal(t, int64(1), byEntity["User"].L1Misses) + assert.Equal(t, int64(1), byEntity["User"].L2Hits) + assert.Equal(t, int64(400), byEntity["User"].BytesServed) // 100 L1 + 300 L2 + assert.Equal(t, int64(150), byEntity["User"].BytesWritten) + + assert.Equal(t, int64(1), byEntity["Product"].L1Hits) + assert.Equal(t, int64(200), byEntity["Product"].BytesServed) + }) + + t.Run("events by data source", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordL1KeyEvent(CacheKeyHit, "User", "k1", "accounts", 100) + c.RecordL2KeyEvent(CacheKeyMiss, "User", "k2", "accounts", 0) + c.RecordL1KeyEvent(CacheKeyHit, "Product", "k3", "products", 200) + c.RecordWrite(CacheWriteEvent{CacheKey: "k4", EntityType: "Product", ByteSize: 250, DataSource: "products", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) + + snap := c.Snapshot() + byDS := snap.EventsByDataSource() + + assert.Equal(t, int64(1), byDS["accounts"].L1Hits) + assert.Equal(t, int64(1), byDS["accounts"].L2Misses) + assert.Equal(t, int64(100), byDS["accounts"].BytesServed) + + assert.Equal(t, int64(1), byDS["products"].L1Hits) + assert.Equal(t, int64(200), byDS["products"].BytesServed) + assert.Equal(t, int64(250), byDS["products"].BytesWritten) + }) + + t.Run("partial hit rate", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordL1KeyEvent(CacheKeyHit, "User", "k1", "ds", 100) + c.RecordL2KeyEvent(CacheKeyPartialHit, "User", "k2", "ds", 0) + c.RecordL2KeyEvent(CacheKeyMiss, "User", "k3", "ds", 0) + c.RecordL2KeyEvent(CacheKeyHit, "User", "k4", "ds", 200) + + snap := c.Snapshot() + // 1 partial hit out of 4 total events = 0.25 + assert.Equal(t, 0.25, snap.PartialHitRate()) + }) +} + +// TestCacheAnalyticsCollector_DisabledReturnsEmpty verifies that GetCacheStats() +// returns an empty snapshot when EnableCacheAnalytics is not set. This ensures +// zero overhead when analytics is off. +func TestCacheAnalyticsCollector_DisabledReturnsEmpty(t *testing.T) { + // When analytics is disabled, GetCacheStats() returns an empty snapshot + ctx := NewContext(context.Background()) + // Do NOT enable analytics + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + // All nil because EnableCacheAnalytics was not set, so no collector exists + snap := ctx.GetCacheStats() + // All nil because EnableCacheAnalytics was not set + assert.Nil(t, snap.L1Reads) + assert.Nil(t, snap.L2Reads) + assert.Nil(t, snap.L1Writes) + assert.Nil(t, snap.L2Writes) + assert.Nil(t, snap.FieldHashes) + assert.Nil(t, snap.EntityTypes) +} + +// TestBuildEntityKeyJSON verifies that entity key JSON is built from @key fields +// only, ignoring other fields. Composite keys must include nested sub-selections. +func TestBuildEntityKeyJSON(t *testing.T) { + t.Run("simple key", func(t *testing.T) { + var parser astjson.Parser + + val, err := parser.Parse(`{"id":"1234","name":"Alice","age":30}`) + require.NoError(t, err) + + keyFields := []KeyField{{Name: "id"}} + result := buildEntityKeyJSON(val, keyFields) + assert.Equal(t, `{"id":"1234"}`, string(result)) + }) + + t.Run("composite key", func(t *testing.T) { + var parser astjson.Parser + + val, err := parser.Parse(`{"id":"1234","address":{"city":"NYC","street":"Main"},"name":"Alice"}`) + require.NoError(t, err) + + keyFields := []KeyField{ + {Name: "id"}, + {Name: "address", Children: []KeyField{{Name: "city"}}}, + } + result := buildEntityKeyJSON(val, keyFields) + assert.Equal(t, `{"id":"1234","address":{"city":"NYC"}}`, string(result)) + }) + + t.Run("nil key fields returns nil", func(t *testing.T) { + result := buildEntityKeyJSON(nil, nil) + assert.Nil(t, result) + }) +} + +// TestParseKeyFields verifies parsing of @key field selection strings into +// structured KeyField slices, including nested composite keys. +func TestParseKeyFields(t *testing.T) { + t.Run("simple key", func(t *testing.T) { + fields := ParseKeyFields("id") + assert.Equal(t, []KeyField{{Name: "id"}}, fields) + }) + + t.Run("composite key", func(t *testing.T) { + fields := ParseKeyFields("id address { city }") + assert.Equal(t, []KeyField{ + {Name: "id"}, + {Name: "address", Children: []KeyField{{Name: "city"}}}, + }, fields) + }) + + t.Run("nested composite key", func(t *testing.T) { + fields := ParseKeyFields("id address { city country }") + assert.Equal(t, []KeyField{ + {Name: "id"}, + {Name: "address", Children: []KeyField{{Name: "city"}, {Name: "country"}}}, + }, fields) + }) +} + +// ============================================================================= +// Integration Tests +// ============================================================================= + +// TestCacheAnalytics_L1Integration verifies end-to-end L1 cache analytics: +// first entity fetch misses (cold cache), second fetch for same entity hits L1. +func TestCacheAnalytics_L1Integration(t *testing.T) { + t.Run("L1 analytics records hit and miss events", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + // Second entity fetch - should NOT be called (L1 hit) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + // First entity fetch - populates L1 cache + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + // Second entity fetch for SAME entity - should hit L1 cache + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + // Verify analytics + snap := ctx.GetCacheStats() + + // 2 events: 1st entity fetch misses (cache empty), 2nd hits (populated by 1st) + assert.Equal(t, 2, len(snap.L1Reads)) + + // 1st fetch: L1 miss (empty cache), 2nd fetch: L1 hit (same entity cached by 1st) + var l1Hits, l1Misses int + for _, ev := range snap.L1Reads { + assert.Equal(t, "Product", ev.EntityType) + assert.Equal(t, "products", ev.DataSource) + if ev.Kind == CacheKeyHit { + l1Hits++ + assert.Equal(t, 59, ev.ByteSize) + } else { + l1Misses++ + } + } + assert.Equal(t, 1, l1Hits) + assert.Equal(t, 1, l1Misses) + + // L1 writes occur after 1st entity fetch resolved from subgraph + assert.Equal(t, 1, len(snap.L1Writes)) + for _, we := range snap.L1Writes { + assert.Equal(t, "Product", we.EntityType) + assert.Equal(t, 59, we.ByteSize) + } + }) +} + +// TestCacheAnalytics_L2Integration verifies end-to-end L2 cache analytics: +// first request misses L2, fetches from subgraph, and writes to L2. +func TestCacheAnalytics_L2Integration(t *testing.T) { + t.Run("L2 analytics records hit and write events", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + snap := ctx.GetCacheStats() + + // L1 miss: single entity fetch, L1 cache empty (no prior population) + assert.Equal(t, 1, len(snap.L1Reads)) + assert.Equal(t, CacheKeyMiss, snap.L1Reads[0].Kind) + + // L2 miss: first request, L2 cache starts empty + assert.Equal(t, 1, len(snap.L2Reads)) + assert.Equal(t, CacheKeyMiss, snap.L2Reads[0].Kind) + + // Entity written to L2 after subgraph fetch; TTL from FetchCacheConfiguration + assert.Equal(t, 1, len(snap.L2Writes)) + assert.Equal(t, 30*time.Second, snap.L2Writes[0].TTL) + assert.Equal(t, 59, snap.L2Writes[0].ByteSize) + }) +} + +// TestCacheAnalytics_UseL1CacheDisabled verifies that no L1 events are recorded +// when UseL1Cache is false on the fetch configuration. This prevents spurious +// analytics for fetches that deliberately bypass L1. +func TestCacheAnalytics_UseL1CacheDisabled(t *testing.T) { + t.Run("no L1 events when UseL1Cache is false", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: false, // L1 disabled for this fetch + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + snap := ctx.GetCacheStats() + + // UseL1Cache=false on FetchCacheConfiguration skips L1 lookup entirely + // UseL1Cache=false on FetchCacheConfiguration skips L1 lookup entirely + assert.Equal(t, 0, len(snap.L1Reads)) + }) +} + +// TestCacheAnalytics_EntityCounting_Integration verifies that entity instances +// are counted during the two-pass resolution walk (not just during loading). +func TestCacheAnalytics_EntityCounting_Integration(t *testing.T) { + t.Run("entity instances counted during resolution", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"users":[{"__typename":"User","id":"u1","name":"Alice"},{"__typename":"User","id":"u2","name":"Bob"}]}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","email":"alice@example.com"},{"__typename":"User","email":"bob@example.com"}]}}`), nil + }).Times(1) + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("email"), Value: &Scalar{Path: []string{"email"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + RequiresEntityBatchFetch: true, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "accounts", + DataSourceName: "accounts", + RootFields: []GraphCoordinate{{TypeName: "User", FieldName: "email"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.users", ObjectPath("users"), FetchItemPathElement{Kind: FetchItemPathElementKindArray}), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("users"), + Value: &Array{ + Path: []string{"users"}, + Item: &Object{ + TypeName: "User", + CacheAnalytics: &ObjectCacheAnalytics{ + KeyFields: []KeyField{{Name: "id"}}, + }, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("email"), Value: &String{Path: []string{"email"}}}, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Resolve to trigger entity counting and field hashing + buf := &bytes.Buffer{} + err = resolvable.Resolve(context.Background(), response.Data, response.Fetches, buf) + require.NoError(t, err) + + snap := ctx.GetCacheStats() + + // 1 entity type (User); 2 instances from batch fetch (Alice, Bob) + require.Equal(t, 1, len(snap.EntityTypes)) + assert.Equal(t, "User", snap.EntityTypes[0].TypeName) + assert.Equal(t, 2, snap.EntityTypes[0].Count) + }) +} + +// TestCacheAnalytics_ErrorCodeExtraction verifies that extensions.code is +// extracted from subgraph error responses into analytics error events. +func TestCacheAnalytics_ErrorCodeExtraction(t *testing.T) { + t.Run("extracts extensions.code from subgraph error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"errors":[{"message":"not authorized","extensions":{"code":"UNAUTHORIZED"}}],"data":{"product":null}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + SelectResponseErrorsPath: []string{"errors"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"{product {id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "product"}}, + OperationType: ast.OperationTypeQuery, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + snap := ctx.GetCacheStats() + + // Code extracted from errors[0].extensions.code in the subgraph response + require.Equal(t, 1, len(snap.ErrorEvents)) + assert.Equal(t, "products", snap.ErrorEvents[0].DataSource) + assert.Equal(t, "not authorized", snap.ErrorEvents[0].Message) + assert.Equal(t, "UNAUTHORIZED", snap.ErrorEvents[0].Code) + }) + + t.Run("empty code when no extensions.code", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"errors":[{"message":"internal server error"}],"data":{"product":null}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + SelectResponseErrorsPath: []string{"errors"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"{product {id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "product"}}, + OperationType: ast.OperationTypeQuery, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + snap := ctx.GetCacheStats() + + // Code is empty because the response error has no extensions object + require.Equal(t, 1, len(snap.ErrorEvents)) + assert.Equal(t, "products", snap.ErrorEvents[0].DataSource) + assert.Equal(t, "internal server error", snap.ErrorEvents[0].Message) + assert.Equal(t, "", snap.ErrorEvents[0].Code) + }) +} + +// ============================================================================= +// Benchmarks +// ============================================================================= + +// TestCacheAnalyticsCollector_HitCount verifies the L1HitCount/L2HitCount +// convenience methods that count only hit events from raw event slices. +func TestCacheAnalyticsCollector_HitCount(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // 2 L1 hits, 1 L1 miss + c.RecordL1KeyEvent(CacheKeyHit, "User", "k1", "accounts", 100) + c.RecordL1KeyEvent(CacheKeyHit, "User", "k2", "accounts", 100) + c.RecordL1KeyEvent(CacheKeyMiss, "User", "k3", "accounts", 0) + + // 1 L2 hit, 1 L2 miss + c.RecordL2KeyEvent(CacheKeyHit, "Product", "k4", "products", 200) + c.RecordL2KeyEvent(CacheKeyMiss, "Product", "k5", "products", 0) + + snap := c.Snapshot() + // 2 L1 hits out of 3, 1 L2 hit out of 2 + assert.Equal(t, int64(2), snap.L1HitCount()) + assert.Equal(t, int64(1), snap.L2HitCount()) +} + +// TestCacheAnalyticsCollector_HitCount_Zero verifies hit counts return 0 +// on an empty snapshot (no events recorded). +func TestCacheAnalyticsCollector_HitCount_Zero(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Equal(t, int64(0), snap.L1HitCount()) + assert.Equal(t, int64(0), snap.L2HitCount()) +} + +// TestCacheAnalyticsCollector_FetchTiming verifies fetch timing recording, +// merging from goroutines, average duration computation, and time-saved +// estimation based on cache hits. +func TestCacheAnalyticsCollector_FetchTiming(t *testing.T) { + t.Run("fetch timings recorded and merged", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Record main thread timing + c.RecordFetchTiming(FetchTimingEvent{ + DataSource: "accounts", + EntityType: "User", + DurationMs: 5, // 5ms + Source: FieldSourceSubgraph, + ItemCount: 2, + IsEntityFetch: true, + }) + + // Simulate goroutine timings + l2Timings := []FetchTimingEvent{ + {DataSource: "products", EntityType: "Product", DurationMs: 2, Source: FieldSourceL2, ItemCount: 3, IsEntityFetch: true}, + {DataSource: "accounts", EntityType: "User", DurationMs: 1, Source: FieldSourceL2, ItemCount: 1, IsEntityFetch: true}, + } + c.MergeL2FetchTimings(l2Timings) + + snap := c.Snapshot() + // 1 main-thread + 2 merged from goroutines + assert.Equal(t, 3, len(snap.FetchTimings)) + + assert.Equal(t, "accounts", snap.FetchTimings[0].DataSource) + assert.Equal(t, FieldSourceSubgraph, snap.FetchTimings[0].Source) + assert.Equal(t, int64(5), snap.FetchTimings[0].DurationMs) + assert.Equal(t, 2, snap.FetchTimings[0].ItemCount) + assert.Equal(t, true, snap.FetchTimings[0].IsEntityFetch) + + assert.Equal(t, "products", snap.FetchTimings[1].DataSource) + assert.Equal(t, FieldSourceL2, snap.FetchTimings[1].Source) + }) + + t.Run("avg fetch duration by datasource", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordFetchTiming(FetchTimingEvent{DataSource: "accounts", DurationMs: 4, Source: FieldSourceSubgraph}) + c.RecordFetchTiming(FetchTimingEvent{DataSource: "accounts", DurationMs: 6, Source: FieldSourceSubgraph}) + c.RecordFetchTiming(FetchTimingEvent{DataSource: "accounts", DurationMs: 1, Source: FieldSourceL2}) // L2 should be excluded + c.RecordFetchTiming(FetchTimingEvent{DataSource: "products", DurationMs: 10, Source: FieldSourceSubgraph}) + + snap := c.Snapshot() + // accounts: (4+6)/2 = 5ms (L2 excluded), products: 10/1 = 10ms + assert.Equal(t, int64(5), snap.AvgFetchDurationMs("accounts")) + assert.Equal(t, int64(10), snap.AvgFetchDurationMs("products")) + assert.Equal(t, int64(0), snap.AvgFetchDurationMs("unknown")) + }) + + t.Run("total time saved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // 2 subgraph fetches for accounts, avg 5ms + c.RecordFetchTiming(FetchTimingEvent{DataSource: "accounts", DurationMs: 4, Source: FieldSourceSubgraph}) + c.RecordFetchTiming(FetchTimingEvent{DataSource: "accounts", DurationMs: 6, Source: FieldSourceSubgraph}) + + // 3 cache hits for accounts + c.RecordL1KeyEvent(CacheKeyHit, "User", "k1", "accounts", 100) + c.RecordL1KeyEvent(CacheKeyHit, "User", "k2", "accounts", 100) + c.RecordL2KeyEvent(CacheKeyHit, "User", "k3", "accounts", 100) + + snap := c.Snapshot() + // avg fetch duration = 5ms, 3 hits = 15ms saved + assert.Equal(t, int64(15), snap.TotalTimeSavedMs()) + }) +} + +// TestCacheAnalyticsCollector_ErrorEvents verifies error event recording, +// goroutine merging, per-datasource breakdown, and error rate computation. +func TestCacheAnalyticsCollector_ErrorEvents(t *testing.T) { + t.Run("error events recorded and merged", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordError(SubgraphErrorEvent{ + DataSource: "accounts", + EntityType: "User", + Message: "connection refused", + }) + + // Simulate goroutine errors + l2Errors := []SubgraphErrorEvent{ + {DataSource: "products", EntityType: "Product", Message: "timeout"}, + } + c.MergeL2Errors(l2Errors) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.ErrorEvents)) + assert.Equal(t, "accounts", snap.ErrorEvents[0].DataSource) + assert.Equal(t, "connection refused", snap.ErrorEvents[0].Message) + assert.Equal(t, "products", snap.ErrorEvents[1].DataSource) + assert.Equal(t, "timeout", snap.ErrorEvents[1].Message) + }) + + t.Run("errors by datasource", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordError(SubgraphErrorEvent{DataSource: "accounts", Message: "err1"}) + c.RecordError(SubgraphErrorEvent{DataSource: "accounts", Message: "err2"}) + c.RecordError(SubgraphErrorEvent{DataSource: "products", Message: "err3"}) + + snap := c.Snapshot() + byDS := snap.ErrorsByDataSource() + assert.Equal(t, 2, byDS["accounts"]) + assert.Equal(t, 1, byDS["products"]) + }) + + t.Run("errors by datasource returns nil when no errors", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Nil(t, snap.ErrorsByDataSource()) + }) + + t.Run("error rate", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // 3 successful fetches + 1 error = 25% error rate + c.RecordFetchTiming(FetchTimingEvent{DataSource: "accounts", Source: FieldSourceSubgraph}) + c.RecordFetchTiming(FetchTimingEvent{DataSource: "accounts", Source: FieldSourceSubgraph}) + c.RecordFetchTiming(FetchTimingEvent{DataSource: "products", Source: FieldSourceSubgraph}) + c.RecordError(SubgraphErrorEvent{DataSource: "accounts", Message: "err"}) + + snap := c.Snapshot() + // 1 error / (3 fetches + 1 error) = 0.25 + assert.Equal(t, 0.25, snap.ErrorRate()) + }) + + t.Run("error rate zero when no errors", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Equal(t, float64(0), snap.ErrorRate()) + }) + + t.Run("error code from extensions", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordError(SubgraphErrorEvent{ + DataSource: "accounts", + EntityType: "User", + Message: "not authorized", + Code: "UNAUTHORIZED", + }) + c.RecordError(SubgraphErrorEvent{ + DataSource: "products", + EntityType: "Product", + Message: "not found", + // Code intentionally empty — no extensions.code + }) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.ErrorEvents)) + assert.Equal(t, "UNAUTHORIZED", snap.ErrorEvents[0].Code) + assert.Equal(t, "", snap.ErrorEvents[1].Code) + }) +} + +// TestCacheAnalyticsCollector_UniqueKeys verifies that entity unique key tracking +// correctly deduplicates keys while counting all instances. +func TestCacheAnalyticsCollector_UniqueKeys(t *testing.T) { + t.Run("unique keys tracked correctly", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.IncrementEntityCount("User", `{"id":"1"}`) + c.IncrementEntityCount("User", `{"id":"2"}`) + c.IncrementEntityCount("User", `{"id":"1"}`) // duplicate + c.IncrementEntityCount("User", `{"id":"3"}`) + c.IncrementEntityCount("Product", `{"upc":"a"}`) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.EntityTypes)) + + for _, et := range snap.EntityTypes { + switch et.TypeName { + case "User": + assert.Equal(t, 4, et.Count, "User should have 4 instances") + assert.Equal(t, 3, et.UniqueKeys, "User should have 3 unique keys") + case "Product": + assert.Equal(t, 1, et.Count, "Product should have 1 instance") + assert.Equal(t, 1, et.UniqueKeys, "Product should have 1 unique key") + } + } + }) + + t.Run("empty keyJSON not tracked for unique keys", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.IncrementEntityCount("User", "") + c.IncrementEntityCount("User", "") + c.IncrementEntityCount("User", `{"id":"1"}`) + + snap := c.Snapshot() + assert.Equal(t, 1, len(snap.EntityTypes)) + assert.Equal(t, 3, snap.EntityTypes[0].Count, "should count all 3 instances") + assert.Equal(t, 1, snap.EntityTypes[0].UniqueKeys, "should have 1 unique key (empty strings not tracked)") + }) +} + +// TestCacheAnalyticsCollector_CacheAge verifies cache age computation from +// remaining TTL, and average/max age aggregation across L2 hit events. +func TestCacheAnalyticsCollector_CacheAge(t *testing.T) { + t.Run("cache age computed correctly", func(t *testing.T) { + // Test computeCacheAgeMs directly + assert.Equal(t, int64(5000), computeCacheAgeMs(25*time.Second, 30*time.Second)) + assert.Equal(t, int64(0), computeCacheAgeMs(0, 30*time.Second)) + assert.Equal(t, int64(0), computeCacheAgeMs(30*time.Second, 0)) + assert.Equal(t, int64(0), computeCacheAgeMs(35*time.Second, 30*time.Second)) + }) + + t.Run("avg cache age", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Record L2 hits with different ages using MergeL2Events + c.MergeL2Events([]CacheKeyEvent{ + {EntityType: "User", Kind: CacheKeyHit, CacheKey: "k1", DataSource: "ds", ByteSize: 100, CacheAgeMs: 5000}, + {EntityType: "User", Kind: CacheKeyHit, CacheKey: "k2", DataSource: "ds", ByteSize: 100, CacheAgeMs: 15000}, + {EntityType: "Product", Kind: CacheKeyHit, CacheKey: "k3", DataSource: "ds", ByteSize: 100, CacheAgeMs: 3000}, + {EntityType: "User", Kind: CacheKeyMiss, CacheKey: "k4", DataSource: "ds", ByteSize: 0, CacheAgeMs: 0}, // miss, should be ignored + }) + + snap := c.Snapshot() + // User: (5000+15000)/2 = 10000, Product: 3000/1 + assert.Equal(t, int64(10000), snap.AvgCacheAgeMs("User")) + assert.Equal(t, int64(3000), snap.AvgCacheAgeMs("Product")) + assert.Equal(t, int64(0), snap.AvgCacheAgeMs("Unknown")) + + // Empty entity type = all types + // (5000 + 15000 + 3000) / 3 = 7666 + assert.Equal(t, int64(7666), snap.AvgCacheAgeMs("")) + }) + + t.Run("max cache age", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.MergeL2Events([]CacheKeyEvent{ + {EntityType: "User", Kind: CacheKeyHit, CacheKey: "k1", DataSource: "ds", ByteSize: 100, CacheAgeMs: 5000}, + {EntityType: "User", Kind: CacheKeyHit, CacheKey: "k2", DataSource: "ds", ByteSize: 100, CacheAgeMs: 20000}, + {EntityType: "Product", Kind: CacheKeyHit, CacheKey: "k3", DataSource: "ds", ByteSize: 100, CacheAgeMs: 3000}, + }) + + snap := c.Snapshot() + assert.Equal(t, int64(20000), snap.MaxCacheAgeMs()) + }) + + t.Run("max cache age zero when no hits", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Equal(t, int64(0), snap.MaxCacheAgeMs()) + }) +} + +// TestTruncateErrorMessage verifies UTF-8-safe truncation of error messages +// to prevent oversized analytics payloads. +func TestTruncateErrorMessage(t *testing.T) { + assert.Equal(t, "short", truncateErrorMessage("short", 10)) + assert.Equal(t, "12345", truncateErrorMessage("1234567890", 5)) + assert.Equal(t, "", truncateErrorMessage("", 10)) + assert.Equal(t, "exact", truncateErrorMessage("exact", 5)) + assert.Equal(t, "hello ", truncateErrorMessage("hello 世界 test", 8), "cuts before 世 (3-byte char at positions 6-8)") +} + +func BenchmarkCacheAnalytics_Disabled(b *testing.B) { + // Verify zero overhead when analytics is disabled + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + // EnableCacheAnalytics = false (default) + + b.ResetTimer() + for b.Loop() { + // This is the guard check that should be essentially free + if ctx.cacheAnalyticsEnabled() { + ctx.cacheAnalytics.RecordL1KeyEvent(CacheKeyHit, "User", "key", "ds", 100) + } + } +} + +func BenchmarkCacheAnalytics_Enabled(b *testing.B) { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + b.ResetTimer() + for b.Loop() { + if ctx.cacheAnalyticsEnabled() { + ctx.cacheAnalytics.RecordL1KeyEvent(CacheKeyHit, "User", "key", "ds", 100) + } + } +} + +// ============================================================================= +// Shadow Mode Unit Tests +// ============================================================================= + +// TestFieldSourceShadowCached verifies that FieldSourceShadowCached is a +// distinct source value that can be used in field hashing alongside +// Subgraph/L1/L2 sources for shadow mode comparisons. +func TestFieldSourceShadowCached(t *testing.T) { + t.Run("constant value", func(t *testing.T) { + assert.Equal(t, FieldSource(3), FieldSourceShadowCached, "FieldSourceShadowCached should be 3") + }) + + t.Run("HashFieldValue with FieldSourceShadowCached", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.HashFieldValue("User", "username", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceShadowCached) + + snap := c.Snapshot() + require.Equal(t, 1, len(snap.FieldHashes)) + assert.Equal(t, "User", snap.FieldHashes[0].EntityType) + assert.Equal(t, "username", snap.FieldHashes[0].FieldName) + assert.Equal(t, `{"id":"1"}`, snap.FieldHashes[0].KeyRaw) + assert.Equal(t, FieldSourceShadowCached, snap.FieldHashes[0].Source) + }) + + t.Run("can distinguish from other sources", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceSubgraph) + c.HashFieldValue("User", "name", []byte(`"Alice"`), `{"id":"1"}`, 0, FieldSourceShadowCached) + + snap := c.Snapshot() + require.Equal(t, 2, len(snap.FieldHashes)) + assert.Equal(t, FieldSourceSubgraph, snap.FieldHashes[0].Source) + assert.Equal(t, FieldSourceShadowCached, snap.FieldHashes[1].Source) + // Same input, same hash regardless of source + assert.Equal(t, snap.FieldHashes[0].FieldHash, snap.FieldHashes[1].FieldHash, "same input = same hash") + }) +} + +// TestShadowComparisonEvent_Recording verifies that shadow comparison events +// capture all fields (hash, size, age, TTL) needed to detect staleness. +func TestShadowComparisonEvent_Recording(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordShadowComparison(ShadowComparisonEvent{ + CacheKey: "key1", + EntityType: "User", + IsFresh: true, + CachedHash: 12345, + FreshHash: 12345, + CachedBytes: 100, + FreshBytes: 100, + DataSource: "accounts", + CacheAgeMs: 5000, + ConfiguredTTL: 30 * time.Second, + }) + c.RecordShadowComparison(ShadowComparisonEvent{ + CacheKey: "key2", + EntityType: "Product", + IsFresh: false, + CachedHash: 11111, + FreshHash: 22222, + CachedBytes: 80, + FreshBytes: 90, + DataSource: "products", + CacheAgeMs: 10000, + ConfiguredTTL: 60 * time.Second, + }) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.ShadowComparisons)) + + assert.Equal(t, "key1", snap.ShadowComparisons[0].CacheKey) + assert.Equal(t, "User", snap.ShadowComparisons[0].EntityType) + assert.Equal(t, true, snap.ShadowComparisons[0].IsFresh) + assert.Equal(t, uint64(12345), snap.ShadowComparisons[0].CachedHash) + assert.Equal(t, uint64(12345), snap.ShadowComparisons[0].FreshHash) + assert.Equal(t, 100, snap.ShadowComparisons[0].CachedBytes) + assert.Equal(t, 100, snap.ShadowComparisons[0].FreshBytes) + assert.Equal(t, "accounts", snap.ShadowComparisons[0].DataSource) + assert.Equal(t, int64(5000), snap.ShadowComparisons[0].CacheAgeMs) + assert.Equal(t, 30*time.Second, snap.ShadowComparisons[0].ConfiguredTTL) + + assert.Equal(t, "key2", snap.ShadowComparisons[1].CacheKey) + assert.Equal(t, "Product", snap.ShadowComparisons[1].EntityType) + assert.Equal(t, false, snap.ShadowComparisons[1].IsFresh) + assert.Equal(t, uint64(11111), snap.ShadowComparisons[1].CachedHash) + assert.Equal(t, uint64(22222), snap.ShadowComparisons[1].FreshHash) + assert.Equal(t, "products", snap.ShadowComparisons[1].DataSource) + assert.Equal(t, int64(10000), snap.ShadowComparisons[1].CacheAgeMs) + assert.Equal(t, 60*time.Second, snap.ShadowComparisons[1].ConfiguredTTL) +} + +// TestShadowFreshnessRate verifies the freshness rate calculation across +// all shadow comparisons (fresh / total). +func TestShadowFreshnessRate(t *testing.T) { + t.Run("mix of fresh and stale", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k1", EntityType: "User", IsFresh: true}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k2", EntityType: "User", IsFresh: true}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k3", EntityType: "User", IsFresh: false}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k4", EntityType: "User", IsFresh: true}) + + snap := c.Snapshot() + // 3 fresh / 4 total = 0.75 + assert.Equal(t, 0.75, snap.ShadowFreshnessRate()) + }) + + t.Run("all fresh", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k1", IsFresh: true}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k2", IsFresh: true}) + + snap := c.Snapshot() + assert.Equal(t, 1.0, snap.ShadowFreshnessRate()) + }) + + t.Run("all stale", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k1", IsFresh: false}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k2", IsFresh: false}) + + snap := c.Snapshot() + assert.Equal(t, 0.0, snap.ShadowFreshnessRate()) + }) + + t.Run("empty returns zero", func(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Equal(t, 0.0, snap.ShadowFreshnessRate()) + }) +} + +// TestShadowFreshnessRateByEntityType verifies per-entity-type freshness rate +// breakdown for shadow mode comparisons. +func TestShadowFreshnessRateByEntityType(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k1", EntityType: "User", IsFresh: true}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k2", EntityType: "User", IsFresh: false}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k3", EntityType: "Product", IsFresh: true}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k4", EntityType: "Product", IsFresh: true}) + + snap := c.Snapshot() + byType := snap.ShadowFreshnessRateByEntityType() + + // User: 1 fresh / 2 = 0.5, Product: 2 fresh / 2 = 1.0 + assert.Equal(t, 0.5, byType["User"]) + assert.Equal(t, 1.0, byType["Product"]) +} + +func TestShadowFreshnessRateByEntityType_Empty(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Nil(t, snap.ShadowFreshnessRateByEntityType(), "should return nil with no events") +} + +func TestShadowStaleCount(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k1", IsFresh: true}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k2", IsFresh: false}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k3", IsFresh: false}) + c.RecordShadowComparison(ShadowComparisonEvent{CacheKey: "k4", IsFresh: true}) + + snap := c.Snapshot() + assert.Equal(t, int64(2), snap.ShadowStaleCount(), "should have exactly 2 stale entries") +} + +func TestShadowStaleCount_Empty(t *testing.T) { + snap := CacheAnalyticsSnapshot{} + assert.Equal(t, int64(0), snap.ShadowStaleCount(), "should have 0 stale entries with no events") +} + +func TestCacheKeyEvent_ShadowFlag(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Record shadow events using MergeL2Events + c.MergeL2Events([]CacheKeyEvent{ + {CacheKey: "key1", EntityType: "User", Kind: CacheKeyHit, DataSource: "accounts", ByteSize: 128, Shadow: true}, + {CacheKey: "key2", EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts", ByteSize: 0, Shadow: false}, + }) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.L2Reads), "should have exactly 2 L2 events") + assert.Equal(t, true, snap.L2Reads[0].Shadow, "first event should be shadow") + assert.Equal(t, false, snap.L2Reads[1].Shadow, "second event should not be shadow") + + // Filter shadow events + var shadowHits int + for _, ev := range snap.L2Reads { + if ev.Shadow && ev.Kind == CacheKeyHit { + shadowHits++ + } + } + assert.Equal(t, 1, shadowHits, "should have exactly 1 shadow hit") +} + +func BenchmarkFieldHashing(b *testing.B) { + c := NewCacheAnalyticsCollector() + value := []byte(`"some-user-id-value-12345"`) + + b.ResetTimer() + for b.Loop() { + c.HashFieldValue("User", "id", value, `{"id":"1"}`, 0, FieldSourceSubgraph) + } +} + +func TestSnapshotDeduplication(t *testing.T) { + t.Run("duplicate L2 reads consolidated by CacheKey+Kind", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Simulate batch entity fetch where two reviews reference the same User 1234 + c.MergeL2Events([]CacheKeyEvent{ + {CacheKey: "user-1234", EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts"}, + {CacheKey: "user-1234", EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts"}, + {CacheKey: "product-1", EntityType: "Product", Kind: CacheKeyMiss, DataSource: "products"}, + }) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.L2Reads), "duplicate User miss should be consolidated into one event") + assert.Equal(t, "user-1234", snap.L2Reads[0].CacheKey) + assert.Equal(t, "product-1", snap.L2Reads[1].CacheKey) + }) + + t.Run("same key with different Kind preserved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Same key can have different kinds across requests (miss then hit) — both kept + c.MergeL2Events([]CacheKeyEvent{ + {CacheKey: "user-1234", EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts"}, + {CacheKey: "user-1234", EntityType: "User", Kind: CacheKeyHit, DataSource: "accounts", ByteSize: 49}, + }) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.L2Reads), "same key with different Kind should be kept as separate events") + assert.Equal(t, CacheKeyMiss, snap.L2Reads[0].Kind) + assert.Equal(t, CacheKeyHit, snap.L2Reads[1].Kind) + }) + + t.Run("duplicate L2 writes consolidated by CacheKey", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Same entity written twice from batch positions + c.RecordWrite(CacheWriteEvent{CacheKey: "user-1234", EntityType: "User", ByteSize: 49, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) + c.RecordWrite(CacheWriteEvent{CacheKey: "user-1234", EntityType: "User", ByteSize: 49, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) + c.RecordWrite(CacheWriteEvent{CacheKey: "product-1", EntityType: "Product", ByteSize: 128, DataSource: "products", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.L2Writes), "duplicate User write should be consolidated into one event") + assert.Equal(t, "user-1234", snap.L2Writes[0].CacheKey) + assert.Equal(t, "product-1", snap.L2Writes[1].CacheKey) + }) + + t.Run("duplicate shadow comparisons consolidated by CacheKey", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordShadowComparison(ShadowComparisonEvent{ + CacheKey: "user-1234", EntityType: "User", IsFresh: true, CachedHash: 123, FreshHash: 123, + }) + c.RecordShadowComparison(ShadowComparisonEvent{ + CacheKey: "user-1234", EntityType: "User", IsFresh: true, CachedHash: 123, FreshHash: 123, + }) + + snap := c.Snapshot() + assert.Equal(t, 1, len(snap.ShadowComparisons), "duplicate shadow comparison should be consolidated into one event") + }) + + t.Run("no events returns empty slices unchanged", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + snap := c.Snapshot() + assert.Equal(t, 0, len(snap.L1Reads)) + assert.Equal(t, 0, len(snap.L2Reads)) + assert.Equal(t, 0, len(snap.L1Writes)) + assert.Equal(t, 0, len(snap.L2Writes)) + assert.Equal(t, 0, len(snap.ShadowComparisons)) + }) + + t.Run("derived metrics correct after dedup", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + // Two L2 hits for same key (batch positions) — should count as 1 hit, not 2 + c.MergeL2Events([]CacheKeyEvent{ + {CacheKey: "user-1234", EntityType: "User", Kind: CacheKeyHit, DataSource: "accounts", ByteSize: 49}, + {CacheKey: "user-1234", EntityType: "User", Kind: CacheKeyHit, DataSource: "accounts", ByteSize: 49}, + {CacheKey: "product-1", EntityType: "Product", Kind: CacheKeyMiss, DataSource: "products"}, + }) + + snap := c.Snapshot() + assert.Equal(t, 2, len(snap.L2Reads), "should have 2 unique events after dedup") + assert.Equal(t, int64(0), snap.L1HitCount(), "no L1 hits in this test") + assert.Equal(t, int64(1), snap.L2HitCount(), "1 unique L2 hit after dedup") + assert.Equal(t, int64(49), snap.CachedBytesServed(), "bytes served from 1 unique hit") + }) +} + +func TestCacheAnalyticsCollector_HeaderImpactEvents(t *testing.T) { + base := HeaderImpactEvent{ + BaseKey: "key1", HeaderHash: 111, ResponseHash: 999, + EntityType: "User", DataSource: "accounts", + } + + t.Run("exact duplicates are collapsed", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + c.RecordHeaderImpactEvent(base) + c.RecordHeaderImpactEvent(base) + c.RecordHeaderImpactEvent(base) + snap := c.Snapshot() + assert.Equal(t, []HeaderImpactEvent{base}, snap.HeaderImpactEvents) + }) + + t.Run("different BaseKey is preserved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + other := base + other.BaseKey = "key2" + c.RecordHeaderImpactEvent(base) + c.RecordHeaderImpactEvent(other) + snap := c.Snapshot() + assert.Equal(t, []HeaderImpactEvent{base, other}, snap.HeaderImpactEvents) + }) + + t.Run("different HeaderHash is preserved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + other := base + other.HeaderHash = 222 + c.RecordHeaderImpactEvent(base) + c.RecordHeaderImpactEvent(other) + snap := c.Snapshot() + assert.Equal(t, []HeaderImpactEvent{base, other}, snap.HeaderImpactEvents) + }) + + t.Run("different ResponseHash is preserved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + other := base + other.ResponseHash = 888 + c.RecordHeaderImpactEvent(base) + c.RecordHeaderImpactEvent(other) + snap := c.Snapshot() + assert.Equal(t, []HeaderImpactEvent{base, other}, snap.HeaderImpactEvents) + }) + + t.Run("different EntityType is preserved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + other := base + other.EntityType = "Product" + c.RecordHeaderImpactEvent(base) + c.RecordHeaderImpactEvent(other) + snap := c.Snapshot() + assert.Equal(t, []HeaderImpactEvent{base, other}, snap.HeaderImpactEvents) + }) + + t.Run("different DataSource is preserved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + other := base + other.DataSource = "reviews" + c.RecordHeaderImpactEvent(base) + c.RecordHeaderImpactEvent(other) + snap := c.Snapshot() + assert.Equal(t, []HeaderImpactEvent{base, other}, snap.HeaderImpactEvents) + }) + + t.Run("single event is preserved", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + c.RecordHeaderImpactEvent(base) + snap := c.Snapshot() + assert.Equal(t, []HeaderImpactEvent{base}, snap.HeaderImpactEvents) + }) + + t.Run("empty when no events recorded", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + snap := c.Snapshot() + assert.Equal(t, 0, len(snap.HeaderImpactEvents)) + }) +} + +// TestCacheAnalyticsCollector_WriteEventSource verifies that the Source field +// (query vs mutation vs subscription) survives the record→snapshot pipeline. +// Without this, analytics consumers can't distinguish why a cache write happened, +// which breaks per-origin cache hit-rate reporting and mutation-aware invalidation dashboards. +func TestCacheAnalyticsCollector_WriteEventSource(t *testing.T) { + // Each CacheSource variant must appear in the snapshot exactly as recorded. + t.Run("write events preserve source field", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordWrite(CacheWriteEvent{CacheKey: "key1", EntityType: "User", ByteSize: 128, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) + c.RecordWrite(CacheWriteEvent{CacheKey: "key2", EntityType: "Product", ByteSize: 256, DataSource: "products", CacheLevel: CacheLevelL2, TTL: 60 * time.Second, Source: CacheSourceMutation}) + c.RecordWrite(CacheWriteEvent{CacheKey: "key3", EntityType: "Review", ByteSize: 512, DataSource: "reviews", CacheLevel: CacheLevelL2, TTL: 90 * time.Second, Source: CacheSourceSubscription}) + + snap := c.Snapshot() + // Assert entire L2Writes slice — each event preserves its Source from the recording call + assert.Equal(t, []CacheWriteEvent{ + {CacheKey: "key1", EntityType: "User", ByteSize: 128, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}, // Recorded with CacheSourceQuery + {CacheKey: "key2", EntityType: "Product", ByteSize: 256, DataSource: "products", CacheLevel: CacheLevelL2, TTL: 60 * time.Second, Source: CacheSourceMutation}, // Recorded with CacheSourceMutation + {CacheKey: "key3", EntityType: "Review", ByteSize: 512, DataSource: "reviews", CacheLevel: CacheLevelL2, TTL: 90 * time.Second, Source: CacheSourceSubscription}, // Recorded with CacheSourceSubscription + }, snap.L2Writes) + }) + + // MutationEvent is a struct passed by value — ensure Source isn't zeroed during copy. + t.Run("mutation event preserves source field", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + event := MutationEvent{ + MutationRootField: "updateUsername", + EntityType: "User", + EntityCacheKey: `{"__typename":"User","key":{"id":"1"}}`, + HadCachedValue: true, + IsStale: true, + CachedHash: 111, + FreshHash: 222, + CachedBytes: 64, + FreshBytes: 72, + Source: CacheSourceMutation, + } + c.RecordMutationEvent(event) + + snap := c.Snapshot() + // Assert entire MutationEvents slice — Source field preserved through record→snapshot + assert.Equal(t, []MutationEvent{event}, snap.MutationEvents) + }) + + // Same entity type, different sources — verifies events aren't collapsed or overwritten. + t.Run("mixed sources in single snapshot", func(t *testing.T) { + c := NewCacheAnalyticsCollector() + + c.RecordWrite(CacheWriteEvent{CacheKey: "query-key-1", EntityType: "User", ByteSize: 128, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}) // Write from query resolution + c.RecordWrite(CacheWriteEvent{CacheKey: "mutation-key-2", EntityType: "User", ByteSize: 256, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceMutation}) // Write from mutation resolution + + snap := c.Snapshot() + // Assert entire L2Writes — different keys prevent deduplication, each retains its Source + assert.Equal(t, []CacheWriteEvent{ + {CacheKey: "query-key-1", EntityType: "User", ByteSize: 128, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}, // Query-triggered write + {CacheKey: "mutation-key-2", EntityType: "User", ByteSize: 256, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceMutation}, // Mutation-triggered write + }, snap.L2Writes) + }) +} + +// TestSnapshotIndependentOfPooledCollector verifies that a snapshot returned +// from Snapshot() does not share backing arrays with the collector's internal +// slices. GetCacheStats returns the collector to the pool immediately after +// snapshotting; a subsequent request may acquire the same collector and mutate +// its slices while the caller is still iterating the snapshot. Under -race +// this exposes a data race on the shared backing array. Uses single-event +// writes so that pool-recycled collectors hit position 0 of the pre-allocated +// backing array (cap 8) repeatedly, which is exactly the position the reader +// is iterating. +func TestSnapshotIndependentOfPooledCollector(t *testing.T) { + // Populate a collector, snapshot it, release it to the pool. + c := AcquireCacheAnalyticsCollector() + c.RecordFetchTiming(FetchTimingEvent{DataSource: "ds", DurationMs: 42}) + snap := c.Snapshot() + ReleaseCacheAnalyticsCollector(c) + + require.Len(t, snap.FetchTimings, 1) + + // Reader: iterate snap.FetchTimings repeatedly (simulates + // recordEntityCacheMetrics iterating the snapshot). + // Writer: re-acquire a collector (pool returns the same one whose + // backing array is aliased by snap.FetchTimings) and record a fetch + // timing, which overwrites position 0 of the shared backing array. + var wg sync.WaitGroup + done := make(chan struct{}) + wg.Go(func() { + for { + select { + case <-done: + return + default: + sum := int64(0) + for _, ev := range snap.FetchTimings { + sum += ev.DurationMs + } + _ = sum + } + } + }) + + wg.Go(func() { + for range 10_000 { + c2 := AcquireCacheAnalyticsCollector() + c2.RecordFetchTiming(FetchTimingEvent{DataSource: "ds", DurationMs: 99}) + ReleaseCacheAnalyticsCollector(c2) + } + close(done) + }) + wg.Wait() +} + +// TestSnapshotSlicesAreIndependent verifies that mutating the collector's +// internal slices after Snapshot() — as happens when the pool recycles the +// collector via ResetForReuse + new Record* calls — does not alter the values +// observed through the snapshot. Without Snapshot() cloning each shared slice, +// the snapshot aliases the collector's backing arrays and the next request +// overwrites positions the caller is still reading. +func TestSnapshotSlicesAreIndependent(t *testing.T) { + // Use a fresh collector instead of Acquire: RecordMutationEvent and + // RecordCacheOperationError initialize slices that NewCacheAnalyticsCollector + // leaves nil; Releasing the collector would leave the pool with a non-nil + // empty slice and break downstream tests that assert.Equal a snapshot with + // MutationEvents/CacheOpErrors set to nil. + c := NewCacheAnalyticsCollector() + + c.RecordFetchTiming(FetchTimingEvent{DataSource: "ds-orig", DurationMs: 111}) + c.RecordError(SubgraphErrorEvent{DataSource: "ds-orig"}) + c.RecordMutationEvent(MutationEvent{EntityType: "User-orig"}) + c.RecordCacheOperationError(CacheOperationError{DataSource: "ds-orig"}) + c.HashFieldValue("User-orig", "name", []byte(`"a"`), "k-orig", 1, FieldSourceL1) + + snap := c.Snapshot() + + // Deep-copy the snapshot's slices BEFORE the collector is recycled. + // These canonical values must still match snap.* after the collector + // is reset and refilled with different events. + origFetch := slices.Clone(snap.FetchTimings) + origErrors := slices.Clone(snap.ErrorEvents) + origMutations := slices.Clone(snap.MutationEvents) + origCacheOpErrors := slices.Clone(snap.CacheOpErrors) + origFieldHashes := slices.Clone(snap.FieldHashes) + + // Simulate the next request: pool returns c, ResetForReuse truncates + // the slices to len=0 while retaining backing arrays, and subsequent + // Record* calls overwrite position 0 of every shared backing array. + c.ResetForReuse() + for range 100 { + c.RecordFetchTiming(FetchTimingEvent{DataSource: "ds-new", DurationMs: 999}) + c.RecordError(SubgraphErrorEvent{DataSource: "ds-new"}) + c.RecordMutationEvent(MutationEvent{EntityType: "User-new"}) + c.RecordCacheOperationError(CacheOperationError{DataSource: "ds-new"}) + c.HashFieldValue("User-new", "name", []byte(`"z"`), "k-new", 2, FieldSourceL2) + } + + // Full-slice assertions — snapshot must still show the original events. + assert.Equal(t, origFetch, snap.FetchTimings) + assert.Equal(t, origErrors, snap.ErrorEvents) + assert.Equal(t, origMutations, snap.MutationEvents) + assert.Equal(t, origCacheOpErrors, snap.CacheOpErrors) + assert.Equal(t, origFieldHashes, snap.FieldHashes) +} diff --git a/v2/pkg/engine/resolve/cache_key_parity_test.go b/v2/pkg/engine/resolve/cache_key_parity_test.go new file mode 100644 index 0000000000..dc90431339 --- /dev/null +++ b/v2/pkg/engine/resolve/cache_key_parity_test.go @@ -0,0 +1,222 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +// TestCacheKeyParityRegression_ReadWriteInvalidation is a cross-cutting parity +// regression test: the same logical entity must produce an identical L2 cache key +// for args-derived reads, response-derived writes, and extension-driven deletes +// when GlobalCacheKeyPrefix and IncludeSubgraphHeaderPrefix are both enabled. +// This fills the gap between narrower AC-linked tests for AC-L2-04, AC-KEY-03, +// AC-KEY-07, AC-EXT-02, and AC-EXT-03. +func TestCacheKeyParityRegression_ReadWriteInvalidation(t *testing.T) { + // schema-v42 = GlobalCacheKeyPrefix. + // 33333 = subgraph header hash for "accounts". + // JSON object = canonical User entity key with id derived from user(id: 42). + const expectedKey = `schema-v42:33333:{"__typename":"User","key":{"id":"42"}}` + + // SETUP: enable L2 with both prefix layers and use one fake cache so each + // phase can observe the exact key passed to Get, Set, or Delete. + cache := NewFakeLoaderCache() + ctx := NewContext(t.Context()) + // Operation variables; id=42 feeds the args-derived read key and matches + // the response entity used for writeback. + ctx.Variables = astjson.MustParse(`{"id":42}`) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix = "schema-v42" + ctx.SubgraphHeadersBuilder = &mockSubgraphHeadersBuilder{ + hashes: map[string]uint64{"accounts": 33333}, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + } + + rootInfo := &FetchInfo{ + DataSourceName: "accounts", + } + // EntityKeyMappings maps query argument id -> entity key field id, so the + // read-side root template renders the same entity key as writeback. + rootCfg := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + UseL1Cache: true, + IncludeSubgraphHeaderPrefix: true, + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + {Name: "id", Variable: &ContextVariable{Path: []string{"id"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + rootRes := &result{} + + // PHASE 1 — READ KEY: prepareCacheKeys builds the L2 lookup key before any + // fetch happens; tryL2CacheLoad records that key in the fake cache log. + _, err := loader.prepareCacheKeys(rootInfo, rootCfg, []*astjson.Value{astjson.MustParse(`{}`)}, rootRes) + require.NoError(t, err) + + readKeys := loader.extractCacheKeysStrings(ar, rootRes.l2CacheKeys) + assert.Equal(t, []string{expectedKey}, readKeys) + + skipFetch, err := loader.tryL2CacheLoad(ctx.ctx, rootInfo, rootRes) + require.NoError(t, err) + assert.False(t, skipFetch) + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{{Key: expectedKey, Hit: false}}, + }, + }, cache.GetLog()) + cache.ClearLog() + + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, _ any, _ []byte) ([]byte, error) { + // Root fetch returns only the entity stub needed for entity discovery. + return []byte(`{"data":{"user":{"__typename":"User","id":"42"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, _ any, _ []byte) ([]byte, error) { + // Entity fetch returns the full payload that L2 writeback stores. + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"42","username":"Ada"}]}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{user {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newUserCacheKeyTemplate(), + UseL1Cache: true, + IncludeSubgraphHeaderPrefix: true, + }, + }, + InputTemplate: InputTemplate{Segments: newUserEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "accounts", + DataSourceName: "accounts", + OperationType: ast.OperationTypeQuery, + ProvidesData: newUserProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.user", ObjectPath("user")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &String{Path: []string{"username"}}}, + }, + }, + }, + }, + }, + } + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + // PHASE 2 — WRITE KEY: run the real loader path; the cache log Set entry is + // the key used to store the fetched entity response. + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Two entries are expected: the entity fetch L2 miss, then the entity + // writeback Set using the response-derived key. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{{Key: expectedKey, Hit: false}}, + }, + { + Operation: "set", + Items: []CacheLogItem{{Key: expectedKey, TTL: 30 * time.Second}}, + }, + }, cache.GetLog()) + + // PHASE 3 — INVALIDATION KEY: use a separate execution because + // processExtensionsCacheInvalidation skips deleting a key that the active + // fetch is about to write. This independent env exposes the Delete key. + env := newExtInvEnv(t, + // extensions.cacheInvalidation.keys[0] is the subgraph contract for + // telling the loader which entity key to invalidate. + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"42"}}]}}}`, + withExtInvHeaderPrefix(33333), + ) + env.ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix = "schema-v42" + env.run() + + invalidationKeys := env.deleteKeys() + assert.Equal(t, []string{expectedKey}, invalidationKeys) + + // PARITY: read == write == invalidation is the cache-key contract. + writeKeys := []string{cache.GetLog()[1].Items[0].Key} + assert.Equal(t, readKeys, writeKeys) + assert.Equal(t, readKeys, invalidationKeys) +} diff --git a/v2/pkg/engine/resolve/cache_key_test.go b/v2/pkg/engine/resolve/cache_key_test.go new file mode 100644 index 0000000000..40fff794af --- /dev/null +++ b/v2/pkg/engine/resolve/cache_key_test.go @@ -0,0 +1,2723 @@ +package resolve + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +// TestCachingRenderRootQueryCacheKeyTemplate verifies root field cache key +// rendering with various argument types (none, single, multiple, boolean, +// string, prefix). Incorrect keys would cause cache misses or cross-query +// collisions. +func TestCachingRenderRootQueryCacheKeyTemplate(t *testing.T) { + t.Run("single field no arguments", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "users"}, + ResponseKey: "users", + Args: []FieldArgument{}, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"users"}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single field single argument", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "droid"}, + ResponseKey: "droid", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":1}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"droid","args":{"id":1}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single field single string argument", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "name", + Variable: &ContextVariable{ + Path: []string{"name"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"name":"john"}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"user","args":{"name":"john"}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single field multiple arguments", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "search"}, + ResponseKey: "search", + Args: []FieldArgument{ + { + Name: "term", + Variable: &ContextVariable{ + Path: []string{"term"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + { + Name: "max", + Variable: &ContextVariable{ + Path: []string{"max"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"term":"C3PO","max":10}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"search","args":{"term":"C3PO","max":10}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single field multiple arguments with boolean", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}, + ResponseKey: "products", + Args: []FieldArgument{ + { + Name: "includeDeleted", + Variable: &ContextVariable{ + Path: []string{"includeDeleted"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + { + Name: "limit", + Variable: &ContextVariable{ + Path: []string{"limit"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"includeDeleted":true,"limit":20}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"products","args":{"includeDeleted":true,"limit":20}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("multiple fields single argument each", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "droid"}, + ResponseKey: "droid", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "name", + Variable: &ContextVariable{ + Path: []string{"name"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":1,"name":"john"}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{ + `{"__typename":"Query","field":"droid","args":{"id":1}}`, + `{"__typename":"Query","field":"user","args":{"name":"john"}}`, + }, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("multiple fields with mixed arguments", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "product"}, + ResponseKey: "product", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + { + Name: "includeReviews", + Variable: &ContextVariable{ + Path: []string{"includeReviews"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "hero"}, + ResponseKey: "hero", + Args: []FieldArgument{}, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":"123","includeReviews":true}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{ + `{"__typename":"Query","field":"product","args":{"id":"123","includeReviews":true}}`, + `{"__typename":"Query","field":"hero"}`, + }, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("field with object variable argument", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "search"}, + ResponseKey: "search", + Args: []FieldArgument{ + { + Name: "filter", + Variable: &ObjectVariable{ + Path: []string{"filter"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{"filter":{"category":"electronics","price":100}}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"search","args":{"filter":{"category":"electronics","price":100}}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("field with null argument", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":null}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"user","args":{"id":null}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("field with missing argument", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"user","args":{"id":null}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("field with array argument", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}, + ResponseKey: "products", + Args: []FieldArgument{ + { + Name: "ids", + Variable: &ContextVariable{ + Path: []string{"ids"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"ids":[1,2,3]}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"products","args":{"ids":[1,2,3]}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("non-Query type", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Subscription", FieldName: "messageAdded"}, + ResponseKey: "messageAdded", + Args: []FieldArgument{ + { + Name: "roomId", + Variable: &ContextVariable{ + Path: []string{"roomId"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"roomId":"123"}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Subscription","field":"messageAdded","args":{"roomId":"123"}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single field with arena", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "name", + Variable: &ContextVariable{ + Path: []string{"name"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := &Context{ + Variables: astjson.MustParse(`{"name":"john"}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(ar, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Query","field":"user","args":{"name":"john"}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single field with prefix", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":1}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "prefix") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`prefix:{"__typename":"Query","field":"user","args":{"id":1}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("multiple fields with prefix", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "droid"}, + ResponseKey: "droid", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "name", + Variable: &ContextVariable{ + Path: []string{"name"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":1,"name":"john"}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "my-prefix") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{ + `my-prefix:{"__typename":"Query","field":"droid","args":{"id":1}}`, + `my-prefix:{"__typename":"Query","field":"user","args":{"name":"john"}}`, + }, + }, + } + assert.Equal(t, expected, cacheKeys) + }) +} + +// TestCachingRenderEntityQueryCacheKeyTemplate verifies entity cache key +// rendering from __typename + @key fields. Covers single entities, batches, +// composite keys, and nested key fields. +func TestCachingRenderEntityQueryCacheKeyTemplate(t *testing.T) { + t.Run("single entity with typename and id", func(t *testing.T) { + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{"__typename":"Product","id":"123"}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Product","key":{"id":"123"}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single entity with multiple keys", func(t *testing.T) { + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("sku"), + Value: &String{ + Path: []string{"sku"}, + }, + }, + { + Name: []byte("upc"), + Value: &String{ + Path: []string{"upc"}, + }, + }, + }, + }), + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{"__typename":"Product","sku":"ABC123","upc":"DEF456","name":"Trilby"}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Product","key":{"sku":"ABC123","upc":"DEF456"}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("single entity with prefix", func(t *testing.T) { + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{"__typename":"Product","id":"123"}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "entity-prefix") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`entity-prefix:{"__typename":"Product","key":{"id":"123"}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("entity with multiple keys and prefix", func(t *testing.T) { + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("sku"), + Value: &String{ + Path: []string{"sku"}, + }, + }, + { + Name: []byte("upc"), + Value: &String{ + Path: []string{"upc"}, + }, + }, + }, + }), + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{"__typename":"Product","sku":"ABC123","upc":"DEF456","name":"Trilby"}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "cache") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`cache:{"__typename":"Product","key":{"sku":"ABC123","upc":"DEF456"}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) + + t.Run("entity with array key field", func(t *testing.T) { + // Test that arrays in entity keys are properly resolved + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("tags"), + Value: &Array{ + Path: []string{"tags"}, + Item: &String{}, + }, + }, + }, + }), + } + + ctx := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + data := astjson.MustParse(`{"__typename":"Product","tags":["electronics","sale"]}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + expected := []*CacheKey{ + { + Item: data, + Keys: []string{`{"__typename":"Product","key":{"tags":["electronics","sale"]}}`}, + }, + } + assert.Equal(t, expected, cacheKeys) + }) +} + +// TestDerivedEntityCacheKey verifies EntityKeyMappings-based cache key +// derivation for root field queries. These keys allow L2 cache lookups +// by entity identity (e.g., User by id) for root field responses. +func TestDerivedEntityCacheKey(t *testing.T) { + t.Run("simple string ID", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + {Name: "id", Variable: &ContextVariable{Path: []string{"id"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":"123"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"User","key":{"id":"123"}}`}, cacheKeys[0].Keys) + }) + + t.Run("integer argument", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + {Name: "id", Variable: &ContextVariable{Path: []string{"id"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":42}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + // Numbers are coerced to strings in entity cache keys for consistent matching + // between read path (request args) and write path (response entity data) + assert.Equal(t, []string{`{"__typename":"User","key":{"id":"42"}}`}, cacheKeys[0].Keys) + }) + + t.Run("number to string coercion in entity cache keys", func(t *testing.T) { + makeTmpl := func() *RootQueryCacheKeyTemplate { + return &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + {Name: "id", Variable: &ContextVariable{Path: []string{"id"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + } + } + + tests := []struct { + name string + variables string + wantKey string + }{ + { + name: "integer coerced to string", + variables: `{"id":1}`, + wantKey: `{"__typename":"User","key":{"id":"1"}}`, + }, + { + name: "float with decimal coerced to string", + variables: `{"id":1.5}`, + wantKey: `{"__typename":"User","key":{"id":"1.5"}}`, + }, + { + name: "float whole number coerced to string", + variables: `{"id":1.0}`, + wantKey: `{"__typename":"User","key":{"id":"1.0"}}`, + }, + { + name: "large integer coerced to string", + variables: `{"id":9999999}`, + wantKey: `{"__typename":"User","key":{"id":"9999999"}}`, + }, + { + name: "string stays string", + variables: `{"id":"1"}`, + wantKey: `{"__typename":"User","key":{"id":"1"}}`, + }, + { + name: "integer and string produce same key", + variables: `{"id":42}`, + wantKey: `{"__typename":"User","key":{"id":"42"}}`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpl := makeTmpl() + ctx := &Context{Variables: astjson.MustParse(tt.variables), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{tt.wantKey}, cacheKeys[0].Keys) + }) + } + + // Verify integer and string inputs produce identical cache keys + t.Run("integer and string inputs match", func(t *testing.T) { + tmpl1 := makeTmpl() + ctx1 := &Context{Variables: astjson.MustParse(`{"id":1}`), ctx: context.Background()} + keys1, err := tmpl1.RenderCacheKeys(nil, ctx1, []*astjson.Value{astjson.MustParse(`{}`)}, "") + assert.NoError(t, err) + + tmpl2 := makeTmpl() + ctx2 := &Context{Variables: astjson.MustParse(`{"id":"1"}`), ctx: context.Background()} + keys2, err := tmpl2.RenderCacheKeys(nil, ctx2, []*astjson.Value{astjson.MustParse(`{}`)}, "") + assert.NoError(t, err) + + assert.Equal(t, keys1[0].Keys, keys2[0].Keys) + }) + }) + + t.Run("nested object path", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + {Name: "input", Variable: &ContextVariable{Path: []string{"input"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"input", "userId"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"input":{"userId":"456"}}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"User","key":{"id":"456"}}`}, cacheKeys[0].Keys) + }) + + t.Run("deep nested path", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "thing"}, ResponseKey: "thing"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "X", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"a", "b", "c"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"a":{"b":{"c":"deep"}}}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"X","key":{"id":"deep"}}`}, cacheKeys[0].Keys) + }) + + t.Run("array index path", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"ids", "0"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"ids":["first","second"]}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"User","key":{"id":"first"}}`}, cacheKeys[0].Keys) + }) + + t.Run("array index path - empty array", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"ids", "0"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"ids":[]}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + // Empty array has no index 0 → skip caching + assert.Equal(t, 0, len(cacheKeys[0].Keys)) + }) + + t.Run("array index path - null variable", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"ids", "0"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"ids":null}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + // Null variable → skip caching + assert.Equal(t, 0, len(cacheKeys[0].Keys)) + }) + + t.Run("multiple key fields", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "orgUser"}, ResponseKey: "orgUser"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "OrgUser", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "orgId", ArgumentPath: []string{"orgId"}}, + {EntityKeyField: "userId", ArgumentPath: []string{"userId"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"orgId":"org1","userId":"u1"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"OrgUser","key":{"orgId":"org1","userId":"u1"}}`}, cacheKeys[0].Keys) + }) + + t.Run("with prefix", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":"123"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "12345") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`12345:{"__typename":"User","key":{"id":"123"}}`}, cacheKeys[0].Keys) + }) + + t.Run("missing variable - skip caching", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"nonexistent"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + // No keys generated (empty) because variable is missing + assert.Equal(t, 0, len(cacheKeys[0].Keys)) + }) + + t.Run("null variable - skip caching", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":null}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + // No keys generated because variable is null + assert.Equal(t, 0, len(cacheKeys[0].Keys)) + }) + + t.Run("variable remapping", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"userId":"123"}`), + RemapVariables: map[string]string{"id": "userId"}, + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"User","key":{"id":"123"}}`}, cacheKeys[0].Keys) + }) + + t.Run("dot-notation entity key field", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByStore"}, ResponseKey: "productByStore"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"storeId"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"storeId":"123"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"Product","key":{"store":{"id":"123"}}}`}, cacheKeys[0].Keys) + }) + + t.Run("deeply nested dot-notation entity key field", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "thing"}, ResponseKey: "thing"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Thing", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "owner.company.id", ArgumentPath: []string{"companyId"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"companyId":"abc"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"Thing","key":{"owner":{"company":{"id":"abc"}}}}`}, cacheKeys[0].Keys) + }) + + t.Run("dot-notation shared prefix merges into same object", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "product"}, ResponseKey: "product"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"storeId"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"region"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"storeId":"s1","region":"us"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + // Both store.id and store.region must appear under the same "store" object + assert.Equal(t, []string{`{"__typename":"Product","key":{"store":{"id":"s1","region":"us"}}}`}, cacheKeys[0].Keys) + }) + + t.Run("multiple entity key mappings - multi-key lookup", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "product"}, ResponseKey: "product"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "sku", ArgumentPath: []string{"sku"}}, + {EntityKeyField: "region", ArgumentPath: []string{"region"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":"123","sku":"abc","region":"us"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"id":"123"}}`, + `{"__typename":"Product","key":{"sku":"abc","region":"us"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("multiple entity key mappings - partial missing skips that key only", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "product"}, ResponseKey: "product"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "sku", ArgumentPath: []string{"sku"}}, + {EntityKeyField: "region", ArgumentPath: []string{"region"}}, + }, + }, + }, + } + + // Only id and sku provided, region missing → second mapping skipped + ctx := &Context{Variables: astjson.MustParse(`{"id":"123","sku":"abc"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"id":"123"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("flat key + composite key - all args present", func(t *testing.T) { + // Flat @key(fields: "id") + composite @key(fields: "sku region"). + // All arguments provided → both mappings resolve → two cache keys. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByAll"}, ResponseKey: "productByAll"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "sku", ArgumentPath: []string{"sku"}}, + {EntityKeyField: "region", ArgumentPath: []string{"region"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":"p1","sku":"ABC","region":"us-east"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"id":"p1"}}`, + `{"__typename":"Product","key":{"sku":"ABC","region":"us-east"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("flat key + composite key - only composite args present", func(t *testing.T) { + // Flat @key(fields: "id") + composite @key(fields: "sku region"). + // Only sku and region provided, id missing → flat mapping skipped → one cache key. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productBySku"}, ResponseKey: "productBySku"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "sku", ArgumentPath: []string{"sku"}}, + {EntityKeyField: "region", ArgumentPath: []string{"region"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"sku":"ABC","region":"us-east"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"sku":"ABC","region":"us-east"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("flat key + nested composite key - all args present", func(t *testing.T) { + // Flat @key(fields: "id") + nested @key(fields: "store { id region }"). + // All arguments provided → both mappings resolve → two cache keys, + // the second with nested JSON structure from dot-notation. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByAll"}, ResponseKey: "productByAll"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"storeId"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"storeRegion"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":"p1","storeId":"s1","storeRegion":"us"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"id":"p1"}}`, + `{"__typename":"Product","key":{"store":{"id":"s1","region":"us"}}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("flat key + nested composite key - only nested args present", func(t *testing.T) { + // Flat @key(fields: "id") + nested @key(fields: "store { id region }"). + // Only storeId and storeRegion provided, id missing → flat mapping skipped. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByStore"}, ResponseKey: "productByStore"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"storeId"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"storeRegion"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"storeId":"s1","storeRegion":"us"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"store":{"id":"s1","region":"us"}}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("nested composite key - structured argument input", func(t *testing.T) { + // Nested @key(fields: "store { id region }") with a structured argument: + // query productByStore(store: {id: "s1", region: "us"}) + // ArgumentPath ["store", "id"] navigates into the structured variable + // to extract the value for entity key field "store.id". + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByStore"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"store", "id"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"store", "region"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"store":{"id":"s1","region":"us"}}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"store":{"id":"s1","region":"us"}}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("flat key + nested composite key with structured arg - only nested resolves", func(t *testing.T) { + // Flat @key(fields: "id") + nested @key(fields: "store { id region }"). + // Argument "store" is a structured input object, "id" is a flat argument. + // Only "store" provided → flat mapping skipped → one nested cache key. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByStore"}, ResponseKey: "productByStore"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"store", "id"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"store", "region"}}, + }, + }, + }, + } + + // Only structured store argument provided, no flat id + ctx := &Context{Variables: astjson.MustParse(`{"store":{"id":"s1","region":"us"}}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"store":{"id":"s1","region":"us"}}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("two nested composite keys with structured args - both resolve", func(t *testing.T) { + // Two nested keys: @key(fields: "store { id }") + @key(fields: "location { city country }"). + // Arguments are structured input objects: store: {id: "s1"}, location: {city: "Berlin", country: "DE"}. + // Both resolve → two nested cache keys. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "warehouse"}, ResponseKey: "warehouse"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Warehouse", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"store", "id"}}, + }, + }, + { + EntityTypeName: "Warehouse", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "location.city", ArgumentPath: []string{"location", "city"}}, + {EntityKeyField: "location.country", ArgumentPath: []string{"location", "country"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"store":{"id":"s1"},"location":{"city":"Berlin","country":"DE"}}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Warehouse","key":{"store":{"id":"s1"}}}`, + `{"__typename":"Warehouse","key":{"location":{"city":"Berlin","country":"DE"}}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("two nested composite keys with structured args - only first resolves", func(t *testing.T) { + // Two nested keys: @key(fields: "store { id }") + @key(fields: "location { city country }"). + // Arguments are structured: store: {id: "s1"}, but no location argument. + // Only store resolves → location mapping skipped → one cache key. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "warehouse"}, ResponseKey: "warehouse"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Warehouse", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"store", "id"}}, + }, + }, + { + EntityTypeName: "Warehouse", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "location.city", ArgumentPath: []string{"location", "city"}}, + {EntityKeyField: "location.country", ArgumentPath: []string{"location", "country"}}, + }, + }, + }, + } + + // Only store argument provided — location missing → second mapping skipped + ctx := &Context{Variables: astjson.MustParse(`{"store":{"id":"s1"}}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Warehouse","key":{"store":{"id":"s1"}}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("remap variables - flat key forward lookup", func(t *testing.T) { + // Production scenario: VariablesMapper renames $id → $a in the AST. + // resolveArgumentPath resolves "id" → ContextVariable.Path ["a"]. + // RemapVariables maps newName → oldName: {"a": "id"}. + // Variables JSON keeps the original name: {"id": "user-123"}. + // Forward lookup: RemapVariables["a"] = "id" → Variables.Get("id") = "user-123". + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"a"}}, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":"user-123"}`), + RemapVariables: map[string]string{"a": "id"}, + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"User","key":{"id":"user-123"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("remap variables - multiple mappings forward lookup", func(t *testing.T) { + // Two mappings: flat @key(fields: "id") + composite @key(fields: "sku region"). + // VariablesMapper renamed $id→$a, $sku→$b, $region→$c. + // resolveArgumentPath resolved each to ["a"], ["b"], ["c"]. + // Variables JSON keeps original names: {"id", "sku", "region"}. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByAll"}, ResponseKey: "productByAll"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"a"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "sku", ArgumentPath: []string{"b"}}, + {EntityKeyField: "region", ArgumentPath: []string{"c"}}, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":"p1","sku":"ABC","region":"us-east"}`), + RemapVariables: map[string]string{"a": "id", "b": "sku", "c": "region"}, + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"id":"p1"}}`, + `{"__typename":"Product","key":{"sku":"ABC","region":"us-east"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("remap variables - partial remap with multi-key", func(t *testing.T) { + // Two entity key mappings: flat "id" (remapped $id→$a) + flat "username" (derived key, no argument). + // ArgumentPath ["a"] resolved by planner; ArgumentPath ["username"] unresolved (derived key). + // Only the "id" mapping resolves; "username" has no variable → skip that mapping. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, ResponseKey: "user"}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"a"}}, + }, + }, + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "username", ArgumentPath: []string{"username"}}, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"id":"user-123"}`), + RemapVariables: map[string]string{"a": "id"}, + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + // Only the "id" mapping resolves; "username" is a derived key with no variable + assert.Equal(t, []string{ + `{"__typename":"User","key":{"id":"user-123"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("remap variables - nested input object argument path", func(t *testing.T) { + // Multi-element ArgumentPath ["a", "sellerId"] with RemapVariables {"a": "k"} + // should remap the first element "a" → "k" and resolve from {"k": {"sellerId": "s1", "sku": "WIDGET-01"}}. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productBySeller"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "sellerId", ArgumentPath: []string{"a", "sellerId"}}, + {EntityKeyField: "sku", ArgumentPath: []string{"a", "sku"}}, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"k":{"sellerId":"s1","sku":"WIDGET-01"}}`), + RemapVariables: map[string]string{"a": "k"}, + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Product","key":{"sellerId":"s1","sku":"WIDGET-01"}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("remap variables - deeply nested input object argument path", func(t *testing.T) { + // 3-element ArgumentPath ["a", "address", "id"] with RemapVariables {"a": "v"} + // should remap first element "a" → "v" and resolve from {"v": {"address": {"id": "v1"}}}. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "venue"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Venue", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "address.id", ArgumentPath: []string{"a", "address", "id"}}, + }, + }, + }, + } + + ctx := &Context{ + Variables: astjson.MustParse(`{"v":{"address":{"id":"v1"}}}`), + RemapVariables: map[string]string{"a": "v"}, + ctx: context.Background(), + } + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{ + `{"__typename":"Venue","key":{"address":{"id":"v1"}}}`, + }, cacheKeys[0].Keys) + }) + + t.Run("flat key + composite key - neither matches (skip cache)", func(t *testing.T) { + // Flat @key(fields: "id") + composite @key(fields: "sku region"). + // No arguments provided → both mappings skip → empty keys → skip cache. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByAll"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "sku", ArgumentPath: []string{"sku"}}, + {EntityKeyField: "region", ArgumentPath: []string{"region"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"unrelated":"value"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{}, cacheKeys[0].Keys) + }) + + t.Run("flat key + nested composite key - neither matches (skip cache)", func(t *testing.T) { + // Flat @key(fields: "id") + nested @key(fields: "store { id region }"). + // No arguments provided → both mappings skip → empty keys → skip cache. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByAll"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"storeId"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"storeRegion"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"unrelated":"value"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{}, cacheKeys[0].Keys) + }) + + t.Run("flat key + nested composite key with structured arg - neither matches (skip cache)", func(t *testing.T) { + // Flat @key(fields: "id") + nested @key(fields: "store { id region }") with structured arg. + // No arguments provided → both mappings skip → empty keys → skip cache. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productByStore"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"store", "id"}}, + {EntityKeyField: "store.region", ArgumentPath: []string{"store", "region"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"unrelated":"value"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{}, cacheKeys[0].Keys) + }) + + t.Run("two nested composite keys with structured args - neither matches (skip cache)", func(t *testing.T) { + // Two nested keys: @key(fields: "store { id }") + @key(fields: "location { city country }"). + // No arguments provided → both mappings skip → empty keys → skip cache. + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "warehouse"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Warehouse", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "store.id", ArgumentPath: []string{"store", "id"}}, + }, + }, + { + EntityTypeName: "Warehouse", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "location.city", ArgumentPath: []string{"location", "city"}}, + {EntityKeyField: "location.country", ArgumentPath: []string{"location", "country"}}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"unrelated":"value"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{}, cacheKeys[0].Keys) + }) + + t.Run("no entity key mapping - uses root field key", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + Args: []FieldArgument{ + {Name: "id", Variable: &ContextVariable{Path: []string{"id"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + // No EntityKeyMappings - should use root field key format + } + + ctx := &Context{Variables: astjson.MustParse(`{"id":"123"}`), ctx: context.Background()} + data := astjson.MustParse(`{}`) + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{data}, "") + assert.NoError(t, err) + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"Query","field":"user","args":{"id":"123"}}`}, cacheKeys[0].Keys) + }) +} + +func BenchmarkRenderCacheKeys(b *testing.B) { + a := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + ctxRootQuery := &Context{ + Variables: astjson.MustParse(`{"id":1,"name":"john","term":"C3PO","max":10}`), + ctx: context.Background(), + } + + ctxEntityQuery := &Context{ + Variables: astjson.MustParse(`{}`), + ctx: context.Background(), + } + + b.Run("RootQuery/SingleField", func(b *testing.B) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{ + TypeName: "Query", + FieldName: "user", + }, + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + data := astjson.MustParse(`{}`) + items := []*astjson.Value{data} + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + a.Reset() + _, err := tmpl.RenderCacheKeys(a, ctxRootQuery, items, "") + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("RootQuery/MultipleFields", func(b *testing.B) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{ + TypeName: "Query", + FieldName: "droid", + }, + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + { + Coordinate: GraphCoordinate{ + TypeName: "Query", + FieldName: "user", + }, + Args: []FieldArgument{ + { + Name: "name", + Variable: &ContextVariable{ + Path: []string{"name"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + { + Coordinate: GraphCoordinate{ + TypeName: "Query", + FieldName: "search", + }, + Args: []FieldArgument{ + { + Name: "term", + Variable: &ContextVariable{ + Path: []string{"term"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + { + Name: "max", + Variable: &ContextVariable{ + Path: []string{"max"}, + Renderer: NewCacheKeyVariableRenderer(), + }, + }, + }, + }, + }, + } + + data := astjson.MustParse(`{}`) + items := []*astjson.Value{data} + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + a.Reset() + _, err := tmpl.RenderCacheKeys(a, ctxRootQuery, items, "") + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run("EntityQuery", func(b *testing.B) { + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("sku"), + Value: &String{ + Path: []string{"sku"}, + }, + }, + { + Name: []byte("upc"), + Value: &String{ + Path: []string{"upc"}, + }, + }, + }, + }), + } + + data1 := astjson.MustParse(`{"__typename":"Product","id":"123","sku":"ABC123","upc":"DEF456","name":"Trilby"}`) + data2 := astjson.MustParse(`{"__typename":"Product","id":"456","sku":"XYZ789","upc":"GHI012","name":"Fedora"}`) + data3 := astjson.MustParse(`{"__typename":"Product","id":"789","sku":"JKL345","upc":"MNO678","name":"Boater"}`) + items := []*astjson.Value{data1, data2, data3} + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + a.Reset() + _, err := tmpl.RenderCacheKeys(a, ctxEntityQuery, items, "") + if err != nil { + b.Fatal(err) + } + } + }) +} + +// TestRenderCacheKeys_EntityKeyMappings_NotDuplicatedByRootFields verifies +// that EntityKeyMappings produce exactly one key per entity, not duplicated +// per root field in multi-field queries. +func TestRenderCacheKeys_EntityKeyMappings_NotDuplicatedByRootFields(t *testing.T) { + a := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + template := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "field1"}}, + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "field2"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + } + + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParse(`{"upc":"top-1"}`) + + items := []*astjson.Value{astjson.NullValue} + keys, err := template.RenderCacheKeys(a, ctx, items, "") + require.NoError(t, err) + require.Len(t, keys, 1, "one CacheKey per item") + // Should have exactly 1 key string, not 2 (one per root field) + require.Equal(t, []string{ + `{"__typename":"Product","key":{"upc":"top-1"}}`, + }, keys[0].Keys, "EntityKeyMappings should produce one key, not duplicated per root field") +} + +// TestResolveFieldValue verifies that resolveFieldValue extracts arena-allocated +// values from JSON data for each node type (String, Scalar, Integer, etc.). +func TestResolveFieldValue(t *testing.T) { + a := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + template := &EntityQueryCacheKeyTemplate{} + + t.Run("String", func(t *testing.T) { + data := astjson.MustParse(`{"name":"Alice"}`) + result := template.resolveFieldValue(a, &String{Path: []string{"name"}}, data) + require.NotNil(t, result) + assert.Equal(t, `"Alice"`, string(result.MarshalTo(nil))) + }) + + t.Run("Scalar", func(t *testing.T) { + data := astjson.MustParse(`{"id":"abc-123"}`) + result := template.resolveFieldValue(a, &Scalar{Path: []string{"id"}}, data) + require.NotNil(t, result) + assert.Equal(t, `"abc-123"`, string(result.MarshalTo(nil))) + }) + + t.Run("Integer", func(t *testing.T) { + data := astjson.MustParse(`{"age":42}`) + result := template.resolveFieldValue(a, &Integer{Path: []string{"age"}}, data) + require.NotNil(t, result) + assert.Equal(t, `42`, string(result.MarshalTo(nil))) + }) + + t.Run("Float", func(t *testing.T) { + data := astjson.MustParse(`{"price":19.99}`) + result := template.resolveFieldValue(a, &Float{Path: []string{"price"}}, data) + require.NotNil(t, result) + assert.Equal(t, `19.99`, string(result.MarshalTo(nil))) + }) + + t.Run("Boolean", func(t *testing.T) { + data := astjson.MustParse(`{"active":true}`) + result := template.resolveFieldValue(a, &Boolean{Path: []string{"active"}}, data) + require.NotNil(t, result) + assert.Equal(t, `true`, string(result.MarshalTo(nil))) + }) + + t.Run("Enum", func(t *testing.T) { + data := astjson.MustParse(`{"status":"ACTIVE"}`) + result := template.resolveFieldValue(a, &Enum{Path: []string{"status"}}, data) + require.NotNil(t, result) + assert.Equal(t, `"ACTIVE"`, string(result.MarshalTo(nil))) + }) + + t.Run("BigInt", func(t *testing.T) { + data := astjson.MustParse(`{"bigId":"9007199254740993"}`) + result := template.resolveFieldValue(a, &BigInt{Path: []string{"bigId"}}, data) + require.NotNil(t, result) + assert.Equal(t, `"9007199254740993"`, string(result.MarshalTo(nil))) + }) + + t.Run("CustomNode", func(t *testing.T) { + data := astjson.MustParse(`{"custom":"some-value"}`) + result := template.resolveFieldValue(a, &CustomNode{Path: []string{"custom"}}, data) + require.NotNil(t, result) + assert.Equal(t, `"some-value"`, string(result.MarshalTo(nil))) + }) + + t.Run("Object", func(t *testing.T) { + data := astjson.MustParse(`{"address":{"city":"Berlin","zip":"10115"}}`) + node := &Object{ + Path: []string{"address"}, + Fields: []*Field{ + {Name: []byte("city"), Value: &String{Path: []string{"city"}}}, + {Name: []byte("zip"), Value: &String{Path: []string{"zip"}}}, + }, + } + result := template.resolveFieldValue(a, node, data) + require.NotNil(t, result) + assert.Equal(t, `{"city":"Berlin","zip":"10115"}`, string(result.MarshalTo(nil))) + }) + + t.Run("Object skips __typename", func(t *testing.T) { + data := astjson.MustParse(`{"address":{"__typename":"Address","city":"Berlin"}}`) + node := &Object{ + Path: []string{"address"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("city"), Value: &String{Path: []string{"city"}}}, + }, + } + result := template.resolveFieldValue(a, node, data) + require.NotNil(t, result) + assert.Equal(t, `{"city":"Berlin"}`, string(result.MarshalTo(nil))) + }) + + t.Run("Object returns nil for null data", func(t *testing.T) { + data := astjson.MustParse(`{"address":null}`) + node := &Object{ + Path: []string{"address"}, + Fields: []*Field{ + {Name: []byte("city"), Value: &String{Path: []string{"city"}}}, + }, + } + result := template.resolveFieldValue(a, node, data) + assert.Nil(t, result) + }) + + t.Run("Array", func(t *testing.T) { + data := astjson.MustParse(`{"tags":["go","graphql"]}`) + node := &Array{ + Path: []string{"tags"}, + Item: &String{}, + } + result := template.resolveFieldValue(a, node, data) + require.NotNil(t, result) + assert.Equal(t, `["go","graphql"]`, string(result.MarshalTo(nil))) + }) + + t.Run("Array returns nil for missing path", func(t *testing.T) { + data := astjson.MustParse(`{}`) + node := &Array{ + Path: []string{"tags"}, + Item: &String{}, + } + result := template.resolveFieldValue(a, node, data) + assert.Nil(t, result) + }) + + t.Run("missing path returns nil", func(t *testing.T) { + data := astjson.MustParse(`{}`) + result := template.resolveFieldValue(a, &String{Path: []string{"missing"}}, data) + assert.Nil(t, result) + }) + + t.Run("nested path", func(t *testing.T) { + data := astjson.MustParse(`{"a":{"b":{"c":"deep"}}}`) + result := template.resolveFieldValue(a, &String{Path: []string{"a", "b", "c"}}, data) + require.NotNil(t, result) + assert.Equal(t, `"deep"`, string(result.MarshalTo(nil))) + }) +} + +// TestRenderCacheKeys_BatchEntityKey verifies that list arguments in +// EntityKeyMappings expand into multiple cache keys (one per list item), +// enabling per-entity L2 lookups for batch root field queries. +func TestRenderCacheKeys_BatchEntityKey(t *testing.T) { + t.Run("list argument produces multiple cache keys", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"upcs":["p1","p2","p3"]}`), ctx: context.Background()} + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + assert.Equal(t, []*CacheKey{ + {Keys: []string{`{"__typename":"Product","key":{"upc":"p1"}}`}, BatchIndex: 0}, + {Keys: []string{`{"__typename":"Product","key":{"upc":"p2"}}`}, BatchIndex: 1}, + {Keys: []string{`{"__typename":"Product","key":{"upc":"p3"}}`}, BatchIndex: 2}, + }, cacheKeys) + }) + + t.Run("empty list produces no cache keys", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"upcs":[]}`), ctx: context.Background()} + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + assert.Equal(t, 0, len(cacheKeys)) + }) + + t.Run("single-element list produces one cache key", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"upcs":["p1"]}`), ctx: context.Background()} + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + assert.Equal(t, []*CacheKey{ + {Keys: []string{`{"__typename":"Product","key":{"upc":"p1"}}`}, BatchIndex: 0}, + }, cacheKeys) + }) + + t.Run("scalar argument with ArgumentIsEntityKey falls back to single key", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "product"}, + Args: []FieldArgument{ + {Name: "upc", Variable: &ContextVariable{Path: []string{"upc"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"upc":"p1"}`), ctx: context.Background()} + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + // Falls back to non-batch path — uses renderDerivedEntityKey, same key format + assert.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, []string{`{"__typename":"Product","key":{"upc":"p1"}}`}, cacheKeys[0].Keys) + }) + + t.Run("batch key format matches scalar key format", func(t *testing.T) { + // Scalar lookup + scalarTmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "product"}, + Args: []FieldArgument{ + {Name: "upc", Variable: &ContextVariable{Path: []string{"upc"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + } + + scalarCtx := &Context{Variables: astjson.MustParse(`{"upc":"p1"}`), ctx: context.Background()} + scalarKeys, err := scalarTmpl.RenderCacheKeys(nil, scalarCtx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + + // Batch lookup + batchTmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + batchCtx := &Context{Variables: astjson.MustParse(`{"upcs":["p1"]}`), ctx: context.Background()} + batchKeys, err := batchTmpl.RenderCacheKeys(nil, batchCtx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + + // Same cache key format — enables cache sharing between scalar and batch lookups + assert.Equal(t, scalarKeys[0].Keys[0], batchKeys[0].Keys[0]) + }) + + t.Run("null argument produces empty cache keys", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"upcs":null}`), ctx: context.Background()} + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + assert.Equal(t, 0, len(cacheKeys)) + }) + + t.Run("list argument with prefix", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + ctx := &Context{Variables: astjson.MustParse(`{"upcs":["p1","p2"]}`), ctx: context.Background()} + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "12345") + assert.NoError(t, err) + assert.Equal(t, []*CacheKey{ + {Keys: []string{`12345:{"__typename":"Product","key":{"upc":"p1"}}`}, BatchIndex: 0}, + {Keys: []string{`12345:{"__typename":"Product","key":{"upc":"p2"}}`}, BatchIndex: 1}, + }, cacheKeys) + }) + + t.Run("list argument with RemapVariables", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + // ArgumentPath uses the remapped variable name "a" + {EntityKeyField: "upc", ArgumentPath: []string{"a"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + // Variables use original name "upcs", RemapVariables maps "a" → "upcs" + ctx := &Context{ + Variables: astjson.MustParse(`{"upcs":["p1","p2"]}`), + RemapVariables: map[string]string{"a": "upcs"}, + ctx: context.Background(), + } + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + assert.Equal(t, []*CacheKey{ + {Keys: []string{`{"__typename":"Product","key":{"upc":"p1"}}`}, BatchIndex: 0}, + {Keys: []string{`{"__typename":"Product","key":{"upc":"p2"}}`}, BatchIndex: 1}, + }, cacheKeys) + }) + + t.Run("constructor precomputes batch entity key metadata", func(t *testing.T) { + tmpl := NewRootQueryCacheKeyTemplate( + []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "products"}}, + }, + []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upcs"}, ArgumentIsEntityKey: true}, + }, + }, + }, + ) + + assert.True(t, tmpl.batchEntityKeyPrecomputed) + assert.True(t, tmpl.hasBatchEntityKey) + assert.Equal(t, []string{"upcs"}, tmpl.batchEntityKeyArgumentPath) + assert.True(t, tmpl.HasBatchEntityKey()) + assert.Equal(t, []string{"upcs"}, tmpl.BatchEntityKeyArgumentPath()) + }) + + t.Run("batch entity key with RemapVariables produces per-element keys", func(t *testing.T) { + tmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + {Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "articles"}}, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Article", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"a"}, ArgumentIsEntityKey: true}, + }, + }, + }, + } + + // Variables use remapped name "a", original argument name is "ids" + ctx := &Context{ + Variables: astjson.MustParse(`{"ids":["1","2","3"]}`), + RemapVariables: map[string]string{"a": "ids"}, + ctx: context.Background(), + } + cacheKeys, err := tmpl.RenderCacheKeys(nil, ctx, []*astjson.Value{nil}, "") + assert.NoError(t, err) + assert.Equal(t, 3, len(cacheKeys)) + assert.Equal(t, []*CacheKey{ + {Keys: []string{`{"__typename":"Article","key":{"id":"1"}}`}, BatchIndex: 0}, + {Keys: []string{`{"__typename":"Article","key":{"id":"2"}}`}, BatchIndex: 1}, + {Keys: []string{`{"__typename":"Article","key":{"id":"3"}}`}, BatchIndex: 2}, + }, cacheKeys) + }) +} + +// TestEntityQueryCacheKeyTemplate_NumericKeyCoercion pins down the number→string +// coercion contract on the entity-data rendering path. The sibling paths +// (RootQueryCacheKeyTemplate.renderDerivedEntityKey / +// renderDerivedEntityKeyFromValue) coerce numeric @key values to strings via +// setNestedKey so that `{"id":1}` and `{"id":"1"}` share one cache entry. +// The entity-data path at caching.go:657 (EntityQueryCacheKeyTemplate. +// renderCacheKeys) must produce a byte-identical key for the same entity, +// otherwise the read path (derived key from args) and the write path +// (direct key from entity data) silently miss the cache. +func TestEntityQueryCacheKeyTemplate_NumericKeyCoercion(t *testing.T) { + t.Parallel() + + t.Run("flat numeric @key field is coerced to string", func(t *testing.T) { + t.Parallel() + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("upc"), Value: &Scalar{Path: []string{"upc"}}}, + }, + }), + } + entity := astjson.MustParse(`{"__typename":"Product","upc":42,"name":"Widget"}`) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cacheKeys, err := tmpl.RenderCacheKeys(ar, nil, []*astjson.Value{entity}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, + `{"__typename":"Product","key":{"upc":"42"}}`, + cacheKeys[0].Keys[0], + "numeric @key values read from entity data must be coerced to strings, matching the derived-key path") + }) + + t.Run("float @key field is coerced to string", func(t *testing.T) { + t.Parallel() + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}}}, + }, + }), + } + entity := astjson.MustParse(`{"__typename":"Product","price":9.99}`) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cacheKeys, err := tmpl.RenderCacheKeys(ar, nil, []*astjson.Value{entity}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, + `{"__typename":"Product","key":{"price":"9.99"}}`, + cacheKeys[0].Keys[0]) + }) + + t.Run("nested composite numeric @key is coerced at all levels", func(t *testing.T) { + t.Parallel() + // Composite @key: Store is keyed by location.id where location is a + // nested Object node in the template and id is numeric in the response. + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + { + Name: []byte("location"), + Value: &Object{ + Path: []string{"location"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + }, + }, + }, + }, + }), + } + entity := astjson.MustParse(`{"__typename":"Store","location":{"id":7}}`) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cacheKeys, err := tmpl.RenderCacheKeys(ar, nil, []*astjson.Value{entity}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, + `{"__typename":"Store","key":{"location":{"id":"7"}}}`, + cacheKeys[0].Keys[0], + "numeric scalars inside nested composite @key Objects must also be coerced") + }) + + t.Run("string @key field is unchanged", func(t *testing.T) { + t.Parallel() + // Regression guard: coercion must be a no-op for strings. + tmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("upc"), Value: &String{Path: []string{"upc"}}}, + }, + }), + } + entity := astjson.MustParse(`{"__typename":"Product","upc":"42"}`) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cacheKeys, err := tmpl.RenderCacheKeys(ar, nil, []*astjson.Value{entity}, "") + require.NoError(t, err) + require.Equal(t, 1, len(cacheKeys)) + assert.Equal(t, + `{"__typename":"Product","key":{"upc":"42"}}`, + cacheKeys[0].Keys[0]) + }) +} + +// TestCacheKeyPathSymmetry_NumericKeys verifies that the read-path key (derived +// from request args via RootQueryCacheKeyTemplate) and the write-path key +// (derived from entity data via EntityQueryCacheKeyTemplate) are byte-identical +// when the @key values are numeric. Without coercion on both sides, these +// paths silently produce different keys for the same logical entity, causing +// every write to miss every subsequent read. +func TestCacheKeyPathSymmetry_NumericKeys(t *testing.T) { + t.Parallel() + + // Read path: RootQueryCacheKeyTemplate reading args → derived entity key. + readTmpl := &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "product"}, + ResponseKey: "product", + Args: []FieldArgument{ + {Name: "upc", Variable: &ContextVariable{Path: []string{"upc"}, Renderer: NewCacheKeyVariableRenderer()}}, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "upc", ArgumentPath: []string{"upc"}}, + }, + }, + }, + } + + // Write path: EntityQueryCacheKeyTemplate reading entity data → entity key. + writeTmpl := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("upc"), Value: &Scalar{Path: []string{"upc"}}}, + }, + }), + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + // Same logical entity: upc = 42 (number). + ctx := &Context{Variables: astjson.MustParse(`{"upc":42}`), ctx: context.Background()} + readKeys, err := readTmpl.RenderCacheKeys(ar, ctx, []*astjson.Value{astjson.MustParse(`{}`)}, "") + require.NoError(t, err) + require.Equal(t, 1, len(readKeys)) + + entity := astjson.MustParse(`{"__typename":"Product","upc":42}`) + writeKeys, err := writeTmpl.RenderCacheKeys(ar, nil, []*astjson.Value{entity}, "") + require.NoError(t, err) + require.Equal(t, 1, len(writeKeys)) + + assert.Equal(t, readKeys[0].Keys[0], writeKeys[0].Keys[0], + "read path (from args) and write path (from entity data) must produce identical keys for the same entity; otherwise reads silently miss writes") +} diff --git a/v2/pkg/engine/resolve/cache_load_test.go b/v2/pkg/engine/resolve/cache_load_test.go new file mode 100644 index 0000000000..54d2ede784 --- /dev/null +++ b/v2/pkg/engine/resolve/cache_load_test.go @@ -0,0 +1,3265 @@ +package resolve + +import ( + "context" + "strings" + "sync" + "testing" + "testing/synctest" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// Verifies L2 cache loading for a nested entity graph (products -> reviews -> users). +// Tests that cached entity values are correctly merged into the response at the right paths. +func TestCacheLoad_NestedProductsFromL2(t *testing.T) { + t.Run("products with reviews - nested products from cache", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Products datasource - returns list of products + productsDS := NewMockDataSource(ctrl) + productsDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + expected := `{"method":"POST","url":"http://products.service","body":{"query":"{topProducts {__typename id name}}"}}` + assert.Equal(t, expected, string(input)) + return []byte(`{"data":{"topProducts":[{"__typename":"Product","id":"prod-1","name":"Product One"},{"__typename":"Product","id":"prod-2","name":"Product Two"}]}}`), nil + }).Times(1) + + // Reviews datasource - returns reviews for products (batch entity fetch) + reviewsDS := NewMockDataSource(ctrl) + reviewsDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + // This is a batch entity fetch for reviews based on product references + return []byte(`{"data":{"_entities":[{"__typename":"Product","reviews":[{"body":"Great product!","product":{"__typename":"Product","id":"prod-1"}},{"body":"Love it!","product":{"__typename":"Product","id":"prod-1"}}]},{"__typename":"Product","reviews":[{"body":"Awesome!","product":{"__typename":"Product","id":"prod-2"}}]}]}}`), nil + }).Times(1) + + // Nested products datasource - should NOT be called if caching works + // We create it but set Times(0) to ensure it's never called + nestedProductsDS := NewMockDataSource(ctrl) + nestedProductsDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // This should never be called - products should come from cache + + // Build the fetch tree + // 1. Root fetch: topProducts + // 2. Sequential: fetch reviews for each product (batch) + // 3. Sequential: fetch nested product (should be from cache) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } + + // ProvidesData for nested product fetch - what data the cache should have + nestedProductProvidesData := &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + // Step 1: Fetch top products + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productsDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"{topProducts {__typename id name}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // Step 2: Fetch reviews for each product (batch entity fetch) + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://reviews.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Product {reviews {body product {__typename id}}}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`,`), + SegmentType: StaticSegmentType, + }, + }, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + }, + DataSource: reviewsDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.topProducts", ArrayPath("topProducts")), + + // Step 3: Fetch nested products (should be from cache) + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Product {id name}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`,`), + SegmentType: StaticSegmentType, + }, + }, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + }, + DataSource: nestedProductsDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: nestedProductProvidesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.topProducts.reviews.product", ArrayPath("topProducts"), ArrayPath("reviews"), ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("topProducts"), + Value: &Array{ + Path: []string{"topProducts"}, + Item: &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + }, + }, + { + Name: []byte("reviews"), + Value: &Array{ + Path: []string{"reviews"}, + Item: &Object{ + Fields: []*Field{ + { + Name: []byte("body"), + Value: &String{ + Path: []string{"body"}, + }, + }, + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + // Pre-populate cache with product data (simulating what would happen + // if we had caching enabled on the root products fetch) + // In the real implementation, the first products fetch should cache these + prod1Data := `{"__typename":"Product","id":"prod-1","name":"Product One"}` + prod2Data := `{"__typename":"Product","id":"prod-2","name":"Product Two"}` + + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(prod1Data)}, + {Key: `{"__typename":"Product","key":{"id":"prod-2"}}`, Value: []byte(prod2Data)}, + }, 30*time.Second)) + require.NoError(t, err) + + cache.ClearLog() // Clear log after pre-population + + // Create loader with cache + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + ctx := NewContext(context.Background()) + // Disable subgraph request deduplication to avoid needing singleFlight + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + // Create resolvable with arena + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + // Execute + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Output for debugging + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + t.Logf("Output: %s", out) + + // Verify cache operations + cacheLog := cache.GetLog() + t.Logf("Cache log: %+v", cacheLog) + + // We expect: + // 1. A "get" operation for the nested product cache keys (should be hits) + // The nestedProductsDS.Load should NOT have been called (Times(0)) + + // Find the get operation for product cache keys + foundCacheGet := false + for _, entry := range cacheLog { + if entry.Operation == "get" { + foundCacheGet = true + // Check if we have cache hits + for _, item := range entry.Items { + t.Logf("Cache key %s: hit=%v", item.Key, item.Hit) + } + } + } + + assert.True(t, foundCacheGet, "Expected cache get operation for nested products") + }) +} + +// Verifies L2 cache hit for a single entity fetch - the simplest cache load path. +func TestCacheLoad_SingleEntityHit(t *testing.T) { + t.Run("single entity fetch with cache hit", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Pre-populate cache + productData := `{"__typename":"Product","id":"prod-1","name":"Cached Product"}` + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(productData)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Create a datasource that should NOT be called (cache hit) + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // Should never be called - we expect cache hit + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + } + + // Create a simple root response to give us initial data + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + // Root fetch to get product reference + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // Entity fetch with caching - should hit cache + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Product {id name}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + }, + }, + }, + }, + }, + }, + }, + } + + // Create loader with cache + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + ctx := NewContext(context.Background()) + // Disable subgraph request deduplication to avoid needing singleFlight + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + // Create resolvable with arena + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + // Execute + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Output for debugging + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + t.Logf("Output: %s", out) + + // Verify cache operations + cacheLog := cache.GetLog() + t.Logf("Cache log: %+v", cacheLog) + + // We expect at least one cache get that should be a hit + foundCacheHit := false + for _, entry := range cacheLog { + if entry.Operation == "get" { + for _, item := range entry.Items { + t.Logf("Cache key %s: hit=%v", item.Key, item.Hit) + if item.Hit { + foundCacheHit = true + } + } + } + } + + assert.True(t, foundCacheHit, "Expected at least one cache hit") + }) + + t.Run("single entity fetch with cache miss", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + // Cache is empty - expect cache miss + + // Create a datasource that SHOULD be called (cache miss) + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Fetched Product"}]}}`), nil + }).Times(1) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + } + + // Create a simple root response to give us initial data + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + // Root fetch to get product reference + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // Entity fetch with caching - should miss cache and fetch + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Product {id name}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + }, + }, + }, + }, + }, + }, + }, + } + + // Create loader with cache + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + ctx := NewContext(context.Background()) + // Disable subgraph request deduplication to avoid needing singleFlight + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + // Create resolvable with arena + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + // Execute + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Output for debugging + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + t.Logf("Output: %s", out) + + // Verify cache operations + cacheLog := cache.GetLog() + t.Logf("Cache log: %+v", cacheLog) + + // We expect: + // 1. A "get" operation that misses + // 2. A "set" operation to cache the result + foundCacheGet := false + foundCacheSet := false + for _, entry := range cacheLog { + if entry.Operation == "get" { + foundCacheGet = true + // Verify it's a miss + for _, item := range entry.Items { + t.Logf("Cache key %s: hit=%v", item.Key, item.Hit) + assert.False(t, item.Hit, "Expected cache miss") + } + } + if entry.Operation == "set" { + foundCacheSet = true + t.Logf("Cache set items: %v", entry.Items) + } + } + + assert.True(t, foundCacheGet, "Expected cache get operation") + assert.True(t, foundCacheSet, "Expected cache set operation after miss") + }) +} + +// Verifies the L2 miss-then-hit lifecycle: first call populates cache, second call reads from it. +func TestCacheLoad_SequentialMissThenHit(t *testing.T) { + t.Run("two sequential calls - miss then hit", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + // Cache is empty - no pre-population + + // Create a datasource that should be called exactly ONCE (first call = miss) + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Fetched Product"}]}}`), nil + }).Times(1) // Only called once - second call should hit cache + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + } + + // Root datasource - will be called twice (once per execution) + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(2) // Called for each execution + + buildResponse := func() *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Product {id name}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + }, + }, + }, + }, + }, + }, + }, + } + } + + // Shared loader with cache + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + // === First execution: expect cache MISS === + t.Log("=== First execution (expect cache miss) ===") + + ctx1 := NewContext(context.Background()) + ctx1.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx1.ExecutionOptions.Caching.EnableL2Cache = true + + ar1 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable1 := NewResolvable(ar1, ResolvableOptions{}) + err := resolvable1.Init(ctx1, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + response1 := buildResponse() + err = loader.LoadGraphQLResponseData(ctx1, response1, resolvable1) + require.NoError(t, err) + + out1 := fastjsonext.PrintGraphQLResponse(resolvable1.data, resolvable1.errors) + t.Logf("First output: %s", out1) + + // Verify first call had cache miss and set + cacheLog1 := cache.GetLog() + t.Logf("Cache log after first call: %+v", cacheLog1) + + var firstGetHits []bool + foundFirstGet := false + foundFirstSet := false + for _, entry := range cacheLog1 { + if entry.Operation == "get" { + foundFirstGet = true + firstGetHits = make([]bool, 0, len(entry.Items)) + for _, item := range entry.Items { + firstGetHits = append(firstGetHits, item.Hit) + t.Logf("First call - Cache key %s: hit=%v", item.Key, item.Hit) + } + } + if entry.Operation == "set" { + foundFirstSet = true + } + } + + assert.True(t, foundFirstGet, "Expected cache get operation on first call") + assert.True(t, foundFirstSet, "Expected cache set operation on first call (after miss)") + require.Len(t, firstGetHits, 1, "Expected exactly one cache key") + assert.False(t, firstGetHits[0], "Expected cache MISS on first call") + + // Clear log for second execution + cache.ClearLog() + + // === Second execution: expect cache HIT === + t.Log("=== Second execution (expect cache hit) ===") + + ctx2 := NewContext(context.Background()) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + response2 := buildResponse() + err = loader.LoadGraphQLResponseData(ctx2, response2, resolvable2) + require.NoError(t, err) + + out2 := fastjsonext.PrintGraphQLResponse(resolvable2.data, resolvable2.errors) + t.Logf("Second output: %s", out2) + + // Verify second call had cache hit (no set) + cacheLog2 := cache.GetLog() + t.Logf("Cache log after second call: %+v", cacheLog2) + + var secondGetHits []bool + foundSecondGet := false + foundSecondSet := false + for _, entry := range cacheLog2 { + if entry.Operation == "get" { + foundSecondGet = true + secondGetHits = make([]bool, 0, len(entry.Items)) + for _, item := range entry.Items { + secondGetHits = append(secondGetHits, item.Hit) + t.Logf("Second call - Cache key %s: hit=%v", item.Key, item.Hit) + } + } + if entry.Operation == "set" { + foundSecondSet = true + } + } + + assert.True(t, foundSecondGet, "Expected cache get operation on second call") + assert.False(t, foundSecondSet, "Expected NO cache set on second call (cache hit)") + require.Len(t, secondGetHits, 1, "Expected exactly one cache key") + assert.True(t, secondGetHits[0], "Expected cache HIT on second call") + + // Verify both outputs are identical + assert.Equal(t, out1, out2, "Both executions should produce identical output") + }) +} + +// Testing utilities + +// CacheLogItem is one key touched by a cache operation. +// Field meaning depends on Operation: +// - "get": Key + Hit are populated; TTL is unused. +// - "set": Key + TTL are populated; Hit is unused. +// - "delete": only Key is populated. +type CacheLogItem struct { + Key string + Hit bool + TTL time.Duration +} + +// CacheLogEntry tracks a cache operation for testing. +type CacheLogEntry struct { + Operation string + Items []CacheLogItem +} + +type cacheEntry struct { + data []byte + expiresAt *time.Time +} + +func withCacheEntryTTL(entries []*CacheEntry, ttl time.Duration) []*CacheEntry { + for _, entry := range entries { + if entry != nil { + entry.TTL = ttl + } + } + return entries +} + +// FakeLoaderCache is an in-memory cache implementation for testing +type FakeLoaderCache struct { + mu sync.RWMutex + storage map[string]cacheEntry + log []CacheLogEntry +} + +func NewFakeLoaderCache() *FakeLoaderCache { + return &FakeLoaderCache{ + storage: make(map[string]cacheEntry), + log: make([]CacheLogEntry, 0), + } +} + +func (f *FakeLoaderCache) cleanupExpired() { + now := time.Now() + for key, entry := range f.storage { + if entry.expiresAt != nil && now.After(*entry.expiresAt) { + delete(f.storage, key) + } + } +} + +func (f *FakeLoaderCache) Get(ctx context.Context, keys []string) ([]*CacheEntry, error) { + f.mu.Lock() + defer f.mu.Unlock() + + // Clean up expired entries before executing command + f.cleanupExpired() + + items := make([]CacheLogItem, len(keys)) + result := make([]*CacheEntry, len(keys)) + for i, key := range keys { + items[i].Key = key + if entry, exists := f.storage[key]; exists { + // Make a copy of the data to prevent external modifications + dataCopy := make([]byte, len(entry.data)) + copy(dataCopy, entry.data) + ce := &CacheEntry{ + Key: key, + Value: dataCopy, + } + // Populate RemainingTTL from expiresAt for cache age analytics + if entry.expiresAt != nil { + remaining := time.Until(*entry.expiresAt) + if remaining > 0 { + ce.RemainingTTL = remaining + } + } + result[i] = ce + items[i].Hit = true + } else { + result[i] = nil + } + } + + // Log the operation + f.log = append(f.log, CacheLogEntry{ + Operation: "get", + Items: items, + }) + + return result, nil +} + +func (f *FakeLoaderCache) Set(ctx context.Context, entries []*CacheEntry) error { + if len(entries) == 0 { + return nil + } + + f.mu.Lock() + defer f.mu.Unlock() + + // Clean up expired entries before executing command + f.cleanupExpired() + + items := make([]CacheLogItem, 0, len(entries)) + for _, entry := range entries { + if entry == nil { + continue + } + ce := cacheEntry{ + // Make a copy of the data to prevent external modifications + data: make([]byte, len(entry.Value)), + } + copy(ce.data, entry.Value) + + // Non-positive TTLs use the fake cache's no-expiration default. + if entry.TTL > 0 { + expiresAt := time.Now().Add(entry.TTL) + ce.expiresAt = &expiresAt + } + + f.storage[entry.Key] = ce + items = append(items, CacheLogItem{Key: entry.Key, TTL: entry.TTL}) + } + + // Log the operation + f.log = append(f.log, CacheLogEntry{ + Operation: "set", + Items: items, + }) + + return nil +} + +func (f *FakeLoaderCache) Delete(ctx context.Context, keys []string) error { + f.mu.Lock() + defer f.mu.Unlock() + + // Clean up expired entries before executing command + f.cleanupExpired() + + for _, key := range keys { + delete(f.storage, key) + } + items := make([]CacheLogItem, len(keys)) + for i, key := range keys { + items[i] = CacheLogItem{Key: key} + } + + // Log the operation + f.log = append(f.log, CacheLogEntry{ + Operation: "delete", + Items: items, + }) + + return nil +} + +// GetLog returns a copy of the cache operation log +func (f *FakeLoaderCache) GetLog() []CacheLogEntry { + f.mu.RLock() + defer f.mu.RUnlock() + logCopy := make([]CacheLogEntry, len(f.log)) + copy(logCopy, f.log) + return logCopy +} + +// ClearLog clears the cache operation log +func (f *FakeLoaderCache) ClearLog() { + f.mu.Lock() + defer f.mu.Unlock() + f.log = make([]CacheLogEntry, 0) +} + +// GetValue returns the raw cached value for a key, or nil if not found. +func (f *FakeLoaderCache) GetValue(key string) []byte { + f.mu.RLock() + defer f.mu.RUnlock() + if entry, exists := f.storage[key]; exists { + dataCopy := make([]byte, len(entry.data)) + copy(dataCopy, entry.data) + return dataCopy + } + return nil +} + +// Clear removes all entries from the cache +func (f *FakeLoaderCache) Clear() { + f.mu.Lock() + defer f.mu.Unlock() + f.storage = make(map[string]cacheEntry) +} + +// SetRawData directly injects data into the cache for testing purposes. +// This bypasses the normal Set path and allows injecting stale/modified data. +func (f *FakeLoaderCache) SetRawData(key string, value []byte, ttl time.Duration) { + f.mu.Lock() + defer f.mu.Unlock() + ce := cacheEntry{ + data: make([]byte, len(value)), + } + copy(ce.data, value) + if ttl > 0 { + expiresAt := time.Now().Add(ttl) + ce.expiresAt = &expiresAt + } + f.storage[key] = ce +} + +// ============================================================================= +// Shadow Mode Integration Tests +// ============================================================================= + +// normalizeCacheAnalyticsSnapshot zeroes out non-deterministic fields (FetchTimings.DurationMs) +// and normalizes empty slices to nil for consistent assert.Equal comparison. +// CacheAgeMs is deterministic when tests run inside synctest.Test (fake clock). +func normalizeCacheAnalyticsSnapshot(snap CacheAnalyticsSnapshot) CacheAnalyticsSnapshot { + // Zero out non-deterministic FetchTimings (DurationMs varies between runs) + snap.FetchTimings = nil + + // Normalize empty slices to nil + if len(snap.L1Reads) == 0 { + snap.L1Reads = nil + } + if len(snap.L2Reads) == 0 { + snap.L2Reads = nil + } + if len(snap.L1Writes) == 0 { + snap.L1Writes = nil + } + if len(snap.L2Writes) == 0 { + snap.L2Writes = nil + } + if len(snap.ErrorEvents) == 0 { + snap.ErrorEvents = nil + } + if len(snap.FieldHashes) == 0 { + snap.FieldHashes = nil + } + if len(snap.EntityTypes) == 0 { + snap.EntityTypes = nil + } + if len(snap.ShadowComparisons) == 0 { + snap.ShadowComparisons = nil + } + + return snap +} + +const ( + shadowTestKeyProduct = `{"__typename":"Product","key":{"id":"prod-1"}}` + shadowTestKeyUser = `{"__typename":"User","key":{"id":"u1"}}` +) + +// Verifies that shadow mode always fetches from the subgraph even when L2 has data. +// Shadow mode exists for staleness detection without serving potentially stale cached data. +func TestShadowMode_L2_AlwaysFetches(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root fetch (not cached) + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(2) // called twice (once per request) + + // Entity fetch - called BOTH times (shadow mode prevents cache serving) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(2) // called twice because shadow mode + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + buildResponse := func() *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + ShadowMode: true, + KeyFields: []KeyField{{Name: "id"}}, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + Info: &FetchInfo{ + DataSourceID: "products", DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }}, + }, + } + } + + // Request 1: L2 miss -> DataSource called -> L2 populated + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx1 := NewContext(context.Background()) + ctx1.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx1.ExecutionOptions.Caching.EnableL1Cache = true + ctx1.ExecutionOptions.Caching.EnableL2Cache = true + ctx1.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar1 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable1 := NewResolvable(ar1, ResolvableOptions{}) + err := resolvable1.Init(ctx1, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx1, buildResponse(), resolvable1) + require.NoError(t, err) + + out1 := fastjsonext.PrintGraphQLResponse(resolvable1.data, resolvable1.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out1) + + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L1Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", Kind: CacheKeyMiss, DataSource: "products"}, // First request, L1 is empty + }, + L2Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", Kind: CacheKeyMiss, DataSource: "products", Shadow: true}, // First request, L2 is empty; Shadow marks shadow-mode fetch + }, + L1Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", ByteSize: 59, DataSource: "products", CacheLevel: CacheLevelL1, Source: CacheSourceQuery}, // Miss triggered subgraph fetch, result written to L1 + }, + L2Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", ByteSize: 59, DataSource: "products", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}, // Miss triggered subgraph fetch, result written to L2 + }, + }), normalizeCacheAnalyticsSnapshot(ctx1.GetCacheStats())) + + // Advance fake clock by 5s so Request 2's L2 hit has a measurable CacheAgeMs + time.Sleep(5 * time.Second) + + // Request 2: L2 hit (shadow) -> DataSource STILL called + loader2 := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx2 := NewContext(context.Background()) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL1Cache = true + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + ctx2.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader2.LoadGraphQLResponseData(ctx2, buildResponse(), resolvable2) + require.NoError(t, err) + + out2 := fastjsonext.PrintGraphQLResponse(resolvable2.data, resolvable2.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out2) + + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L1Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", Kind: CacheKeyMiss, DataSource: "products"}, // New Loader instance, L1 is per-request and empty + }, + L2Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", Kind: CacheKeyHit, DataSource: "products", ByteSize: 59, Shadow: true, CacheAgeMs: 5000}, // L2 populated by Request 1, 5s ago; Shadow=true so subgraph is still fetched + }, + L1Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", ByteSize: 59, DataSource: "products", CacheLevel: CacheLevelL1, Source: CacheSourceQuery}, // Written from subgraph response (shadow mode always fetches) + }, + L2Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", ByteSize: 59, DataSource: "products", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}, // Overwritten in L2 with fresh subgraph response + }, + ShadowComparisons: []ShadowComparisonEvent{ + {CacheKey: shadowTestKeyProduct, EntityType: "Product", IsFresh: true, CachedHash: 16331343294028781429, FreshHash: 16331343294028781429, CachedBytes: 36, FreshBytes: 36, DataSource: "products", ConfiguredTTL: 30 * time.Second, CacheAgeMs: 5000}, // Cached data matches subgraph (same hash), no staleness; entry was 5s old + }, + FieldHashes: []EntityFieldHash{ + {EntityType: "Product", FieldName: "id", FieldHash: 4016270444951293489, KeyRaw: `{"id":"prod-1"}`, Source: FieldSourceShadowCached}, // Cached "id" field from shadow comparison + {EntityType: "Product", FieldName: "name", FieldHash: 8385814294091472045, KeyRaw: `{"id":"prod-1"}`, Source: FieldSourceShadowCached}, // Cached "name" field from shadow comparison + }, + }), normalizeCacheAnalyticsSnapshot(ctx2.GetCacheStats())) + }) +} + +// Verifies that shadow mode records staleness comparison events when cached data +// differs from fresh subgraph data. +func TestShadowMode_StalenessDetection(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"u1"}}}`), nil + }).Times(2) + + entityDS := NewMockDataSource(ctrl) + // First call returns "Alice" + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"u1","username":"Alice"}]}}`), nil + }).Times(1) + // Second call returns "AliceUpdated" (subgraph data changed) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"u1","username":"AliceUpdated"}]}}`), nil + }).Times(1) + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + } + + buildResponse := func() *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + ShadowMode: true, + KeyFields: []KeyField{{Name: "id"}}, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://accounts.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + Info: &FetchInfo{ + DataSourceID: "accounts", DataSourceName: "accounts", + RootFields: []GraphCoordinate{{TypeName: "User", FieldName: "username"}}, + OperationType: ast.OperationTypeQuery, ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.user", ObjectPath("user")), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &String{Path: []string{"username"}}}, + }, + }, + }}, + }, + } + } + + // Request 1: Populate L2 cache with "Alice" + loader1 := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx1 := NewContext(context.Background()) + ctx1.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx1.ExecutionOptions.Caching.EnableL1Cache = true + ctx1.ExecutionOptions.Caching.EnableL2Cache = true + ctx1.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar1 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable1 := NewResolvable(ar1, ResolvableOptions{}) + err := resolvable1.Init(ctx1, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader1.LoadGraphQLResponseData(ctx1, buildResponse(), resolvable1) + require.NoError(t, err) + + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L1Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts"}, // First request, L1 is empty + }, + L2Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts", Shadow: true}, // First request, L2 is empty; Shadow marks shadow-mode fetch + }, + L1Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", ByteSize: 50, DataSource: "accounts", CacheLevel: CacheLevelL1, Source: CacheSourceQuery}, // "Alice" written to L1 after subgraph fetch + }, + L2Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", ByteSize: 50, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}, // "Alice" written to L2 after subgraph fetch + }, + }), normalizeCacheAnalyticsSnapshot(ctx1.GetCacheStats())) + + // Advance fake clock by 5s so Request 2's L2 hit has a measurable CacheAgeMs + time.Sleep(5 * time.Second) + + // Request 2: L2 has "Alice" but subgraph returns "AliceUpdated" + loader2 := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx2 := NewContext(context.Background()) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL1Cache = true + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + ctx2.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader2.LoadGraphQLResponseData(ctx2, buildResponse(), resolvable2) + require.NoError(t, err) + + // Verify fresh data is served (not stale cache) + out2 := fastjsonext.PrintGraphQLResponse(resolvable2.data, resolvable2.errors) + assert.Equal(t, `{"data":{"user":{"__typename":"User","id":"u1","username":"AliceUpdated"}}}`, out2) + + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L1Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", Kind: CacheKeyMiss, DataSource: "accounts"}, // New Loader instance, L1 is per-request and empty + }, + L2Reads: []CacheKeyEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", Kind: CacheKeyHit, DataSource: "accounts", ByteSize: 50, Shadow: true, CacheAgeMs: 5000}, // L2 has "Alice" from Request 1, 5s ago; Shadow=true so subgraph is still fetched + }, + L1Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", ByteSize: 57, DataSource: "accounts", CacheLevel: CacheLevelL1, Source: CacheSourceQuery}, // "AliceUpdated" written to L1 from fresh subgraph response + }, + L2Writes: []CacheWriteEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", ByteSize: 57, DataSource: "accounts", CacheLevel: CacheLevelL2, TTL: 30 * time.Second, Source: CacheSourceQuery}, // "AliceUpdated" overwrites "Alice" in L2 + }, + ShadowComparisons: []ShadowComparisonEvent{ + {CacheKey: shadowTestKeyUser, EntityType: "User", IsFresh: false, CachedHash: 272931794584083561, FreshHash: 4550742678894771079, CachedBytes: 30, FreshBytes: 37, DataSource: "accounts", ConfiguredTTL: 30 * time.Second, CacheAgeMs: 5000}, // Cached "Alice" differs from fresh "AliceUpdated" (different hashes); entry was 5s old + }, + FieldHashes: []EntityFieldHash{ + {EntityType: "User", FieldName: "id", FieldHash: 13311642224980425257, KeyRaw: `{"id":"u1"}`, Source: FieldSourceShadowCached}, // Cached "id" field from "Alice" entity + {EntityType: "User", FieldName: "username", FieldHash: 5631231822564450273, KeyRaw: `{"id":"u1"}`, Source: FieldSourceShadowCached}, // Cached "username"="Alice" (stale value) + }, + }), normalizeCacheAnalyticsSnapshot(ctx2.GetCacheStats())) + }) +} + +// Verifies that L1 cache operates normally even when shadow mode is enabled for L2. +// Shadow mode should only affect L2 behavior. +func TestShadowMode_L1_WorksNormally(t *testing.T) { + t.Run("L1 cache serves data normally even with shadow mode entity", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // Entity fetch called only ONCE (second occurrence served from L1) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + // Second entity fetch for SAME entity - should hit L1 (not called) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + // First entity fetch (shadow mode + L1) + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + ShadowMode: true, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + Info: &FetchInfo{ + DataSourceID: "products", DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + // Second entity fetch for SAME entity - should hit L1 (shadow doesn't affect L1) + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + ShadowMode: true, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + Info: &FetchInfo{ + DataSourceID: "products", DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }}, + }, + } + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = false // L2 disabled — only L1 can serve the second fetch + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + // No stats when analytics disabled — EnableCacheAnalytics not set, so no events are collected + assert.Equal(t, CacheAnalyticsSnapshot{}, ctx.GetCacheStats()) + }) +} + +// Verifies that shadow mode works safely when analytics are disabled. +func TestShadowMode_WithoutAnalytics(t *testing.T) { + t.Run("shadow mode works without analytics - safety only", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(2) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(2) // Called both times (shadow mode) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + buildResponse := func() *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + ShadowMode: true, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + Info: &FetchInfo{ + DataSourceID: "products", DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }}, + }, + } + } + + // Request 1: Populate cache + loader1 := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx1 := NewContext(context.Background()) + ctx1.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx1.ExecutionOptions.Caching.EnableL1Cache = true + ctx1.ExecutionOptions.Caching.EnableL2Cache = true + // Analytics disabled + + ar1 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable1 := NewResolvable(ar1, ResolvableOptions{}) + err := resolvable1.Init(ctx1, nil, ast.OperationTypeQuery) + require.NoError(t, err) + err = loader1.LoadGraphQLResponseData(ctx1, buildResponse(), resolvable1) + require.NoError(t, err) + + // Empty: EnableCacheAnalytics not set, so no L1/L2 events are recorded + assert.Equal(t, CacheAnalyticsSnapshot{}, ctx1.GetCacheStats()) + + // Request 2: Shadow mode - still fetches from subgraph + loader2 := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx2 := NewContext(context.Background()) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL1Cache = true + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + // Analytics disabled + + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + err = loader2.LoadGraphQLResponseData(ctx2, buildResponse(), resolvable2) + require.NoError(t, err) + + out2 := fastjsonext.PrintGraphQLResponse(resolvable2.data, resolvable2.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out2) + + // Empty: EnableCacheAnalytics not set, so no events or shadow comparisons collected + assert.Equal(t, CacheAnalyticsSnapshot{}, ctx2.GetCacheStats()) + }) +} + +// ErrorLoaderCache wraps FakeLoaderCache but returns errors on Get/Set calls +// when configured to do so. Used for testing L2 error resilience. +type ErrorLoaderCache struct { + *FakeLoaderCache + + getErr error + setErr error +} + +func (e *ErrorLoaderCache) Get(ctx context.Context, keys []string) ([]*CacheEntry, error) { + if e.getErr != nil { + return nil, e.getErr + } + return e.FakeLoaderCache.Get(ctx, keys) +} + +func (e *ErrorLoaderCache) Set(ctx context.Context, entries []*CacheEntry) error { + if e.setErr != nil { + return e.setErr + } + return e.FakeLoaderCache.Set(ctx, entries) +} + +// buildProductEntityResponse creates a GraphQLResponse for a single product entity fetch. +// Used by error resilience and mutation skip tests to avoid repeating boilerplate. +func buildProductEntityResponse(rootDS, entityDS DataSource, cacheKeyTemplate CacheKeyTemplate, providesData *Object, operationType ast.OperationType) *GraphQLResponse { + rootOpName := "query" + rootFieldType := "Query" + rootFieldName := "product" + if operationType == ast.OperationTypeMutation { + rootOpName = "mutation" + rootFieldType = "Mutation" + rootFieldName = "updateUser" + } + + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: operationType}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "ds", DataSourceName: "ds", + RootFields: []GraphCoordinate{{TypeName: rootFieldType, FieldName: rootFieldName}}, + OperationType: operationType, + }, + }, rootOpName), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://ds.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "ds", DataSourceName: "ds", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "name"}}, + OperationType: operationType, ProvidesData: providesData, + }, + }, rootOpName+"."+rootFieldName, ObjectPath(rootFieldName)), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte(rootFieldName), + Value: &Object{ + Path: []string{rootFieldName}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }}, + }, + } +} + +// Verifies graceful degradation when the L2 cache returns errors. +// Cache failures should fall through to subgraph fetch, not fail the request. +func TestL2CacheErrorResilience(t *testing.T) { + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + + t.Run("L2 Get error falls through to fetch", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + errorCache := &ErrorLoaderCache{ + FakeLoaderCache: NewFakeLoaderCache(), + getErr: assert.AnError, + } + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + response := buildProductEntityResponse(rootDS, entityDS, productCacheKeyTemplate, providesData, ast.OperationTypeQuery) + + loader := &Loader{caches: map[string]LoaderCache{"default": errorCache}} + ctx := NewContext(t.Context()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + }) + + t.Run("L2 Set error does not fail request", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + errorCache := &ErrorLoaderCache{ + FakeLoaderCache: NewFakeLoaderCache(), + setErr: assert.AnError, + } + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + response := buildProductEntityResponse(rootDS, entityDS, productCacheKeyTemplate, providesData, ast.OperationTypeQuery) + + loader := &Loader{caches: map[string]LoaderCache{"default": errorCache}} + ctx := NewContext(t.Context()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + }) + + t.Run("corrupted cache entry treated as miss", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + // Pre-populate cache with corrupted JSON using the real key format + _ = cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(`{not valid json!!!}`)}, + }, 30*time.Second)) + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) // Must fetch because cached entry is corrupted + + response := buildProductEntityResponse(rootDS, entityDS, productCacheKeyTemplate, providesData, ast.OperationTypeQuery) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + // Verify L2 cache was actually accessed (Get returned the corrupted entry, then Set wrote fresh data) + log := cache.GetLog() + assert.Equal(t, 3, len(log), "should have set (seed) + get (corrupted hit) + set (fresh data)") + assert.Equal(t, "set", log[0].Operation) + assert.Equal(t, "get", log[1].Operation) + assert.Equal(t, true, log[1].Items[0].Hit, "L2 Get should find the seeded corrupted entry") + assert.Equal(t, "set", log[2].Operation) + }) +} + +// Verifies that mutation operations bypass L2 cache reads and always fetch fresh data. +// Mutations must not serve stale cached entities. +func TestMutationSkipsL2Read(t *testing.T) { + t.Run("mutation operation type skips L2 read and always fetches", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + // Pre-populate cache with stale data using the real key format + _ = cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(`{"__typename":"Product","id":"prod-1","name":"Old Name"}`)}, + }, 30*time.Second)) + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"updateUser":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"New Name"}]}}`), nil + }).Times(1) // Must fetch fresh data despite cache having stale entry + + response := buildProductEntityResponse(rootDS, entityDS, userCacheKeyTemplate, providesData, ast.OperationTypeMutation) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeMutation) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"updateUser":{"__typename":"Product","id":"prod-1","name":"New Name"}}}`, out, "mutation should fetch fresh data, not use cached stale data") + }) +} + +func newUserRootQueryTemplate(requestedFields []string, entityKeyFields []string) *RootQueryCacheKeyTemplate { + rootArgs := make([]FieldArgument, 0, len(requestedFields)) + for _, field := range requestedFields { + rootArgs = append(rootArgs, FieldArgument{ + Name: field, + Variable: &ContextVariable{ + Path: []string{field}, + Renderer: NewPlainVariableRenderer(), + }, + }) + } + + entityKeyMappings := make([]EntityKeyMappingConfig, 0, len(entityKeyFields)) + for _, field := range entityKeyFields { + entityKeyMappings = append(entityKeyMappings, EntityKeyMappingConfig{ + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + { + EntityKeyField: field, + ArgumentPath: []string{field}, + }, + }, + }) + } + + return NewRootQueryCacheKeyTemplate( + []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + Args: rootArgs, + }, + }, + entityKeyMappings, + ) +} + +func newUserRootQueryResponse(rootDS DataSource, cacheKeyTemplate CacheKeyTemplate, providesData *Object) *GraphQLResponse { + rootProvidesData := providesData + if providesData != nil { + rootProvidesData = &Object{ + Fields: providesData.Fields, + } + rootProvidesData = &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: rootProvidesData, + }, + }, + } + } + + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "accounts", + DataSourceName: "accounts", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "user"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: rootProvidesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("email"), Value: &String{Path: []string{"email"}}}, + {Name: []byte("username"), Value: &String{Path: []string{"username"}}}, + }, + }, + }, + }, + }, + } +} + +// Verifies that when all EntityKeyMappings produce cache hits, the fetch is skipped +// and missing derived keys are backfilled from the cached data. +func TestCacheBackfill_SkipFetch_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Scenario: the request asks for id + email keys, only the id key is cached, + // and that cached entity already contains the email field required to prove + // the missing sibling key. The loader should skip the subgraph fetch, backfill + // only the missing email key, and leave the existing id key untouched. + cache := NewFakeLoaderCache() + idKey := `{"__typename":"User","key":{"id":"u1"}}` + emailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + + // Seed L2 with only the id key. The stored entity is complete enough to serve + // the request and to prove that the email key belongs to the same entity. + err := cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // The request should stay on the cache-only path, so the root datasource must + // never be called. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email"}), + &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + }, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Assert the exact cache story: + // 1. L2 reads both requested keys and finds only the id key. + // 2. L2 writes only the missing email key. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: emailKey, Hit: false}, + }, + }, + {Operation: "set", Items: []CacheLogItem{{Key: emailKey, TTL: 30 * time.Second}}}, + }, cache.GetLog()) + // Assert the written value matches the final merged entity and that the + // existing id entry was preserved rather than rewritten. + assert.Equal(t, `{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`, string(cache.GetValue(emailKey))) + assert.Equal(t, `{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`, string(cache.GetValue(idKey))) + + snap := normalizeCacheAnalyticsSnapshot(ctx.GetCacheStats()) + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L2Reads: []CacheKeyEvent{ + // id key found in L2 (first key in CacheKey.Keys) + { + CacheKey: idKey, + EntityType: "Query", + Kind: CacheKeyHit, + DataSource: "accounts", + ByteSize: 83, + }, + }, + L2Writes: []CacheWriteEvent{ + // backfill: missing requested key proven by cached entity data + { + CacheKey: emailKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonBackfill, + }, + }, + }), snap) +} + +// REGRESSION: a root-field SingleFetch whose L2 lookup is a complete cache hit +// must record `LoadSkipped = true` on the fetch's DataSourceLoadTrace, mirroring +// how the entity-fetch and bulk-parallel paths already do. Otherwise downstream +// observability (Cosmo Router cache_trace, ART) reports `load_skipped=false` on +// fetches that demonstrably never called the subgraph — making it impossible to +// distinguish "served from cache" from "fetched fresh". +func TestSingleFetch_CacheHit_SetsLoadSkippedOnTrace_RED(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + idKey := `{"__typename":"User","key":{"id":"u1"}}` + emailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + + // Pre-warm L2 with a fully-derivable cached entity so tryCacheLoad returns skip=true. + err := cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Subgraph must NOT be called. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email"}), + &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + }, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + // Enable tracing — that's how the loader populates fetch.Trace.LoadSkipped. + ctx.TracingOptions.Enable = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Walk the fetch tree to find the SingleFetch and verify its trace. + var checked int + walkFetchTreeForTest(response.Fetches, func(f Fetch) { + single, ok := f.(*SingleFetch) + if !ok { + return + } + require.NotNil(t, single.Trace, "SingleFetch.Trace must be populated when tracing is enabled") + assert.True(t, single.Trace.LoadSkipped, + "SingleFetch.Trace.LoadSkipped must be true when tryCacheLoad returned skip=true (cache hit, no subgraph call)") + checked++ + }) + assert.Equal(t, 1, checked, "expected exactly one SingleFetch to inspect") + + // Sanity: the cache get happened, no set, no subgraph call. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: emailKey, Hit: false}, + }, + }, + {Operation: "set", Items: []CacheLogItem{{Key: emailKey, TTL: 30 * time.Second}}}, + }, cache.GetLog()) +} + +// walkFetchTreeForTest visits every Fetch in the tree. +func walkFetchTreeForTest(node *FetchTreeNode, visit func(Fetch)) { + if node == nil { + return + } + if node.Kind == FetchTreeNodeKindSingle && node.Item != nil && node.Item.Fetch != nil { + visit(node.Item.Fetch) + } + for _, c := range node.ChildNodes { + walkFetchTreeForTest(c, visit) + } +} + +// Verifies that backfill is skipped when the cached entity data doesn't contain +// the fields needed to derive the missing key. +func TestCacheBackfill_SkipFetch_Counterexample_NotDerivable(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Scenario: the request asks for id + email keys, only the id key is cached, + // but the cached entity does not contain email. The loader may still skip the + // fetch because the requested response only needs id + username, but it must + // not backfill the missing email key from request args alone. + cache := NewFakeLoaderCache() + idKey := `{"__typename":"User","key":{"id":"u1"}}` + emailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + + // Seed L2 with only the id key and omit email from the cached entity to make + // the missing email key impossible to prove from final entity data. + err := cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"__typename":"User","id":"u1","username":"Alice"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Cache-only path again: the subgraph must not be called. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email"}), + &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + }, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Assert the exact cache story: + // 1. L2 reads both requested keys and finds only the id key. + // 2. No write happens because email is still not provable from the final entity. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: emailKey, Hit: false}, + }, + }, + }, cache.GetLog()) + // Assert the missing email key stays absent and the original id entry is unchanged. + assert.Nil(t, cache.GetValue(emailKey)) + assert.Equal(t, `{"__typename":"User","id":"u1","username":"Alice"}`, string(cache.GetValue(idKey))) + + snap := normalizeCacheAnalyticsSnapshot(ctx.GetCacheStats()) + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L2Reads: []CacheKeyEvent{ + // id key found in L2 (entity lacks email field) + { + CacheKey: idKey, + EntityType: "Query", + Kind: CacheKeyHit, + DataSource: "accounts", + ByteSize: 59, + }, + }, + // no L2 writes: email field missing from entity, cannot prove emailKey + }), snap) +} + +// Verifies that after a subgraph fetch, both the requested key and the derived key +// are written to L2 cache. +func TestCacheBackfill_FetchPath_HappyPath(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Scenario: the request asks for id + email keys, only the id key is cached, + // and the cached entity is too incomplete to satisfy the request. The loader + // must fetch fresh data, refresh the existing id key, and backfill the missing + // email key from the fetched entity. + cache := NewFakeLoaderCache() + idKey := `{"__typename":"User","key":{"id":"u1"}}` + emailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + + // Seed L2 with a stale/incomplete id entry so the fetch path is required. + err := cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"__typename":"User","id":"u1"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // The subgraph returns the complete entity, which should refresh id and prove email. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}}}`), nil + }).Times(1) + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email"}), + &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + }, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Assert the exact cache story: + // 1. L2 reads both requested keys and finds only the stale id key. + // 2. The fetch runs and writes both the refreshed id key and the backfilled email key. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: emailKey, Hit: false}, + }, + }, + { + Operation: "set", + Items: []CacheLogItem{ + {Key: idKey, TTL: 30 * time.Second}, + {Key: emailKey, TTL: 30 * time.Second}, + }, + }, + }, cache.GetLog()) + // Assert both keys now store the same fresh entity payload. + assert.Equal(t, `{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`, string(cache.GetValue(idKey))) + assert.Equal(t, `{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`, string(cache.GetValue(emailKey))) + + snap := normalizeCacheAnalyticsSnapshot(ctx.GetCacheStats()) + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L2Reads: []CacheKeyEvent{ + // id key found but incomplete for ProvidesData → partial hit, fetch needed + { + CacheKey: idKey, + EntityType: "Query", + Kind: CacheKeyPartialHit, + DataSource: "accounts", + }, + }, + L2Writes: []CacheWriteEvent{ + // refresh: existing key rewritten with fresh subgraph data + { + CacheKey: idKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonRefresh, + }, + // backfill: missing requested key proven by subgraph response + { + CacheKey: emailKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonBackfill, + }, + }, + }), snap) +} + +// Verifies that when the subgraph response is missing a field needed for key derivation, +// only the requested key is written (derived key is skipped). +func TestCacheBackfill_FetchPath_MissingField(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Scenario: the request asks for id + email keys, only the id key is cached, + // and the fetch runs. The fetched entity still does not contain email, so the + // loader may refresh the existing id key but must not backfill email. + cache := NewFakeLoaderCache() + idKey := `{"__typename":"User","key":{"id":"u1"}}` + emailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + + // Seed L2 with an incomplete id entry to force the fetch path. + err := cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"__typename":"User","id":"u1"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // The subgraph returns username but still no email. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"u1","username":"Alice"}}}`), nil + }).Times(1) + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email"}), + &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + }, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Assert the exact cache story: + // 1. L2 reads both requested keys and finds only the id key. + // 2. The fetch refreshes id with the new data. + // 3. The email key is backfilled with the response payload, even though the response + // didn't carry the email field. The cache key was derived from the request arguments, + // and a non-null response from the subgraph confirms this entity matches that key. + // A future query selecting `email` would trigger a widening refetch since the cached + // payload doesn't contain it; a query selecting only id+username gets a cache hit. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: emailKey, Hit: false}, + }, + }, + { + Operation: "set", + Items: []CacheLogItem{ + {Key: idKey, TTL: 30 * time.Second}, + {Key: emailKey, TTL: 30 * time.Second}, + }, + }, + }, cache.GetLog()) + assert.Equal(t, `{"__typename":"User","id":"u1","username":"Alice"}`, string(cache.GetValue(idKey))) + assert.Equal(t, `{"__typename":"User","id":"u1","username":"Alice"}`, string(cache.GetValue(emailKey))) + + snap := normalizeCacheAnalyticsSnapshot(ctx.GetCacheStats()) + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L2Reads: []CacheKeyEvent{ + // id key found but incomplete for ProvidesData → partial hit, fetch needed + { + CacheKey: idKey, + EntityType: "Query", + Kind: CacheKeyPartialHit, + DataSource: "accounts", + }, + }, + L2Writes: []CacheWriteEvent{ + // refresh: existing key rewritten with fresh data + { + CacheKey: idKey, + EntityType: "Query", + ByteSize: 50, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonRefresh, + }, + // backfill: email key was missing on read; written with the response payload + // because the entity is the canonical match for the request args. + { + CacheKey: emailKey, + EntityType: "Query", + ByteSize: 50, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonBackfill, + }, + }, + }), snap) +} + +// Verifies that when the entity's field value doesn't match the requested argument, +// the derived key is written but the unproven requested key is skipped. +func TestCacheBackfill_FetchPath_ValueMismatch(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Scenario: the request asks for email=a@example.com, but the fetched entity + // comes back with email=b@example.com. The loader must refresh the existing id + // key, must NOT backfill the requested email key (a@), but MUST write a derived + // key for the actual email value (b@) because it is backend-proven entity data. + cache := NewFakeLoaderCache() + idKey := `{"__typename":"User","key":{"id":"u1"}}` + requestedEmailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + actualEmailKey := `{"__typename":"User","key":{"email":"b@example.com"}}` + + // Seed L2 with an incomplete id entry to force the fetch path. + err := cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"__typename":"User","id":"u1"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // The subgraph returns a different email value than the requested key. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"u1","email":"b@example.com","username":"Alice"}}}`), nil + }).Times(1) + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email"}), + &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + }, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Assert the exact cache story: + // 1. L2 reads both requested keys and finds only the id key. + // 2. The fetch refreshes id with fresh data. + // 3. The requested email key (a@) is NOT written — the entity doesn't prove it. + // 4. The actual email key (b@) IS written — the subgraph returned b@example.com + // as backend-proven entity data, so we can build and store a key for it. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: requestedEmailKey, Hit: false}, + }, + }, + { + Operation: "set", + Items: []CacheLogItem{ + {Key: idKey, TTL: 30 * time.Second}, + {Key: actualEmailKey, TTL: 30 * time.Second}, + }, + }, + }, cache.GetLog()) + assert.Equal(t, `{"__typename":"User","id":"u1","email":"b@example.com","username":"Alice"}`, string(cache.GetValue(idKey))) + assert.Nil(t, cache.GetValue(requestedEmailKey)) + assert.Equal(t, `{"__typename":"User","id":"u1","email":"b@example.com","username":"Alice"}`, string(cache.GetValue(actualEmailKey))) + + snap := normalizeCacheAnalyticsSnapshot(ctx.GetCacheStats()) + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L2Reads: []CacheKeyEvent{ + // id key found but incomplete for ProvidesData → partial hit, fetch needed + { + CacheKey: idKey, + EntityType: "Query", + Kind: CacheKeyPartialHit, + DataSource: "accounts", + }, + }, + L2Writes: []CacheWriteEvent{ + // refresh: existing key rewritten with fresh subgraph data + { + CacheKey: idKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonRefresh, + }, + // derived: subgraph returned b@ email, written as new derived key + { + CacheKey: actualEmailKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonDerived, + }, + }, + }), snap) +} + +// Verifies that derived key expansion writes cache entries for entity key mappings +// that weren't part of the original request. +func TestCacheBackfill_DerivedKeyExpansion(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Scenario: the request asks for id + email keys, but the cache config also + // knows about username as a third entity key. The fetch runs, returns all + // three fields, and the loader should refresh id, backfill email, and add the + // extra derived username key from final entity data. + cache := NewFakeLoaderCache() + idKey := `{"__typename":"User","key":{"id":"u1"}}` + emailKey := `{"__typename":"User","key":{"email":"a@example.com"}}` + usernameKey := `{"__typename":"User","key":{"username":"Alice"}}` + + // Seed L2 with only the incomplete id entry so the fetch path is required. + err := cache.Set(t.Context(), withCacheEntryTTL([]*CacheEntry{ + {Key: idKey, Value: []byte(`{"__typename":"User","id":"u1"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // The subgraph returns the full entity, including the extra username key field. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}}}`), nil + }).Times(1) + + response := newUserRootQueryResponse( + rootDS, + newUserRootQueryTemplate([]string{"id", "email"}, []string{"id", "email", "username"}), + &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + }, + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"u1","email":"a@example.com"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Assert the exact cache story: + // 1. L2 reads the requested id + email keys and finds only id. + // 2. The fetch refreshes id, backfills email, and adds the derived username key. + assert.Equal(t, []CacheLogEntry{ + { + Operation: "get", + Items: []CacheLogItem{ + {Key: idKey, Hit: true}, + {Key: emailKey, Hit: false}, + }, + }, + { + Operation: "set", + Items: []CacheLogItem{ + {Key: idKey, TTL: 30 * time.Second}, + {Key: emailKey, TTL: 30 * time.Second}, + {Key: usernameKey, TTL: 30 * time.Second}, + }, + }, + }, cache.GetLog()) + // Assert all three keys now point at the same final entity payload. + assert.Equal(t, `{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`, string(cache.GetValue(idKey))) + assert.Equal(t, `{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`, string(cache.GetValue(emailKey))) + assert.Equal(t, `{"__typename":"User","id":"u1","email":"a@example.com","username":"Alice"}`, string(cache.GetValue(usernameKey))) + + snap := normalizeCacheAnalyticsSnapshot(ctx.GetCacheStats()) + assert.Equal(t, normalizeCacheAnalyticsSnapshot(CacheAnalyticsSnapshot{ + L2Reads: []CacheKeyEvent{ + // id key found but incomplete for ProvidesData → partial hit, fetch needed + { + CacheKey: idKey, + EntityType: "Query", + Kind: CacheKeyPartialHit, + DataSource: "accounts", + }, + }, + L2Writes: []CacheWriteEvent{ + // refresh: existing key rewritten with fresh subgraph data + { + CacheKey: idKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonRefresh, + }, + // backfill: missing requested key proven by subgraph response + { + CacheKey: emailKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonBackfill, + }, + // derived: username key not requested but derivable from entity data + { + CacheKey: usernameKey, + EntityType: "Query", + ByteSize: 74, + DataSource: "accounts", + CacheLevel: CacheLevelL2, + TTL: 30 * time.Second, + Source: CacheSourceQuery, + WriteReason: CacheWriteReasonDerived, + }, + }, + }), snap) +} + +// Verifies that writeCanonicalJSON produces deterministic output regardless of +// key ordering in the input, ensuring stable cache keys. +func TestWriteCanonicalJSON(t *testing.T) { + canonicalize := func(input string) string { + v, err := astjson.Parse(input) + require.NoError(t, err) + var buf strings.Builder + writeCanonicalJSON(&buf, v) + return buf.String() + } + + t.Run("object keys sorted alphabetically", func(t *testing.T) { + assert.Equal(t, `{"a":1,"b":2,"c":3}`, canonicalize(`{"c":3,"a":1,"b":2}`)) + }) + + t.Run("different key order produces same output", func(t *testing.T) { + out1 := canonicalize(`{"style":"FORMAL","formatting":{"uppercase":true}}`) + out2 := canonicalize(`{"formatting":{"uppercase":true},"style":"FORMAL"}`) + assert.Equal(t, out1, out2) + assert.Equal(t, `{"formatting":{"uppercase":true},"style":"FORMAL"}`, out1) + }) + + t.Run("nested objects sorted recursively", func(t *testing.T) { + out := canonicalize(`{"z":{"b":2,"a":1},"a":{"d":4,"c":3}}`) + assert.Equal(t, `{"a":{"c":3,"d":4},"z":{"a":1,"b":2}}`, out) + }) + + t.Run("array elements preserve order", func(t *testing.T) { + assert.Equal(t, `[3,1,2]`, canonicalize(`[3,1,2]`)) + }) + + t.Run("array of objects sorted by keys", func(t *testing.T) { + out := canonicalize(`[{"b":2,"a":1},{"d":4,"c":3}]`) + assert.Equal(t, `[{"a":1,"b":2},{"c":3,"d":4}]`, out) + }) + + t.Run("empty object", func(t *testing.T) { + assert.Equal(t, `{}`, canonicalize(`{}`)) + }) + + t.Run("empty array", func(t *testing.T) { + assert.Equal(t, `[]`, canonicalize(`[]`)) + }) + + t.Run("scalar string", func(t *testing.T) { + assert.Equal(t, `"hello"`, canonicalize(`"hello"`)) + }) + + t.Run("scalar number", func(t *testing.T) { + assert.Equal(t, `42`, canonicalize(`42`)) + }) + + t.Run("scalar boolean", func(t *testing.T) { + assert.Equal(t, `true`, canonicalize(`true`)) + assert.Equal(t, `false`, canonicalize(`false`)) + }) + + t.Run("null", func(t *testing.T) { + assert.Equal(t, `null`, canonicalize(`null`)) + }) + + t.Run("mixed nested structure", func(t *testing.T) { + input := `{"tags":["b","a"],"config":{"z":true,"a":false},"name":"test"}` + expected := `{"config":{"a":false,"z":true},"name":"test","tags":["b","a"]}` + assert.Equal(t, expected, canonicalize(input)) + }) +} diff --git a/v2/pkg/engine/resolve/cache_utility_coverage_test.go b/v2/pkg/engine/resolve/cache_utility_coverage_test.go new file mode 100644 index 0000000000..8f5e4f70c9 --- /dev/null +++ b/v2/pkg/engine/resolve/cache_utility_coverage_test.go @@ -0,0 +1,499 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +// TestRootFieldL2CachePrefix verifies that rootFieldL2CachePrefix correctly +// combines the global prefix and header hash into an L2 cache key prefix. +// +// The `includeHeaderPrefix` flag is the source of truth for whether header +// partitioning is active for this fetch — it's set in tryL2CacheLoad alongside +// headerHash whenever `IncludeSubgraphHeaderPrefix && SubgraphHeadersBuilder != nil`. +// The flag matters for the empty-headers case: hash == 0 from "no headers +// forwarded" must still produce a "0:" prefix so the WRITE key matches the +// READ key (which always builds the prefix when partitioning is active). +func TestRootFieldL2CachePrefix(t *testing.T) { + tests := []struct { + name string + globalPrefix string + headerHash uint64 + includeHeaderPrefix bool + expected string + }{ + { + name: "both globalPrefix and headerHash present", + globalPrefix: "tenant123", + headerHash: 12345, + includeHeaderPrefix: true, + expected: "tenant123:12345", + }, + { + name: "headerHash only", + globalPrefix: "", + headerHash: 12345, + includeHeaderPrefix: true, + expected: "12345", + }, + { + name: "globalPrefix only, no header partitioning", + globalPrefix: "tenant123", + headerHash: 0, + includeHeaderPrefix: false, + expected: "tenant123", + }, + { + name: "neither present, no header partitioning", + globalPrefix: "", + headerHash: 0, + includeHeaderPrefix: false, + expected: "", + }, + // REGRESSION: includeHeaders=true with no headers forwarded (hash=0). + // Previously the WRITE path dropped the prefix because hash==0, + // while the READ path built "0:..." — every read missed. + { + name: "includeHeaders=true, hash=0 (no headers forwarded), no globalPrefix", + globalPrefix: "", + headerHash: 0, + includeHeaderPrefix: true, + expected: "0", + }, + { + name: "includeHeaders=true, hash=0 (no headers forwarded), with globalPrefix", + globalPrefix: "tenant123", + headerHash: 0, + includeHeaderPrefix: true, + expected: "tenant123:0", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix = tt.globalPrefix + + l := &Loader{ctx: ctx} + res := &result{ + headerHash: tt.headerHash, + includeHeaderPrefix: tt.includeHeaderPrefix, + } + + got := l.rootFieldL2CachePrefix(res) + assert.Equal(t, tt.expected, got) + }) + } +} + +// TestApplyL2CacheKeyInterceptor verifies that applyL2CacheKeyInterceptor +// returns the key unchanged when no interceptor is set, and applies the +// interceptor function correctly when one is configured. +func TestApplyL2CacheKeyInterceptor(t *testing.T) { + t.Run("nil interceptor returns key unchanged", func(t *testing.T) { + ctx := NewContext(context.Background()) + // No interceptor set (nil by default) + + l := &Loader{ctx: ctx} + res := &result{ + ds: DataSourceInfo{Name: "accounts"}, + cacheConfig: FetchCacheConfiguration{CacheName: "default"}, + } + + got := l.applyL2CacheKeyInterceptor("entity:user:1", res) + assert.Equal(t, "entity:user:1", got) + }) + + t.Run("interceptor that prepends tenant", func(t *testing.T) { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, _ L2CacheKeyInterceptorInfo) string { + return "tenantX:" + key + } + + l := &Loader{ctx: ctx} + res := &result{ + ds: DataSourceInfo{Name: "accounts"}, + cacheConfig: FetchCacheConfiguration{CacheName: "default"}, + } + + got := l.applyL2CacheKeyInterceptor("entity:user:1", res) + assert.Equal(t, "tenantX:entity:user:1", got) + }) + + t.Run("interceptor uses fetchInfo DataSourceName", func(t *testing.T) { + ctx := NewContext(context.Background()) + var capturedInfo L2CacheKeyInterceptorInfo + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, info L2CacheKeyInterceptorInfo) string { + capturedInfo = info + return key + } + + l := &Loader{ctx: ctx} + res := &result{ + ds: DataSourceInfo{Name: "accounts"}, + cacheConfig: FetchCacheConfiguration{CacheName: "myCache"}, + fetchInfo: &FetchInfo{DataSourceName: "overridden-accounts"}, + } + + l.applyL2CacheKeyInterceptor("key", res) + // fetchInfo.DataSourceName overrides ds.Name + assert.Equal(t, L2CacheKeyInterceptorInfo{ + SubgraphName: "overridden-accounts", + CacheName: "myCache", + }, capturedInfo) + }) +} + +// TestCompareCacheCandidateFreshness verifies the ordering logic that selects +// the freshest cache candidate when multiple L2 entries exist for the same key. +func TestCompareCacheCandidateFreshness(t *testing.T) { + tests := []struct { + name string + a, b time.Duration + expected int + }{ + { + name: "both unknown (0, 0) — equal", + a: 0, + b: 0, + expected: 0, + }, + { + name: "only a known — a is fresher", + a: 100 * time.Millisecond, + b: 0, + expected: -1, + }, + { + name: "only b known — b is fresher", + a: 0, + b: 100 * time.Millisecond, + expected: 1, + }, + { + name: "both known, b has more remaining TTL — b is fresher", + a: 100 * time.Millisecond, + b: 200 * time.Millisecond, + expected: 1, + }, + { + name: "both known, equal TTL", + a: 100 * time.Millisecond, + b: 100 * time.Millisecond, + expected: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := compareCacheCandidateFreshness(tt.a, tt.b) + assert.Equal(t, tt.expected, got) + }) + } +} + +// TestMergeCachedValueForWrite verifies that mergeCachedValueForWrite preserves +// older cached fields while letting fresh fields win on overlap. +func TestMergeCachedValueForWrite(t *testing.T) { + a := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + t.Run("cachedValue nil returns freshValue", func(t *testing.T) { + fresh := astjson.MustParse(`{"name":"Alice"}`) + got := mergeCachedValueForWrite(a, nil, fresh) + assert.Equal(t, `{"name":"Alice"}`, string(got.MarshalTo(nil))) + }) + + t.Run("freshValue nil returns nil", func(t *testing.T) { + cached := astjson.MustParse(`{"name":"Alice"}`) + got := mergeCachedValueForWrite(a, cached, nil) + assert.Nil(t, got) + }) + + t.Run("cachedValue not object returns freshValue", func(t *testing.T) { + cached := astjson.MustParse(`[1,2,3]`) + fresh := astjson.MustParse(`{"name":"Bob"}`) + got := mergeCachedValueForWrite(a, cached, fresh) + assert.Equal(t, `{"name":"Bob"}`, string(got.MarshalTo(nil))) + }) + + t.Run("freshValue not object returns freshValue", func(t *testing.T) { + cached := astjson.MustParse(`{"name":"Alice"}`) + fresh := astjson.MustParse(`"just a string"`) + got := mergeCachedValueForWrite(a, cached, fresh) + assert.Equal(t, `"just a string"`, string(got.MarshalTo(nil))) + }) + + t.Run("both objects merge succeeds with fresh winning on overlap", func(t *testing.T) { + cached := astjson.MustParse(`{"name":"Alice","email":"alice@old.com"}`) + fresh := astjson.MustParse(`{"name":"Bob"}`) + got := mergeCachedValueForWrite(a, cached, fresh) + result := string(got.MarshalTo(nil)) + // Fresh "name" wins over cached "name", cached "email" is preserved + assert.Equal(t, `{"name":"Bob","email":"alice@old.com"}`, result) + }) + + t.Run("both objects fresh has new fields merged contains both", func(t *testing.T) { + cached := astjson.MustParse(`{"id":"1"}`) + fresh := astjson.MustParse(`{"id":"1","age":30}`) + got := mergeCachedValueForWrite(a, cached, fresh) + result := string(got.MarshalTo(nil)) + assert.Equal(t, `{"id":"1","age":30}`, result) + }) +} + +// TestMaterializeNullableFieldsAsNull verifies that missing nullable fields are +// set to null while non-nullable and already-present fields are left alone. +func TestMaterializeNullableFieldsAsNull(t *testing.T) { + a := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + l := &Loader{ctx: ctx} + + t.Run("nil entity is no-op", func(t *testing.T) { + obj := &Object{ + Fields: []*Field{ + {Name: []byte("name"), Value: &String{Nullable: true}}, + }, + } + // Should not panic + l.materializeNullableFieldsAsNull(a, nil, obj) + }) + + t.Run("entity missing nullable field gets null", func(t *testing.T) { + entity, err := astjson.ParseBytesWithArena(a, []byte(`{"id":"1"}`)) + assert.NoError(t, err) + obj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Nullable: false}}, + {Name: []byte("email"), Value: &String{Nullable: true}}, + }, + } + l.materializeNullableFieldsAsNull(a, entity, obj) + assert.Equal(t, `{"id":"1","email":null}`, string(entity.MarshalTo(nil))) + }) + + t.Run("entity missing non-nullable field is not set", func(t *testing.T) { + entity, err := astjson.ParseBytesWithArena(a, []byte(`{"id":"1"}`)) + assert.NoError(t, err) + obj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Nullable: false}}, + {Name: []byte("name"), Value: &String{Nullable: false}}, + }, + } + l.materializeNullableFieldsAsNull(a, entity, obj) + // Non-nullable "name" must NOT be materialized + assert.Equal(t, `{"id":"1"}`, string(entity.MarshalTo(nil))) + }) + + t.Run("entity has all fields no change", func(t *testing.T) { + entity, err := astjson.ParseBytesWithArena(a, []byte(`{"id":"1","email":"a@b.com"}`)) + assert.NoError(t, err) + obj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Nullable: false}}, + {Name: []byte("email"), Value: &String{Nullable: true}}, + }, + } + l.materializeNullableFieldsAsNull(a, entity, obj) + assert.Equal(t, `{"id":"1","email":"a@b.com"}`, string(entity.MarshalTo(nil))) + }) + + t.Run("nested object with missing nullable field is recursively materialized", func(t *testing.T) { + entity, err := astjson.ParseBytesWithArena(a, []byte(`{"address":{"city":"NYC"}}`)) + assert.NoError(t, err) + obj := &Object{ + Fields: []*Field{ + { + Name: []byte("address"), + Value: &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("city"), Value: &String{Nullable: false}}, + {Name: []byte("zip"), Value: &String{Nullable: true}}, + }, + }, + }, + }, + } + l.materializeNullableFieldsAsNull(a, entity, obj) + assert.Equal(t, `{"address":{"city":"NYC","zip":null}}`, string(entity.MarshalTo(nil))) + }) +} + +// TestCacheKeyHasPositiveEntityData verifies edge cases for detecting whether +// a CacheKey carries entity data beyond just the identity key fields. +func TestCacheKeyHasPositiveEntityData(t *testing.T) { + t.Run("nil CacheKey returns false", func(t *testing.T) { + assert.Equal(t, false, cacheKeyHasPositiveEntityData(nil)) + }) + + t.Run("empty CacheKey no values returns false", func(t *testing.T) { + ck := &CacheKey{} + assert.Equal(t, false, cacheKeyHasPositiveEntityData(ck)) + }) + + t.Run("key-only payload returns false", func(t *testing.T) { + // Entity has only __typename and the key field "id" — no extra data + ck := &CacheKey{ + Item: astjson.MustParse(`{"__typename":"User","id":"1"}`), + Keys: []string{`prefix:{"__typename":"User","key":{"id":"1"}}`}, + } + assert.Equal(t, false, cacheKeyHasPositiveEntityData(ck)) + }) + + t.Run("payload with extra fields returns true", func(t *testing.T) { + // Entity has "name" beyond the key fields + ck := &CacheKey{ + Item: astjson.MustParse(`{"__typename":"User","id":"1","name":"Alice"}`), + Keys: []string{`prefix:{"__typename":"User","key":{"id":"1"}}`}, + } + assert.Equal(t, true, cacheKeyHasPositiveEntityData(ck)) + }) + + t.Run("FromCache with extra fields returns true", func(t *testing.T) { + ck := &CacheKey{ + FromCache: astjson.MustParse(`{"__typename":"User","id":"1","email":"a@b.com"}`), + Keys: []string{`prefix:{"__typename":"User","key":{"id":"1"}}`}, + } + assert.Equal(t, true, cacheKeyHasPositiveEntityData(ck)) + }) + + t.Run("with EntityMergePath extracts nested entity", func(t *testing.T) { + // The entity is nested under "user" path; the inner object has extra fields + ck := &CacheKey{ + Item: astjson.MustParse(`{"user":{"__typename":"User","id":"1","name":"Alice"}}`), + Keys: []string{`prefix:{"__typename":"User","key":{"id":"1"}}`}, + EntityMergePath: []string{"user"}, + } + assert.Equal(t, true, cacheKeyHasPositiveEntityData(ck)) + }) + + t.Run("with EntityMergePath key-only nested entity returns false", func(t *testing.T) { + ck := &CacheKey{ + Item: astjson.MustParse(`{"user":{"__typename":"User","id":"1"}}`), + Keys: []string{`prefix:{"__typename":"User","key":{"id":"1"}}`}, + EntityMergePath: []string{"user"}, + } + assert.Equal(t, false, cacheKeyHasPositiveEntityData(ck)) + }) +} + +// TestHasNonEmptyKey verifies the defensive guard used before issuing L2 Get. +// When extractCacheKeysStrings yields nothing but empty strings (e.g., a template +// missed a required variable), we must skip the L2 round-trip instead of asking +// the backend for entries keyed by "". +func TestHasNonEmptyKey(t *testing.T) { + assert.Equal(t, false, hasNonEmptyKey(nil)) + assert.Equal(t, false, hasNonEmptyKey([]string{})) + assert.Equal(t, false, hasNonEmptyKey([]string{""})) + assert.Equal(t, false, hasNonEmptyKey([]string{"", "", ""})) + assert.Equal(t, true, hasNonEmptyKey([]string{"", "a"})) + assert.Equal(t, true, hasNonEmptyKey([]string{"a"})) + assert.Equal(t, true, hasNonEmptyKey([]string{"a", "b"})) +} + +// TestTryL2CacheLoad_AllEmptyKeysSkipsBackend verifies that a CacheKey whose +// Keys slice expands to only empty strings does not reach the L2 backend. +// Without the guard, the Loader would call cache.Get(ctx, []string{""}) — wasted +// round-trip and undefined backend semantics. Instead we short-circuit cleanly: +// skipFetch=false, cacheMustBeUpdated=true, inner cache untouched. +func TestTryL2CacheLoad_AllEmptyKeysSkipsBackend(t *testing.T) { + inner := &failingCache{} // Get on this would bump getCalls — it must not be called. + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + l := &Loader{ctx: ctx} + res := &result{ + cache: inner, + l2CacheKeys: []*CacheKey{{Keys: []string{"", ""}}}, + cacheConfig: FetchCacheConfiguration{CacheName: "default"}, + } + + skip, err := l.tryL2CacheLoad(t.Context(), &FetchInfo{DataSourceName: "users"}, res) + assert.NoError(t, err) + assert.Equal(t, false, skip) + assert.Equal(t, true, res.cacheMustBeUpdated) + assert.Equal(t, int64(0), inner.getCalls.Load()) +} + +// TestShouldWriteRequestedKey covers the request-key write decision matrix on the +// fetch path, with particular attention to the case where the response payload +// doesn't carry the entity's @key field — `renderedKey` is "" and the requested +// key (built from request arguments) must still be written. Previously this +// branch returned false, suppressing every cache write for queries that selected +// only non-key fields off a cached entity. +func TestShouldWriteRequestedKey(t *testing.T) { + requested := `{"__typename":"Venue","key":{"address":{"id":"v1"}}}` + missing := map[string]struct{}{requested: {}} + + tests := []struct { + name string + cacheSkipFetch bool + writeback bool + requested string + rendered string + missingKeys map[string]struct{} + want bool + }{ + { + name: "fetch path, key not previously requested → always write", + requested: requested, + rendered: requested, + missingKeys: nil, + want: true, + }, + { + name: "fetch path, key was missing on read, rendered matches requested → write", + requested: requested, + rendered: requested, + missingKeys: missing, + want: true, + }, + { + name: "fetch path, key was missing on read, response carries no key field → write requested key (REGRESSION)", + requested: requested, + rendered: "", // response payload didn't contain the @key field + missingKeys: missing, + want: true, + }, + { + name: "fetch path, key was missing on read, rendered disagrees → suppress (key skew)", + requested: requested, + rendered: `{"__typename":"Venue","key":{"address":{"id":"different"}}}`, + missingKeys: missing, + want: false, + }, + { + name: "skip-fetch path with writeback flag → write", + cacheSkipFetch: true, + writeback: true, + requested: requested, + rendered: requested, + missingKeys: nil, + want: true, + }, + { + name: "skip-fetch path without writeback flag → suppress", + cacheSkipFetch: true, + writeback: false, + requested: requested, + rendered: requested, + missingKeys: nil, + want: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := shouldWriteRequestedKey(tc.cacheSkipFetch, tc.writeback, tc.requested, tc.rendered, tc.missingKeys) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/v2/pkg/engine/resolve/caching.go b/v2/pkg/engine/resolve/caching.go new file mode 100644 index 0000000000..d24cc69b6e --- /dev/null +++ b/v2/pkg/engine/resolve/caching.go @@ -0,0 +1,755 @@ +package resolve + +import ( + "strings" + "time" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafebytes" +) + +type CacheKeyTemplate interface { + // RenderCacheKeys returns multiple cache keys (one per root field or entity) + // Generates keys for all items at once + RenderCacheKeys(a arena.Arena, ctx *Context, items []*astjson.Value, prefix string) ([]*CacheKey, error) + // IsEntityFetch reports whether the rendered keys describe entity fetch inputs. + IsEntityFetch() bool + // BatchEntityKeyArgumentPath returns the argument path for root-field batch entity lookups. + // Returns nil when the template does not support batch entity key construction. + BatchEntityKeyArgumentPath() []string + // EntityMergePath returns the entity-level merge path for root-field entity mappings. + // Returns nil when the template stores complete response payloads instead of entity payloads. + EntityMergePath(postProcessing PostProcessingConfiguration) []string +} + +type CacheKey struct { + // cachedData groups the non-FromCache cache-read state (candidates, freshness, + // writeback flag). Embedded so promoted field access keeps call sites unchanged; + // FromCache stays at the top level for struct-literal compatibility across tests. + // Set together by populateCacheKeysFromIndex / candidate-resolution helpers and + // propagated together when mirroring between L1 and L2 cache keys. + cachedData + + Item *astjson.Value + FromCache *astjson.Value + Keys []string + // BatchIndex records this cache key's position in the original batch argument list. + // For batch keys (ArgumentIsEntityKey + list), this is the index into the original + // list argument (e.g., ids[0], ids[1], ...). Used for response reassembly. + // For non-batch cache keys, this field is unused (default 0). + BatchIndex int + // missingKeys tracks the requested L2 keys that were absent on read for this entity. + // It is used during writeback to distinguish existing-key refreshes from missing-key backfills. + missingKeys []string + // EntityMergePath enables cache sharing between root field and entity fetches. + // On STORE: extracts entity-level data at this path (e.g., ["user"] extracts from {"user":{...}}). + // On LOAD: wraps cached entity-level data back at this path (e.g., wraps {...} into {"user":{...}}). + EntityMergePath []string + // NegativeCacheHit is set during mergeResult when the subgraph returned null for this entity. + // Used by updateL2Cache to store a null sentinel with NegativeCacheTTL instead of regular TTL. + NegativeCacheHit bool +} + +// cachedData bundles the auxiliary cache-read state for a CacheKey. +// FromCache is intentionally NOT here — it remains a top-level field on +// CacheKey to preserve the many struct-literal initializations in tests. +type cachedData struct { + // fromCacheCandidates stores all matching L2 candidates for this cache key, sorted freshest first. + fromCacheCandidates []fromCacheCandidate + // fromCacheRemainingTTL tracks the selected candidate freshness for multi-key cache hits. + fromCacheRemainingTTL time.Duration + // fromCacheNeedsWriteback marks cache-hit resolution paths that should rewrite canonical data to L2. + fromCacheNeedsWriteback bool +} + +type fromCacheCandidate struct { + value []byte + remainingTTL time.Duration +} + +type RootQueryCacheKeyTemplate struct { + RootFields []QueryField + EntityKeyMappings []EntityKeyMappingConfig + + batchEntityKeyPrecomputed bool + hasBatchEntityKey bool + batchEntityKeyArgumentPath []string +} + +func (*RootQueryCacheKeyTemplate) IsEntityFetch() bool { + return false +} + +func NewRootQueryCacheKeyTemplate(rootFields []QueryField, entityKeyMappings []EntityKeyMappingConfig) *RootQueryCacheKeyTemplate { + template := &RootQueryCacheKeyTemplate{ + RootFields: rootFields, + EntityKeyMappings: entityKeyMappings, + } + template.precomputeDerivedFields() + return template +} + +// EntityKeyMappingConfig configures how root field arguments map to entity @key fields +// for derived entity cache keys. +type EntityKeyMappingConfig struct { + EntityTypeName string + FieldMappings []EntityFieldMappingConfig +} + +// EntityFieldMappingConfig maps a single entity @key field to a root field argument path. +type EntityFieldMappingConfig struct { + EntityKeyField string + ArgumentPath []string + ArgumentIsEntityKey bool +} + +type QueryField struct { + Coordinate GraphCoordinate + Args []FieldArgument + // ResponseKey is the alias (if present) or field name — used for looking up + // the field value in the response JSON. + ResponseKey string +} + +// HasBatchEntityKey returns true if any entity key mapping uses ArgumentIsEntityKey, +// indicating this root field supports batch cache key construction from list arguments. +func (r *RootQueryCacheKeyTemplate) HasBatchEntityKey() bool { + if r == nil { + return false + } + return r.hasBatchEntityKey +} + +func (r *RootQueryCacheKeyTemplate) precomputeDerivedFields() { + if r == nil || r.batchEntityKeyPrecomputed { + return + } + r.batchEntityKeyPrecomputed = true + for _, mapping := range r.EntityKeyMappings { + for _, fm := range mapping.FieldMappings { + if !fm.ArgumentIsEntityKey { + continue + } + r.hasBatchEntityKey = true + r.batchEntityKeyArgumentPath = fm.ArgumentPath + return + } + } +} + +// BatchEntityKeyArgumentPath returns the argument path for the batch entity key field mapping. +// Returns nil if no batch entity key mapping exists. +func (r *RootQueryCacheKeyTemplate) BatchEntityKeyArgumentPath() []string { + if r == nil { + return nil + } + return r.batchEntityKeyArgumentPath +} + +func (r *RootQueryCacheKeyTemplate) EntityMergePath(postProcessing PostProcessingConfiguration) []string { + if len(r.EntityKeyMappings) == 0 { + return nil + } + + entityPath := postProcessing.MergePath + if len(entityPath) == 0 && len(r.RootFields) == 1 { + entityPath = []string{r.RootFields[0].Coordinate.FieldName} + } + + return entityPath +} + +type FieldArgument struct { + Name string + Variable Variable +} + +// RenderCacheKeys returns multiple cache keys, one per item. +// Each cache key contains one or more KeyEntry objects (one per root field). +// When EntityKeyMappings are configured, entity key format is used INSTEAD of root field format. +// For batch mode (ArgumentIsEntityKey + list argument), returns one CacheKey per list element +// with BatchIndex set to the element's position in the original list. +func (r *RootQueryCacheKeyTemplate) RenderCacheKeys(a arena.Arena, ctx *Context, items []*astjson.Value, prefix string) ([]*CacheKey, error) { + if len(r.RootFields) == 0 { + return nil, nil + } + + // Check for batch mode: ArgumentIsEntityKey + array argument + if len(r.EntityKeyMappings) > 0 { + if batchKeys, isBatch := r.tryRenderBatchEntityKeys(a, ctx, prefix); isBatch { + return batchKeys, nil + } + } + + // Use heap slices for pointer-containing types (*CacheKey, string) because + // arena memory is backed by []byte (noscan) — GC cannot trace pointers stored + // in arena memory, which can cause premature collection of heap objects. + cacheKeys := make([]*CacheKey, 0, len(items)) + jsonBytes := arena.AllocateSlice[byte](a, 0, 64) + + for _, item := range items { + keyEntries := make([]string, 0, len(r.RootFields)) + + // Entity key mappings are independent of root fields — render once per item + if len(r.EntityKeyMappings) > 0 { + for _, mapping := range r.EntityKeyMappings { + entityKey, jsonBytesOut := r.renderDerivedEntityKey(a, ctx, jsonBytes, mapping, prefix) + jsonBytes = jsonBytesOut + if entityKey != "" { + keyEntries = append(keyEntries, entityKey) + } + // If entityKey is empty (missing arg), keyEntries stays empty → no caching + } + } else { + // No entity key mapping: use root field keys + for _, field := range r.RootFields { + var key string + key, jsonBytes = r.renderField(a, ctx, item, jsonBytes, field) + if prefix != "" { + l := len(prefix) + 1 + len(key) + tmp := arena.AllocateSlice[byte](a, 0, l) + tmp = arena.SliceAppend(a, tmp, unsafebytes.StringToBytes(prefix)...) + tmp = arena.SliceAppend(a, tmp, []byte(`:`)...) + tmp = arena.SliceAppend(a, tmp, unsafebytes.StringToBytes(key)...) + key = string(tmp) + } + keyEntries = append(keyEntries, key) + } + } + + cacheKeys = append(cacheKeys, &CacheKey{ + Item: item, + Keys: keyEntries, + }) + } + return cacheKeys, nil +} + +// tryRenderBatchEntityKeys checks if the entity key mappings contain a batch argument +// (ArgumentIsEntityKey=true with a JSON array value). If so, it produces one CacheKey +// per array element with BatchIndex tracking. Returns (nil, false) if not batch mode. +func (r *RootQueryCacheKeyTemplate) tryRenderBatchEntityKeys(a arena.Arena, ctx *Context, prefix string) ([]*CacheKey, bool) { + batchMapping, ok := r.batchEntityKeyMapping() + if !ok { + return nil, false + } + + argValue := resolveArgumentValue(ctx, batchMapping.argumentPath) + switch { + case argValue == nil || argValue.Type() == astjson.TypeNull: + // null argument → return empty batch (caller handles as empty response) + return []*CacheKey{}, true + case argValue.Type() != astjson.TypeArray: + // Scalar value with ArgumentIsEntityKey — fall back to non-batch path + return nil, false + } + + return r.renderBatchEntityCacheKeys(a, argValue.GetArray(), batchMapping, prefix), true +} + +// resolveArgumentValue extracts a variable value from ctx, handling RemapVariables. +func resolveArgumentValue(ctx *Context, argumentPath []string) *astjson.Value { + if ctx == nil || ctx.Variables == nil { + return nil + } + path := resolveArgumentVariablePath(ctx, argumentPath) + return ctx.Variables.Get(path...) +} + +// resolveArgumentVariablePath resolves the variables path for an argument, +// applying the forward RemapVariables lookup. In production, resolveArgumentPath +// resolves ArgumentPath to the remapped variable name (e.g., ["a"]), while +// ctx.Variables keeps the original names. Forward lookup maps the remapped name +// back to the original for variable access. +func resolveArgumentVariablePath(ctx *Context, argumentPath []string) []string { + path := argumentPath + if ctx == nil || ctx.RemapVariables == nil { + return path + } + if len(path) == 1 { + if nameToUse, hasMapping := ctx.RemapVariables[path[0]]; hasMapping && nameToUse != path[0] { + path = []string{nameToUse} + } + } + return path +} + +// cloneVariablesWithBatchIndices clones ctx.Variables and replaces the batch argument +// array at argumentPath with only the elements referenced by batchIndices. +func cloneVariablesWithBatchIndices(ctx *Context, argumentPath []string, batchIndices []int) (*astjson.Value, error) { + if ctx == nil || ctx.Variables == nil { + return nil, nil + } + + resolvedPath := resolveArgumentVariablePath(ctx, argumentPath) + originalArray := ctx.Variables.Get(resolvedPath...) + if originalArray == nil || originalArray.Type() != astjson.TypeArray { + return nil, nil + } + + clonedVariables, err := astjson.ParseBytes(ctx.Variables.MarshalTo(nil)) + if err != nil { + return nil, err + } + + filteredArray := astjson.ArrayValue(nil) + elements := clonedVariables.GetArray(resolvedPath...) + for _, batchIndex := range batchIndices { + if batchIndex < 0 || batchIndex >= len(elements) { + continue + } + astjson.AppendToArray(nil, filteredArray, elements[batchIndex]) + } + + astjson.SetValue(nil, clonedVariables, filteredArray, resolvedPath...) + return clonedVariables, nil +} + +// renderSingleEntityKey renders a cache key for a single entity element. +// Format: {"__typename":"Product","key":{"upc":"top-1"}} with optional prefix. +func (r *RootQueryCacheKeyTemplate) renderSingleEntityKey(a arena.Arena, jsonBytes []byte, entityTypeName, keyField string, elemValue *astjson.Value, prefix string) (string, []byte) { + if elemValue == nil || elemValue.Type() == astjson.TypeNull { + return "", jsonBytes + } + keyObj := astjson.ObjectValue(a) + keyObj.Set(a, "__typename", astjson.StringValue(a, entityTypeName)) + keysObj := astjson.ObjectValue(a) + setNestedKey(a, keysObj, keyField, elemValue) + keyObj.Set(a, "key", keysObj) + + jsonBytes = keyObj.MarshalTo(jsonBytes[:0]) + l := len(jsonBytes) + if prefix != "" { + l += 1 + len(prefix) + } + slice := arena.AllocateSlice[byte](a, 0, l) + if prefix != "" { + slice = arena.SliceAppend(a, slice, unsafebytes.StringToBytes(prefix)...) + slice = arena.SliceAppend(a, slice, []byte(`:`)...) + } + slice = arena.SliceAppend(a, slice, jsonBytes...) + return string(slice), jsonBytes +} + +type batchEntityKeyMapping struct { + entityTypeName string + entityKeyField string + argumentPath []string +} + +// batchEntityKeyMapping returns the single batch-entity mapping for this root template. +// Composition guarantees at most one ArgumentIsEntityKey mapping per root field. +func (r *RootQueryCacheKeyTemplate) batchEntityKeyMapping() (batchEntityKeyMapping, bool) { + for _, mapping := range r.EntityKeyMappings { + for _, fieldMapping := range mapping.FieldMappings { + if !fieldMapping.ArgumentIsEntityKey { + continue + } + return batchEntityKeyMapping{ + entityTypeName: mapping.EntityTypeName, + entityKeyField: fieldMapping.EntityKeyField, + argumentPath: fieldMapping.ArgumentPath, + }, true + } + } + return batchEntityKeyMapping{}, false +} + +// renderBatchEntityCacheKeys renders one cache key per selected batch argument item. +func (r *RootQueryCacheKeyTemplate) renderBatchEntityCacheKeys(a arena.Arena, elements []*astjson.Value, mapping batchEntityKeyMapping, prefix string) []*CacheKey { + if len(elements) == 0 { + return []*CacheKey{} + } + + cacheKeys := make([]*CacheKey, 0, len(elements)) + jsonBytes := arena.AllocateSlice[byte](a, 0, 64) + for i, elem := range elements { + entityKey, jsonBytesOut := r.renderSingleEntityKey(a, jsonBytes, mapping.entityTypeName, mapping.entityKeyField, elem, prefix) + jsonBytes = jsonBytesOut + if entityKey == "" { + continue + } + cacheKeys = append(cacheKeys, &CacheKey{ + Keys: []string{entityKey}, + BatchIndex: i, + }) + } + return cacheKeys +} + +// renderDerivedEntityKey renders a cache key in entity format using root field arguments. +// Returns "" if any argument cannot be resolved (skip caching for this request). +// Format: {"__typename":"User","key":{"id":"123"}} with optional prefix. +func (r *RootQueryCacheKeyTemplate) renderDerivedEntityKey(a arena.Arena, ctx *Context, jsonBytes []byte, mapping EntityKeyMappingConfig, prefix string) (string, []byte) { + keyObj := astjson.ObjectValue(a) + keyObj.Set(a, "__typename", astjson.StringValue(a, mapping.EntityTypeName)) + + keysObj := astjson.ObjectValue(a) + for _, fm := range mapping.FieldMappings { + argumentPath := fm.ArgumentPath + // Apply variable remapping via forward lookup. RemapVariables maps newName → oldName. + // In production, resolveArgumentPath resolves ArgumentPath to the remapped variable + // name (e.g., ["a"]), while ctx.Variables keeps the original names (e.g., {"id": ...}). + // Forward lookup maps argumentPath[0] back to the original name for variable access. + if len(argumentPath) > 0 && ctx.RemapVariables != nil { + if nameToUse, hasMapping := ctx.RemapVariables[argumentPath[0]]; hasMapping && nameToUse != argumentPath[0] { + remapped := make([]string, len(argumentPath)) + copy(remapped, argumentPath) + remapped[0] = nameToUse + argumentPath = remapped + } + } + + argValue := ctx.Variables.Get(argumentPath...) + if argValue == nil || argValue.Type() == astjson.TypeNull { + // Missing or null argument → skip caching + return "", jsonBytes + } + setNestedKey(a, keysObj, fm.EntityKeyField, argValue) + } + + keyObj.Set(a, "key", keysObj) + + // Marshal to JSON + jsonBytes = keyObj.MarshalTo(jsonBytes[:0]) + l := len(jsonBytes) + if prefix != "" { + l += 1 + len(prefix) + } + slice := arena.AllocateSlice[byte](a, 0, l) + if prefix != "" { + slice = arena.SliceAppend(a, slice, unsafebytes.StringToBytes(prefix)...) + slice = arena.SliceAppend(a, slice, []byte(`:`)...) + } + slice = arena.SliceAppend(a, slice, jsonBytes...) + return string(slice), jsonBytes +} + +func (r *RootQueryCacheKeyTemplate) renderDerivedEntityKeyFromValue(a arena.Arena, entity *astjson.Value, jsonBytes []byte, mapping EntityKeyMappingConfig, prefix string) (string, []byte) { + keyObj := astjson.ObjectValue(a) + keyObj.Set(a, "__typename", astjson.StringValue(a, mapping.EntityTypeName)) + + keysObj := astjson.ObjectValue(a) + for _, fm := range mapping.FieldMappings { + value := entity.Get(strings.Split(fm.EntityKeyField, ".")...) + if value == nil || value.Type() == astjson.TypeNull { + return "", jsonBytes + } + setNestedKey(a, keysObj, fm.EntityKeyField, value) + } + + keyObj.Set(a, "key", keysObj) + jsonBytes = keyObj.MarshalTo(jsonBytes[:0]) + l := len(jsonBytes) + if prefix != "" { + l += 1 + len(prefix) + } + slice := arena.AllocateSlice[byte](a, 0, l) + if prefix != "" { + slice = arena.SliceAppend(a, slice, unsafebytes.StringToBytes(prefix)...) + slice = arena.SliceAppend(a, slice, []byte(`:`)...) + } + slice = arena.SliceAppend(a, slice, jsonBytes...) + return string(slice), jsonBytes +} + +// setNestedKey sets a value on a JSON object, supporting dot-notation for nested keys. +// For "store.id" with value "123", it produces {"store":{"id":"123"}}. +// For flat keys (no dot), it behaves like obj.Set(a, key, value). +func setNestedKey(a arena.Arena, obj *astjson.Value, key string, value *astjson.Value) { + // Coerce numbers to strings for consistent cache keys. + // Entity @key fields are identifiers (ID, String) — the GraphQL response always + // serializes ID as a string, but clients may send integer literals (id: 1 vs id: "1"). + // Without coercion, the read-path key {"id":1} won't match the write-path key {"id":"1"}. + if value != nil && value.Type() == astjson.TypeNumber { + value = value.CoerceToString(a) + } + parts := strings.Split(key, ".") + if len(parts) == 1 { + obj.Set(a, key, value) + return + } + // Walk top-down, reusing existing intermediate objects + current := obj + for i := 0; i < len(parts)-1; i++ { + existing := current.Get(parts[i]) + if existing != nil && existing.Type() == astjson.TypeObject { + current = existing + } else { + next := astjson.ObjectValue(a) + current.Set(a, parts[i], next) + current = next + } + } + current.Set(a, parts[len(parts)-1], value) +} + +// renderField renders a single field cache key as JSON +func (r *RootQueryCacheKeyTemplate) renderField(a arena.Arena, ctx *Context, item *astjson.Value, jsonBytes []byte, field QueryField) (string, []byte) { + // Build JSON object starting with __typename + keyObj := astjson.ObjectValue(a) + typeName := field.Coordinate.TypeName + keyObj.Set(a, "__typename", astjson.StringValue(a, typeName)) + keyObj.Set(a, "field", astjson.StringValue(a, field.Coordinate.FieldName)) + + // Build args object if there are any arguments + if len(field.Args) > 0 { + argsObj := astjson.ObjectValue(a) + for _, arg := range field.Args { + var argValue *astjson.Value + segment := arg.Variable.TemplateSegment() + if segment.Renderer != nil { + switch segment.VariableKind { + case ContextVariableKind: + // Extract value from context variables + variableSourcePath := segment.VariableSourcePath + if len(variableSourcePath) == 1 && ctx.RemapVariables != nil { + if nameToUse, hasMapping := ctx.RemapVariables[variableSourcePath[0]]; hasMapping && nameToUse != variableSourcePath[0] { + variableSourcePath = []string{nameToUse} + } + } + argValue = ctx.Variables.Get(variableSourcePath...) + if argValue == nil { + argValue = astjson.NullValue + } + case ObjectVariableKind: + // Use data parameter for object variables + if item != nil { + value := item.Get(segment.VariableSourcePath...) + if value == nil || value.Type() == astjson.TypeNull { + argValue = astjson.NullValue + } else { + // Values are already JSON-compatible astjson.Value + argValue = value + } + } else { + argValue = astjson.NullValue + } + default: + // For other variable kinds, use data parameter + if item != nil { + argValue = item + } else { + argValue = astjson.NullValue + } + } + } else { + argValue = astjson.NullValue + } + argsObj.Set(a, arg.Name, argValue) + } + keyObj.Set(a, "args", argsObj) + } + + // Marshal to JSON and write to output + jsonBytes = keyObj.MarshalTo(jsonBytes[:0]) + slice := arena.AllocateSlice[byte](a, len(jsonBytes), len(jsonBytes)) + copy(slice, jsonBytes) + return string(slice), jsonBytes +} + +type EntityQueryCacheKeyTemplate struct { + // Keys contains only @key fields (without @requires fields). + // Used for both L1 and L2 cache keys to ensure stable entity identity. + Keys *ResolvableObjectVariable + // TypeName is the entity type name from the query plan (e.g. "Product", "User"). + // Used as fallback when __typename is missing from the response data. + TypeName string +} + +func (*EntityQueryCacheKeyTemplate) IsEntityFetch() bool { + return true +} + +// KeyFields extracts the full @key structure from the template's Object tree. +func (e *EntityQueryCacheKeyTemplate) KeyFields() []KeyField { + if e.Keys == nil || e.Keys.Renderer == nil { + return nil + } + obj, ok := e.Keys.Renderer.Node.(*Object) + if !ok { + return nil + } + return objectToKeyFields(obj) +} + +func (*EntityQueryCacheKeyTemplate) BatchEntityKeyArgumentPath() []string { + return nil +} + +func (*EntityQueryCacheKeyTemplate) EntityMergePath(PostProcessingConfiguration) []string { + return nil +} + +// objectToKeyFields converts an Object node tree to a KeyField tree. +func objectToKeyFields(obj *Object) []KeyField { + var fields []KeyField + for _, f := range obj.Fields { + name := string(f.Name) + if name == "__typename" { + continue + } + kf := KeyField{Name: name} + // Check if value is a nested Object (composite key field) + if childObj, ok := f.Value.(*Object); ok { + kf.Children = objectToKeyFields(childObj) + } + fields = append(fields, kf) + } + return fields +} + +// RenderCacheKeys implements CacheKeyTemplate interface. +// Uses Keys template (only @key fields) for stable entity identity. +// Prefix is used for L2 cache isolation (typically subgraph header hash). +func (e *EntityQueryCacheKeyTemplate) RenderCacheKeys(a arena.Arena, ctx *Context, items []*astjson.Value, prefix string) ([]*CacheKey, error) { + return e.renderCacheKeys(a, items, e.Keys, prefix) +} + +// renderCacheKeys is the internal implementation for RenderCacheKeys. +// Returns one cache key per item for entity queries with keys nested under "key". +func (e *EntityQueryCacheKeyTemplate) renderCacheKeys(a arena.Arena, items []*astjson.Value, keysTemplate *ResolvableObjectVariable, prefix string) ([]*CacheKey, error) { + jsonBytes := arena.AllocateSlice[byte](a, 0, 64) + // Use heap slices for pointer-containing types — arena memory is noscan, + // so GC cannot trace pointers stored there, risking premature collection. + cacheKeys := make([]*CacheKey, 0, len(items)) + + for _, item := range items { + if item == nil { + continue + } + + // Build JSON object starting with __typename + keyObj := astjson.ObjectValue(a) + + // Extract __typename from the data + typename := item.Get("__typename") + if typename == nil { + // Fallback to plan-time type name when __typename is missing from response data + keyObj.Set(a, "__typename", astjson.StringValue(a, e.TypeName)) + } else { + keyObj.Set(a, "__typename", typename) + } + + // Put entity keys under "key" nested object + keysObj := astjson.ObjectValue(a) + + // Extract only the fields defined in the template (not all fields from data) + if keysTemplate != nil && keysTemplate.Renderer != nil { + if obj, ok := keysTemplate.Renderer.Node.(*Object); ok { + for _, field := range obj.Fields { + fieldName := unsafebytes.BytesToString(field.Name) + // Skip __typename as it's already handled separately + if fieldName == "__typename" { + continue + } + // Resolve field value based on its template definition + fieldValue := e.resolveFieldValue(a, field.Value, item) + if fieldValue != nil && fieldValue.Type() != astjson.TypeNull { + // Coerce numbers to strings for consistent cache keys with the + // sibling derived-key paths (renderDerivedEntityKey / + // renderDerivedEntityKeyFromValue) that go through setNestedKey. + // See caching.go:468-471 for the reasoning. + if fieldValue.Type() == astjson.TypeNumber { + fieldValue = fieldValue.CoerceToString(a) + } + keysObj.Set(a, fieldName, fieldValue) + } + } + } + } + + // Skip entities with empty key objects — @key fields are missing from + // the query selection. Such keys would collide for all entities of the + // same type, causing incorrect cache sharing. + if keysObj.GetObject().Len() == 0 { + continue + } + + keyObj.Set(a, "key", keysObj) + + // Marshal to JSON and write to buffer + jsonBytes = keyObj.MarshalTo(jsonBytes[:0]) + l := len(jsonBytes) + if prefix != "" { + l += 1 + len(prefix) + } + slice := arena.AllocateSlice[byte](a, 0, l) + if prefix != "" { + slice = arena.SliceAppend(a, slice, unsafebytes.StringToBytes(prefix)...) + slice = arena.SliceAppend(a, slice, []byte(`:`)...) + } + slice = arena.SliceAppend(a, slice, jsonBytes...) + + // Create KeyEntry with empty path for entity queries + keyEntries := []string{string(slice)} + + cacheKeys = append(cacheKeys, &CacheKey{ + Item: item, + Keys: keyEntries, + }) + } + + return cacheKeys, nil +} + +// resolveFieldValue resolves a field value from data based on its template definition +func (e *EntityQueryCacheKeyTemplate) resolveFieldValue(a arena.Arena, valueNode Node, data *astjson.Value) *astjson.Value { + switch n := valueNode.(type) { + case *String, *Scalar, *Integer, *Float, *Boolean, *Enum, *BigInt, *CustomNode: + return data.Get(n.NodePath()...) + case *Object: + // For nested objects, recursively build the object using only template-defined fields + nestedObj := astjson.ObjectValue(a) + // Get the base object from data using the object's path + baseData := data.Get(n.Path...) + if baseData == nil || baseData.Type() == astjson.TypeNull { + return nil + } + // Recursively resolve each field in the nested object template + for _, field := range n.Fields { + fieldName := unsafebytes.BytesToString(field.Name) + // Skip __typename in nested objects + if fieldName == "__typename" { + continue + } + fieldValue := e.resolveFieldValue(a, field.Value, baseData) + if fieldValue != nil && fieldValue.Type() != astjson.TypeNull { + // Coerce numbers to strings for consistent cache keys (see + // caching.go:468-471). Applies inside composite @key Objects + // too — nested scalars must follow the same contract as + // flat scalars. + if fieldValue.Type() == astjson.TypeNumber { + fieldValue = fieldValue.CoerceToString(a) + } + nestedObj.Set(a, fieldName, fieldValue) + } + } + return nestedObj + case *Array: + // Handle arrays by resolving each item based on the Item template + arrayValue := data.Get(n.Path...) + if arrayValue == nil || arrayValue.Type() != astjson.TypeArray { + return nil + } + items := arrayValue.GetArray() + resultArray := astjson.ArrayValue(a) + resultIndex := 0 + for _, itemData := range items { + if itemData == nil { + continue + } + resolvedItem := e.resolveFieldValue(a, n.Item, itemData) + if resolvedItem != nil { + resultArray.SetArrayItem(a, resultIndex, resolvedItem) + resultIndex++ + } + } + return resultArray + default: + // For other types not handled above, return nil + return nil + } +} diff --git a/v2/pkg/engine/resolve/caching_overhead_bench_test.go b/v2/pkg/engine/resolve/caching_overhead_bench_test.go new file mode 100644 index 0000000000..b2e748dfc6 --- /dev/null +++ b/v2/pkg/engine/resolve/caching_overhead_bench_test.go @@ -0,0 +1,696 @@ +package resolve + +import ( + "bytes" + "context" + "net/http" + "strconv" + "sync" + "testing" + "time" + + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/httpclient" +) + +// benchDataSource returns a fixed response with no allocations beyond the copy. +type benchDataSource struct { + data []byte +} + +func (d *benchDataSource) Load(_ context.Context, _ http.Header, _ []byte) ([]byte, error) { + out := make([]byte, len(d.data)) + copy(out, d.data) + return out, nil +} + +func (d *benchDataSource) LoadWithFiles(_ context.Context, _ http.Header, _ []byte, _ []*httpclient.FileUpload) ([]byte, error) { + return d.Load(context.TODO(), nil, nil) +} + +// benchCache is a zero-latency in-memory cache for benchmarking L2 overhead. +type benchCache struct { + mu sync.RWMutex + storage map[string][]byte +} + +func newBenchCache() *benchCache { + return &benchCache{storage: make(map[string][]byte)} +} + +func (c *benchCache) Get(_ context.Context, keys []string) ([]*CacheEntry, error) { + c.mu.RLock() + defer c.mu.RUnlock() + result := make([]*CacheEntry, len(keys)) + for i, key := range keys { + if v, ok := c.storage[key]; ok { + result[i] = &CacheEntry{Key: key, Value: v, RemainingTTL: 30 * time.Second} + } + } + return result, nil +} + +func (c *benchCache) Set(_ context.Context, entries []*CacheEntry) error { + c.mu.Lock() + defer c.mu.Unlock() + for _, e := range entries { + if e == nil { + continue + } + c.storage[e.Key] = e.Value + } + return nil +} + +func (c *benchCache) Delete(_ context.Context, keys []string) error { + c.mu.Lock() + defer c.mu.Unlock() + for _, key := range keys { + delete(c.storage, key) + } + return nil +} + +// buildBenchResponse constructs a GraphQLResponse representing a typical federated query: +// +// query { topProducts { id name price } } +// +// Root fetch returns 10 products with __typename+id, then a batch entity fetch resolves name+price. +func buildBenchResponse(rootDS, entityDS DataSource, caching FetchCacheConfiguration) *GraphQLResponse { + entityRepRenderer := NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }) + + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"{topProducts{__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{ + {TypeName: "Query", FieldName: "topProducts"}, + }, + }, + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product{name price}}}","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }, + }, + Items: []InputTemplate{ + {Segments: []TemplateSegment{ + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: entityRepRenderer}, + }}, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`,`), SegmentType: StaticSegmentType}, + }, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }, + }, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{ + {TypeName: "Product", FieldName: "_entities"}, + }, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}}}, + }, + }, + }, + Caching: caching, + }, "query.topProducts", ArrayPath("topProducts")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("topProducts"), + Value: &Array{ + Path: []string{"topProducts"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Float{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + }, + } +} + +// buildParallelBenchResponse constructs a GraphQLResponse with parallel entity fetches +// to exercise the 4-phase parallel execution path. +// +// query { topProducts { id name price } reviews { id body rating } } +// +// Root fetch returns products+reviews, then two parallel batch entity fetches resolve details. +func buildParallelBenchResponse(rootDS, productDS, reviewDS DataSource, productCaching, reviewCaching FetchCacheConfiguration) *GraphQLResponse { + productRepRenderer := NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }) + reviewRepRenderer := NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }) + + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root","body":{"query":"{topProducts{__typename id} reviews{__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "root", + DataSourceName: "root", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{ + {TypeName: "Query", FieldName: "topProducts"}, + {TypeName: "Query", FieldName: "reviews"}, + }, + }, + }, "query"), + Parallel( + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product{name price}}}","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }}, + Items: []InputTemplate{{Segments: []TemplateSegment{ + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: productRepRenderer}, + }}}, + Separator: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "_entities"}}, + ProvidesData: &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}}}, + }}, + }, + Caching: productCaching, + }, "query.topProducts", ArrayPath("topProducts")), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://reviews","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Review{body rating}}}","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }}, + Items: []InputTemplate{{Segments: []TemplateSegment{ + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: reviewRepRenderer}, + }}}, + Separator: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: reviewDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "reviews", + DataSourceName: "reviews", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{{TypeName: "Review", FieldName: "_entities"}}, + ProvidesData: &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("body"), Value: &Scalar{Path: []string{"body"}}}, + {Name: []byte("rating"), Value: &Scalar{Path: []string{"rating"}}}, + }}, + }, + Caching: reviewCaching, + }, "query.reviews", ArrayPath("reviews")), + ), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("topProducts"), + Value: &Array{ + Path: []string{"topProducts"}, + Item: &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Float{Path: []string{"price"}}}, + }}, + }, + }, + { + Name: []byte("reviews"), + Value: &Array{ + Path: []string{"reviews"}, + Item: &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("body"), Value: &String{Path: []string{"body"}}}, + {Name: []byte("rating"), Value: &Integer{Path: []string{"rating"}}}, + }}, + }, + }, + }, + }, + } +} + +func entityCacheKeyTemplate() *EntityQueryCacheKeyTemplate { + return &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } +} + +// --- Sequential benchmarks (root fetch → batch entity fetch) --- + +// BenchmarkCachingOverhead_Sequential measures the full Loader.LoadGraphQLResponseData path +// for a sequential fetch tree (root → batch entity) under different caching configurations. +// +// Sub-benchmarks: +// - Disabled: L1=off, L2=off, no CacheKeyTemplate — measures true zero-overhead baseline +// - ConfiguredButDisabled: L1=off, L2=off, but CacheKeyTemplate IS set — detects any +// work done even when caching flags are off +// - L1Only: L1=on, L2=off — measures L1 overhead (sync.Map, key rendering) +// - L1L2_Miss: L1=on, L2=on, empty cache — measures L2 miss overhead (Get call, key prefix) +// - L1L2_Hit: L1=on, L2=on, pre-populated cache — measures L2 hit path (Get, parse, merge) +func BenchmarkCachingOverhead_Sequential(b *testing.B) { + rootData := []byte(`{"data":{"topProducts":[` + + `{"__typename":"Product","id":"p1"},` + + `{"__typename":"Product","id":"p2"},` + + `{"__typename":"Product","id":"p3"},` + + `{"__typename":"Product","id":"p4"},` + + `{"__typename":"Product","id":"p5"},` + + `{"__typename":"Product","id":"p6"},` + + `{"__typename":"Product","id":"p7"},` + + `{"__typename":"Product","id":"p8"},` + + `{"__typename":"Product","id":"p9"},` + + `{"__typename":"Product","id":"p10"}` + + `]}}`) + + entityData := []byte(`{"data":{"_entities":[` + + `{"__typename":"Product","id":"p1","name":"Product 1","price":10.00},` + + `{"__typename":"Product","id":"p2","name":"Product 2","price":20.00},` + + `{"__typename":"Product","id":"p3","name":"Product 3","price":30.00},` + + `{"__typename":"Product","id":"p4","name":"Product 4","price":40.00},` + + `{"__typename":"Product","id":"p5","name":"Product 5","price":50.00},` + + `{"__typename":"Product","id":"p6","name":"Product 6","price":60.00},` + + `{"__typename":"Product","id":"p7","name":"Product 7","price":70.00},` + + `{"__typename":"Product","id":"p8","name":"Product 8","price":80.00},` + + `{"__typename":"Product","id":"p9","name":"Product 9","price":90.00},` + + `{"__typename":"Product","id":"p10","name":"Product 10","price":100.00}` + + `]}}`) + + rootDS := &benchDataSource{data: rootData} + entityDS := &benchDataSource{data: entityData} + + b.Run("Disabled", func(b *testing.B) { + // No CacheKeyTemplate, L1=off, L2=off — true baseline + response := buildBenchResponse(rootDS, entityDS, FetchCacheConfiguration{}) + benchResolveSequential(b, response, false, false, nil) + }) + + b.Run("ConfiguredButDisabled", func(b *testing.B) { + // CacheKeyTemplate IS set but L1=off, L2=off — detects leaky guard checks + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildBenchResponse(rootDS, entityDS, caching) + benchResolveSequential(b, response, false, false, nil) + }) + + b.Run("L1Only", func(b *testing.B) { + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildBenchResponse(rootDS, entityDS, caching) + benchResolveSequential(b, response, true, false, nil) + }) + + b.Run("L1L2_Miss", func(b *testing.B) { + cache := newBenchCache() + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildBenchResponse(rootDS, entityDS, caching) + benchResolveSequential(b, response, true, true, cache) + }) + + b.Run("L1L2_Hit", func(b *testing.B) { + cache := newBenchCache() + // Pre-populate cache with all 10 entities + for i := range 10 { + id := "p" + itoa(i+1) + key := `{"__typename":"Product","key":{"id":"` + id + `"}}` + val := []byte(`{"__typename":"Product","id":"` + id + `","name":"Product ` + itoa(i+1) + `","price":` + itoa((i+1)*10) + `}`) + cache.storage[key] = val + } + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildBenchResponse(rootDS, entityDS, caching) + benchResolveSequential(b, response, true, true, cache) + }) +} + +func benchResolveSequential(b *testing.B, response *GraphQLResponse, enableL1, enableL2 bool, cache LoaderCache) { + b.Helper() + + caches := map[string]LoaderCache{} + if cache != nil { + caches["default"] = cache + } + + var buf bytes.Buffer + b.ReportAllocs() + b.ResetTimer() + + for b.Loop() { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + loader := &Loader{ + caches: caches, + jsonArena: ar, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = enableL1 + ctx.ExecutionOptions.Caching.EnableL2Cache = enableL2 + + _ = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + _ = loader.LoadGraphQLResponseData(ctx, response, resolvable) + + buf.Reset() + _ = resolvable.Resolve(ctx.ctx, response.Data, response.Fetches, &buf) + + loader.Free() + ar.Reset() + } +} + +// --- Parallel benchmarks (root → 2 parallel entity fetches) --- + +// BenchmarkCachingOverhead_Parallel measures the 4-phase parallel execution path under +// different caching configurations. +// +// The parallel path exercises Phase 1 (main thread L1 check), Phase 2 (goroutine L2+fetch), +// Phase 3 (analytics merge), and Phase 4 (result merge + cache population). +func BenchmarkCachingOverhead_Parallel(b *testing.B) { + rootData := []byte(`{"data":{"topProducts":[` + + `{"__typename":"Product","id":"p1"},` + + `{"__typename":"Product","id":"p2"},` + + `{"__typename":"Product","id":"p3"},` + + `{"__typename":"Product","id":"p4"},` + + `{"__typename":"Product","id":"p5"}` + + `],"reviews":[` + + `{"__typename":"Review","id":"r1"},` + + `{"__typename":"Review","id":"r2"},` + + `{"__typename":"Review","id":"r3"},` + + `{"__typename":"Review","id":"r4"},` + + `{"__typename":"Review","id":"r5"}` + + `]}}`) + + productData := []byte(`{"data":{"_entities":[` + + `{"__typename":"Product","id":"p1","name":"Product 1","price":10.00},` + + `{"__typename":"Product","id":"p2","name":"Product 2","price":20.00},` + + `{"__typename":"Product","id":"p3","name":"Product 3","price":30.00},` + + `{"__typename":"Product","id":"p4","name":"Product 4","price":40.00},` + + `{"__typename":"Product","id":"p5","name":"Product 5","price":50.00}` + + `]}}`) + + reviewData := []byte(`{"data":{"_entities":[` + + `{"__typename":"Review","id":"r1","body":"Great","rating":5},` + + `{"__typename":"Review","id":"r2","body":"Good","rating":4},` + + `{"__typename":"Review","id":"r3","body":"Okay","rating":3},` + + `{"__typename":"Review","id":"r4","body":"Meh","rating":2},` + + `{"__typename":"Review","id":"r5","body":"Bad","rating":1}` + + `]}}`) + + rootDS := &benchDataSource{data: rootData} + productDS := &benchDataSource{data: productData} + reviewDS := &benchDataSource{data: reviewData} + + noCaching := FetchCacheConfiguration{} + + b.Run("Disabled", func(b *testing.B) { + response := buildParallelBenchResponse(rootDS, productDS, reviewDS, noCaching, noCaching) + benchResolveParallel(b, response, false, false, nil) + }) + + b.Run("L1Only", func(b *testing.B) { + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildParallelBenchResponse(rootDS, productDS, reviewDS, caching, caching) + benchResolveParallel(b, response, true, false, nil) + }) + + b.Run("L1L2_Miss", func(b *testing.B) { + cache := newBenchCache() + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildParallelBenchResponse(rootDS, productDS, reviewDS, caching, caching) + benchResolveParallel(b, response, true, true, cache) + }) + + b.Run("L1L2_Hit", func(b *testing.B) { + cache := newBenchCache() + for i := range 5 { + pid := "p" + itoa(i+1) + pKey := `{"__typename":"Product","key":{"id":"` + pid + `"}}` + pVal := []byte(`{"__typename":"Product","id":"` + pid + `","name":"Product ` + itoa(i+1) + `","price":` + itoa((i+1)*10) + `}`) + cache.storage[pKey] = pVal + + rid := "r" + itoa(i+1) + rKey := `{"__typename":"Review","key":{"id":"` + rid + `"}}` + rVal := []byte(`{"__typename":"Review","id":"` + rid + `","body":"Review ` + itoa(i+1) + `","rating":` + itoa(i+1) + `}`) + cache.storage[rKey] = rVal + } + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildParallelBenchResponse(rootDS, productDS, reviewDS, caching, caching) + benchResolveParallel(b, response, true, true, cache) + }) +} + +func benchResolveParallel(b *testing.B, response *GraphQLResponse, enableL1, enableL2 bool, cache LoaderCache) { + b.Helper() + + caches := map[string]LoaderCache{} + if cache != nil { + caches["default"] = cache + } + + var buf bytes.Buffer + b.ReportAllocs() + b.ResetTimer() + + for b.Loop() { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + loader := &Loader{ + caches: caches, + jsonArena: ar, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = enableL1 + ctx.ExecutionOptions.Caching.EnableL2Cache = enableL2 + + _ = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + _ = loader.LoadGraphQLResponseData(ctx, response, resolvable) + + buf.Reset() + _ = resolvable.Resolve(ctx.ctx, response.Data, response.Fetches, &buf) + + loader.Free() + ar.Reset() + } +} + +// --- Analytics overhead benchmark --- + +// BenchmarkCachingOverhead_Analytics measures the additional overhead of EnableCacheAnalytics +// on top of L1+L2 caching. Analytics collects per-entity events, field hashes, and timing data. +func BenchmarkCachingOverhead_Analytics(b *testing.B) { + rootData := []byte(`{"data":{"topProducts":[` + + `{"__typename":"Product","id":"p1"},` + + `{"__typename":"Product","id":"p2"},` + + `{"__typename":"Product","id":"p3"},` + + `{"__typename":"Product","id":"p4"},` + + `{"__typename":"Product","id":"p5"}` + + `]}}`) + + entityData := []byte(`{"data":{"_entities":[` + + `{"__typename":"Product","id":"p1","name":"Product 1","price":10.00},` + + `{"__typename":"Product","id":"p2","name":"Product 2","price":20.00},` + + `{"__typename":"Product","id":"p3","name":"Product 3","price":30.00},` + + `{"__typename":"Product","id":"p4","name":"Product 4","price":40.00},` + + `{"__typename":"Product","id":"p5","name":"Product 5","price":50.00}` + + `]}}`) + + rootDS := &benchDataSource{data: rootData} + entityDS := &benchDataSource{data: entityData} + + cache := newBenchCache() + caching := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: entityCacheKeyTemplate(), + UseL1Cache: true, + } + response := buildBenchResponse(rootDS, entityDS, caching) + + caches := map[string]LoaderCache{"default": cache} + + b.Run("AnalyticsOff", func(b *testing.B) { + var buf bytes.Buffer + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + loader := &Loader{caches: caches, jsonArena: ar} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = false + + _ = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + _ = loader.LoadGraphQLResponseData(ctx, response, resolvable) + + buf.Reset() + _ = resolvable.Resolve(ctx.ctx, response.Data, response.Fetches, &buf) + + loader.Free() + ar.Reset() + } + }) + + b.Run("AnalyticsOn", func(b *testing.B) { + var buf bytes.Buffer + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + loader := &Loader{caches: caches, jsonArena: ar} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + _ = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + _ = loader.LoadGraphQLResponseData(ctx, response, resolvable) + + buf.Reset() + _ = resolvable.Resolve(ctx.ctx, response.Data, response.Fetches, &buf) + + loader.Free() + ar.Reset() + } + }) +} + +func itoa(n int) string { + return strconv.Itoa(n) +} diff --git a/v2/pkg/engine/resolve/circuit_breaker.go b/v2/pkg/engine/resolve/circuit_breaker.go new file mode 100644 index 0000000000..fc45aaa3e9 --- /dev/null +++ b/v2/pkg/engine/resolve/circuit_breaker.go @@ -0,0 +1,250 @@ +package resolve + +import ( + "context" + "errors" + "maps" + "sync/atomic" + "time" +) + +// ErrCircuitBreakerOpen is returned by the circuit breaker cache wrappers +// (Get / Set / Delete) when the breaker is open. It lets callers distinguish +// a breaker short-circuit from either a true backend error or a genuine cache +// miss. Callers that do not care can continue to treat any non-nil error as a +// soft failure; callers that want to suppress analytics noise from a breaker +// skip should check it with errors.Is. +var ErrCircuitBreakerOpen = errors.New("circuit breaker open") + +// Default circuit breaker parameters applied by wrapCachesWithCircuitBreakers +// when CircuitBreakerConfig values are zero or unset. +const ( + // DefaultFailureThreshold is the number of consecutive failures that trips + // the breaker when CircuitBreakerConfig.FailureThreshold is not set. + DefaultFailureThreshold = 5 + // DefaultCooldownPeriod is how long the breaker stays open before allowing + // a probe request when CircuitBreakerConfig.CooldownPeriod is not set. + DefaultCooldownPeriod = 10 * time.Second +) + +// CircuitBreakerConfig configures the L2 cache circuit breaker for a named cache instance. +// When the circuit is open, all L2 operations (Get/Set/Delete) are skipped and the engine +// falls back to subgraph fetches. This prevents cascading latency when the cache backend +// (e.g., Redis) is slow or unavailable. +type CircuitBreakerConfig struct { + // Enabled activates the circuit breaker for this cache instance. + Enabled bool + + // FailureThreshold is the number of consecutive failures that trips the breaker. + // Default: 5 + FailureThreshold int + + // CooldownPeriod is how long the breaker stays open before allowing a probe request. + // Default: 10s + CooldownPeriod time.Duration +} + +// cbSnapshot is the immutable state of a circuit breaker, swapped atomically. +// A single atomic.Pointer load on the fast path (closed state) avoids multiple +// atomic loads and ensures readers always see a consistent state. +type cbSnapshot struct { + consecutiveFailures int64 + openedAt int64 // unix nano timestamp, 0 = closed + probeInFlight bool +} + +// closed is the shared zero-value snapshot for the closed state. +// Since snapshots are immutable, all closed breakers can share this pointer. +var closedSnapshot = &cbSnapshot{} + +// circuitBreakerState tracks the state of one circuit breaker instance. +// State is stored as an immutable snapshot behind an atomic pointer, so all +// reads see a consistent view and the fast path (breaker closed) is a single +// atomic load + nil-like check. +// +// States: +// - Closed: openedAt == 0. All operations pass through. +// - Open: openedAt != 0 && now < openedAt + cooldown. All operations are skipped. +// - Half-Open: openedAt != 0 && now >= openedAt + cooldown. One probe request allowed. +type circuitBreakerState struct { + snap atomic.Pointer[cbSnapshot] + config CircuitBreakerConfig +} + +func newCircuitBreakerState(config CircuitBreakerConfig) *circuitBreakerState { + s := &circuitBreakerState{config: config} + s.snap.Store(closedSnapshot) + return s +} + +// shouldAllow returns true if the operation should proceed. +// Fast path: single atomic load, check openedAt == 0. +// In half-open state, uses CAS on the snapshot pointer to allow exactly one probe. +func (cb *circuitBreakerState) shouldAllow() bool { + snap := cb.snap.Load() + if snap.openedAt == 0 { + return true // closed — single atomic load on hot path + } + + elapsed := time.Since(time.Unix(0, snap.openedAt)) + if elapsed < cb.config.CooldownPeriod { + return false // open, cooldown not elapsed + } + + // Half-open: allow exactly one probe via CAS on the snapshot pointer. + // Only the goroutine that wins the CAS gets to probe. + if snap.probeInFlight { + return false // another probe already in flight + } + probing := &cbSnapshot{ + consecutiveFailures: snap.consecutiveFailures, + openedAt: snap.openedAt, + probeInFlight: true, + } + return cb.snap.CompareAndSwap(snap, probing) +} + +// recordSuccess resets the breaker to closed state with a single atomic store. +func (cb *circuitBreakerState) recordSuccess() { + snap := cb.snap.Load() + if snap.openedAt == 0 && snap.consecutiveFailures == 0 { + return // already closed — single atomic load on fast path + } + cb.snap.Store(closedSnapshot) +} + +// recordFailure increments the failure counter and trips the breaker if threshold is reached. +func (cb *circuitBreakerState) recordFailure() { + for { + snap := cb.snap.Load() + if snap.probeInFlight { + // Half-open probe failed — reopen immediately with fresh timestamp. + reopened := &cbSnapshot{ + consecutiveFailures: snap.consecutiveFailures, + openedAt: time.Now().UnixNano(), + } + if cb.snap.CompareAndSwap(snap, reopened) { + return + } + continue // snapshot changed, retry + } + newFailures := snap.consecutiveFailures + 1 + next := &cbSnapshot{ + consecutiveFailures: newFailures, + openedAt: snap.openedAt, + } + if newFailures >= int64(cb.config.FailureThreshold) { + next.openedAt = time.Now().UnixNano() + } + if cb.snap.CompareAndSwap(snap, next) { + return + } + // snapshot changed concurrently, retry + } +} + +// isOpen returns true if the breaker is currently open (not allowing operations). +func (cb *circuitBreakerState) isOpen() bool { + snap := cb.snap.Load() + if snap.openedAt == 0 { + return false + } + elapsed := time.Since(time.Unix(0, snap.openedAt)) + return elapsed < cb.config.CooldownPeriod +} + +// forceOpen sets the breaker to open state with the given timestamp. +// Used only in tests to set up initial conditions. +func (cb *circuitBreakerState) forceOpen(openedAt int64, failures int64) { + cb.snap.Store(&cbSnapshot{ + consecutiveFailures: failures, + openedAt: openedAt, + }) +} + +// failures returns the current consecutive failure count. Used in tests. +func (cb *circuitBreakerState) failures() int64 { + return cb.snap.Load().consecutiveFailures +} + +// circuitBreakerCache wraps a LoaderCache with circuit breaker protection. +// When the breaker is open: +// - Get returns (nil, ErrCircuitBreakerOpen) — callers treat via errors.Is as a clean skip +// - Set returns ErrCircuitBreakerOpen — same, analytics should not record as a backend error +// - Delete returns ErrCircuitBreakerOpen — same +// +// Returning the sentinel (instead of nil) preserves the "fall back to subgraph" +// behavior for callers that only check for a non-nil value/error, while letting +// callers that care distinguish a breaker-skip from a real backend failure. +// The sentinel is a package-level singleton so the open path stays allocation-free. +type circuitBreakerCache struct { + inner LoaderCache + state *circuitBreakerState +} + +func (c *circuitBreakerCache) Get(ctx context.Context, keys []string) ([]*CacheEntry, error) { + if !c.state.shouldAllow() { + return nil, ErrCircuitBreakerOpen + } + entries, err := c.inner.Get(ctx, keys) + if err != nil { + c.state.recordFailure() + return nil, err + } + c.state.recordSuccess() + return entries, nil +} + +func (c *circuitBreakerCache) Set(ctx context.Context, entries []*CacheEntry) error { + if !c.state.shouldAllow() { + return ErrCircuitBreakerOpen + } + err := c.inner.Set(ctx, entries) + if err != nil { + c.state.recordFailure() + return err + } + c.state.recordSuccess() + return nil +} + +func (c *circuitBreakerCache) Delete(ctx context.Context, keys []string) error { + if !c.state.shouldAllow() { + return ErrCircuitBreakerOpen + } + err := c.inner.Delete(ctx, keys) + if err != nil { + c.state.recordFailure() + return err + } + c.state.recordSuccess() + return nil +} + +// wrapCachesWithCircuitBreakers returns a shallow copy of caches with circuit breaker +// wrappers applied where configured. The original map is not mutated. +// Called once during Resolver.New(). +func wrapCachesWithCircuitBreakers(caches map[string]LoaderCache, configs map[string]CircuitBreakerConfig) map[string]LoaderCache { + if caches == nil || configs == nil { + return caches + } + wrapped := make(map[string]LoaderCache, len(caches)) + maps.Copy(wrapped, caches) + for name, cbConfig := range configs { + cache, ok := wrapped[name] + if !ok || !cbConfig.Enabled { + continue + } + if cbConfig.FailureThreshold <= 0 { + cbConfig.FailureThreshold = DefaultFailureThreshold + } + if cbConfig.CooldownPeriod <= 0 { + cbConfig.CooldownPeriod = DefaultCooldownPeriod + } + wrapped[name] = &circuitBreakerCache{ + inner: cache, + state: newCircuitBreakerState(cbConfig), + } + } + return wrapped +} diff --git a/v2/pkg/engine/resolve/circuit_breaker_test.go b/v2/pkg/engine/resolve/circuit_breaker_test.go new file mode 100644 index 0000000000..acedc8c1d9 --- /dev/null +++ b/v2/pkg/engine/resolve/circuit_breaker_test.go @@ -0,0 +1,435 @@ +package resolve + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// failingCache is a test LoaderCache that fails on demand. +// Uses atomic counters for goroutine safety in concurrent tests. +type failingCache struct { + getErr error + setErr error + deleteErr error + getCalls atomic.Int64 + setCalls atomic.Int64 + delCalls atomic.Int64 +} + +func (c *failingCache) Get(_ context.Context, _ []string) ([]*CacheEntry, error) { + c.getCalls.Add(1) + if c.getErr != nil { + return nil, c.getErr + } + return []*CacheEntry{{Key: "k", Value: []byte("v")}}, nil +} + +func (c *failingCache) Set(_ context.Context, _ []*CacheEntry) error { + c.setCalls.Add(1) + return c.setErr +} + +func (c *failingCache) Delete(_ context.Context, _ []string) error { + c.delCalls.Add(1) + return c.deleteErr +} + +// TestCircuitBreaker_OpenCloseTransitions verifies circuit breaker state machine transitions +// (closed/open/half-open) for L2 cache wrappers. Without this, cache outages could cascade +// into subgraph overload or silent data loss. +func TestCircuitBreaker_OpenCloseTransitions(t *testing.T) { + cacheErr := errors.New("redis: connection refused") + + t.Run("closed - passes through on success", func(t *testing.T) { + inner := &failingCache{} + cb := &circuitBreakerCache{ + inner: inner, + state: newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 3, + CooldownPeriod: time.Second, + }), + } + + ctx := t.Context() + entries, err := cb.Get(ctx, []string{"k1"}) + require.NoError(t, err) + assert.Len(t, entries, 1) + assert.Equal(t, int64(1), inner.getCalls.Load()) + + err = cb.Set(ctx, []*CacheEntry{{Key: "k1", TTL: time.Minute}}) + require.NoError(t, err) + assert.Equal(t, int64(1), inner.setCalls.Load()) + + err = cb.Delete(ctx, []string{"k1"}) + require.NoError(t, err) + assert.Equal(t, int64(1), inner.delCalls.Load()) + }) + + t.Run("stays closed below threshold", func(t *testing.T) { + inner := &failingCache{getErr: cacheErr} + cb := &circuitBreakerCache{ + inner: inner, + state: newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 3, + CooldownPeriod: time.Second, + }), + } + + ctx := t.Context() + // Two failures — below threshold of 3 + _, _ = cb.Get(ctx, []string{"k1"}) + _, _ = cb.Get(ctx, []string{"k1"}) + + // Two failures below threshold of 3 — still closed + assert.Equal(t, int64(2), inner.getCalls.Load()) + assert.False(t, cb.state.isOpen()) + + // Third call passes through (threshold reached ON this call) + _, _ = cb.Get(ctx, []string{"k1"}) + assert.Equal(t, int64(3), inner.getCalls.Load()) + assert.True(t, cb.state.isOpen()) + }) + + t.Run("opens after consecutive failures reach threshold", func(t *testing.T) { + inner := &failingCache{getErr: cacheErr} + cb := &circuitBreakerCache{ + inner: inner, + state: newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 2, + CooldownPeriod: time.Second, + }), + } + + ctx := t.Context() + _, _ = cb.Get(ctx, []string{"k1"}) + _, _ = cb.Get(ctx, []string{"k1"}) + assert.True(t, cb.state.isOpen()) + + // While open: Get returns nil + ErrCircuitBreakerOpen, inner is not called + entries, err := cb.Get(ctx, []string{"k1"}) + assert.Equal(t, ErrCircuitBreakerOpen, err) + assert.True(t, errors.Is(err, ErrCircuitBreakerOpen)) + assert.Nil(t, entries) + assert.Equal(t, int64(2), inner.getCalls.Load()) + }) + + t.Run("open breaker skips Set and Delete", func(t *testing.T) { + inner := &failingCache{setErr: cacheErr, deleteErr: cacheErr} + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 1, + CooldownPeriod: time.Second, + }) + // Force open + state.forceOpen(time.Now().UnixNano(), 0) + + cb := &circuitBreakerCache{inner: inner, state: state} + + ctx := t.Context() + // Open breaker: Set and Delete return ErrCircuitBreakerOpen and skip the inner cache + err := cb.Set(ctx, []*CacheEntry{{Key: "k1", TTL: time.Minute}}) + assert.Equal(t, ErrCircuitBreakerOpen, err) + assert.True(t, errors.Is(err, ErrCircuitBreakerOpen)) + assert.Equal(t, int64(0), inner.setCalls.Load()) + + err = cb.Delete(ctx, []string{"k1"}) + assert.Equal(t, ErrCircuitBreakerOpen, err) + assert.True(t, errors.Is(err, ErrCircuitBreakerOpen)) + assert.Equal(t, int64(0), inner.delCalls.Load()) + }) + + t.Run("half-open probe success closes breaker", func(t *testing.T) { + inner := &failingCache{} // no errors — probe succeeds + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 2, + CooldownPeriod: 10 * time.Millisecond, + }) + // Open the breaker in the past so cooldown has elapsed + state.forceOpen(time.Now().Add(-50*time.Millisecond).UnixNano(), 2) + + cb := &circuitBreakerCache{inner: inner, state: state} + + ctx := t.Context() + entries, err := cb.Get(ctx, []string{"k1"}) + require.NoError(t, err) + // Successful probe: breaker closes, failures reset + assert.Len(t, entries, 1) + assert.Equal(t, int64(1), inner.getCalls.Load()) + assert.False(t, cb.state.isOpen()) + assert.Equal(t, int64(0), cb.state.failures()) + }) + + t.Run("half-open probe failure re-opens breaker", func(t *testing.T) { + inner := &failingCache{getErr: cacheErr} + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 1, + CooldownPeriod: 10 * time.Millisecond, + }) + // Open the breaker in the past so cooldown has elapsed + state.forceOpen(time.Now().Add(-50*time.Millisecond).UnixNano(), 0) + + cb := &circuitBreakerCache{inner: inner, state: state} + + ctx := t.Context() + // Failed probe: breaker re-opens + _, err := cb.Get(ctx, []string{"k1"}) + assert.Error(t, err) + assert.Equal(t, int64(1), inner.getCalls.Load()) + assert.True(t, cb.state.isOpen()) + }) + + t.Run("success resets consecutive failure count", func(t *testing.T) { + inner := &failingCache{} + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 3, + CooldownPeriod: time.Second, + }) + + cb := &circuitBreakerCache{inner: inner, state: state} + + ctx := t.Context() + + // Two failures + inner.getErr = cacheErr + _, _ = cb.Get(ctx, []string{"k1"}) + _, _ = cb.Get(ctx, []string{"k1"}) + assert.Equal(t, int64(2), state.failures()) + + // One success resets count + inner.getErr = nil + // One success resets the failure counter + _, err := cb.Get(ctx, []string{"k1"}) + require.NoError(t, err) + assert.Equal(t, int64(0), state.failures()) + assert.False(t, state.isOpen()) + }) + + t.Run("concurrent failures trip breaker exactly once", func(t *testing.T) { + // 100 goroutines all failing concurrently with threshold=5. + // The breaker must end up open, and the failure count must be + // between threshold and goroutine count (CAS retries may cause + // some increments to be lost, but the threshold crossing is never missed). + inner := &failingCache{getErr: cacheErr} + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 5, + CooldownPeriod: time.Second, + }) + cb := &circuitBreakerCache{inner: inner, state: state} + + ctx := t.Context() + var wg sync.WaitGroup + for range 100 { + wg.Go(func() { + _, _ = cb.Get(ctx, []string{"k1"}) + }) + } + wg.Wait() + + assert.True(t, state.isOpen()) + if inner.getCalls.Load() < int64(5) { + t.Fatalf("expected at least 5 inner calls before breaker opened, got %d", inner.getCalls.Load()) + } + }) + + t.Run("concurrent half-open allows exactly one probe", func(t *testing.T) { + // Open the breaker with expired cooldown, then race 50 goroutines + // calling shouldAllow. Exactly one should win the CAS probe. + // We do NOT call recordSuccess so the breaker stays in half-open + // with probeInFlight=true — this isolates the CAS behavior. + var probeCount atomic.Int64 + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 1, + CooldownPeriod: 10 * time.Millisecond, + }) + // Open in the past so cooldown has elapsed → half-open + state.forceOpen(time.Now().Add(-50*time.Millisecond).UnixNano(), 1) + + var wg sync.WaitGroup + for range 50 { + wg.Go(func() { + if state.shouldAllow() { + probeCount.Add(1) + // Intentionally do NOT call recordSuccess — we're testing + // that exactly one goroutine wins the CAS, not the reset path. + } + }) + } + wg.Wait() + + // Exactly one goroutine should have won the CAS probe + assert.Equal(t, int64(1), probeCount.Load()) + }) + + t.Run("concurrent mixed success and failure", func(t *testing.T) { + // 50 goroutines succeed, 50 fail concurrently. Threshold is 100. + // The breaker must remain closed because the success calls reset + // the failure counter before it can reach 100. + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 100, + CooldownPeriod: time.Second, + }) + + var wg sync.WaitGroup + for range 50 { + wg.Go(func() { + state.recordSuccess() + }) + } + for range 50 { + wg.Go(func() { + state.recordFailure() + }) + } + wg.Wait() + + // With interleaved success resets, the breaker should not have tripped + assert.False(t, state.isOpen()) + }) + + t.Run("concurrent probe failure re-opens correctly", func(t *testing.T) { + // Open the breaker with expired cooldown → half-open. + // One goroutine wins the probe, but the probe fails. + // Verify the breaker re-opens and subsequent calls are blocked. + inner := &failingCache{getErr: cacheErr} + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 1, + CooldownPeriod: 10 * time.Millisecond, // short cooldown so initial state is half-open + }) + // Open 50ms ago with 10ms cooldown → cooldown elapsed → half-open + state.forceOpen(time.Now().Add(-50*time.Millisecond).UnixNano(), 1) + + cb := &circuitBreakerCache{inner: inner, state: state} + + ctx := t.Context() + var wg sync.WaitGroup + var probeResults sync.Map + + for i := range 20 { + wg.Go(func() { + _, err := cb.Get(ctx, []string{"k1"}) + switch { + case err == nil: + // Probe succeeded — should not happen here because inner always fails. + probeResults.Store(i, "probed-succeeded") + case errors.Is(err, ErrCircuitBreakerOpen): + // Breaker blocked the call before reaching inner. + probeResults.Store(i, "blocked") + default: + // Inner cache returned an error (the one goroutine that won the probe). + probeResults.Store(i, "probed-failed") + } + }) + } + wg.Wait() + + // Count how many actually probed (got an error back from inner) + var probedCount int + probeResults.Range(func(_, v any) bool { + if v == "probed-failed" { + probedCount++ + } + return true + }) + + assert.Equal(t, 1, probedCount) + // After probe failure, recordFailure re-opens with a fresh timestamp. + // The new openedAt is ~now, so with 10ms cooldown it's still in the open window. + assert.True(t, state.isOpen()) + }) + + t.Run("wrapCachesWithCircuitBreakers applies defaults", func(t *testing.T) { + inner := &failingCache{} + caches := map[string]LoaderCache{"default": inner} + configs := map[string]CircuitBreakerConfig{ + "default": {Enabled: true}, // no threshold or cooldown set + } + + result := wrapCachesWithCircuitBreakers(caches, configs) + + wrapped, ok := result["default"].(*circuitBreakerCache) + // Verify defaults applied and original map not mutated + require.True(t, ok) + assert.Equal(t, 5, wrapped.state.config.FailureThreshold) + assert.Equal(t, 10*time.Second, wrapped.state.config.CooldownPeriod) + _, originalWrapped := caches["default"].(*circuitBreakerCache) + assert.False(t, originalWrapped) + }) + + t.Run("wrapCachesWithCircuitBreakers skips disabled", func(t *testing.T) { + inner := &failingCache{} + caches := map[string]LoaderCache{"default": inner} + configs := map[string]CircuitBreakerConfig{ + "default": {Enabled: false}, + } + + result := wrapCachesWithCircuitBreakers(caches, configs) + + _, ok := result["default"].(*circuitBreakerCache) + assert.False(t, ok) + }) + + t.Run("wrapCachesWithCircuitBreakers ignores missing cache names", func(t *testing.T) { + caches := map[string]LoaderCache{"default": &failingCache{}} + configs := map[string]CircuitBreakerConfig{ + "nonexistent": {Enabled: true}, + } + + result := wrapCachesWithCircuitBreakers(caches, configs) + + _, ok := result["default"].(*circuitBreakerCache) + assert.False(t, ok) + }) +} + +// TestCircuitBreaker_OpenReturnsSentinel verifies that open-breaker Get/Set/Delete +// return ErrCircuitBreakerOpen so callers can distinguish a breaker-skip from a +// real backend error via errors.Is. This is the signal used by loader_cache.go +// call sites to suppress analytics/trace error recording when the breaker trips. +func TestCircuitBreaker_OpenReturnsSentinel(t *testing.T) { + inner := &failingCache{} + state := newCircuitBreakerState(CircuitBreakerConfig{ + Enabled: true, + FailureThreshold: 1, + CooldownPeriod: time.Second, + }) + // Force open so every call short-circuits. + state.forceOpen(time.Now().UnixNano(), 1) + cb := &circuitBreakerCache{inner: inner, state: state} + + ctx := t.Context() + + entries, getErr := cb.Get(ctx, []string{"k1", "k2"}) + assert.Nil(t, entries) + assert.Equal(t, ErrCircuitBreakerOpen, getErr) + assert.True(t, errors.Is(getErr, ErrCircuitBreakerOpen)) + + setErr := cb.Set(ctx, []*CacheEntry{{Key: "k1", TTL: time.Minute}}) + assert.Equal(t, ErrCircuitBreakerOpen, setErr) + assert.True(t, errors.Is(setErr, ErrCircuitBreakerOpen)) + + delErr := cb.Delete(ctx, []string{"k1"}) + assert.Equal(t, ErrCircuitBreakerOpen, delErr) + assert.True(t, errors.Is(delErr, ErrCircuitBreakerOpen)) + + // Inner cache was never called. + assert.Equal(t, int64(0), inner.getCalls.Load()) + assert.Equal(t, int64(0), inner.setCalls.Load()) + assert.Equal(t, int64(0), inner.delCalls.Load()) +} diff --git a/v2/pkg/engine/resolve/const.go b/v2/pkg/engine/resolve/const.go index 8702e93a06..a08b5d38b4 100644 --- a/v2/pkg/engine/resolve/const.go +++ b/v2/pkg/engine/resolve/const.go @@ -32,12 +32,10 @@ var ( literalRateLimit = []byte("rateLimit") literalAuthorization = []byte("authorization") - emptyArray = []byte("[]") emptyObject = []byte("{}") ) var ( - errNonNullableFieldValueIsNull = errors.New("non Nullable field value is null") - errHeaderPathInvalid = errors.New("invalid header path: header variables must be of this format: .request.header.{{ key }} ") - ErrUnableToResolve = errors.New("unable to resolve operation") + errHeaderPathInvalid = errors.New("invalid header path: header variables must be of this format: .request.header.{{ key }} ") + ErrUnableToResolve = errors.New("unable to resolve operation") ) diff --git a/v2/pkg/engine/resolve/context.go b/v2/pkg/engine/resolve/context.go index 0b3e626f1b..518b0e1557 100644 --- a/v2/pkg/engine/resolve/context.go +++ b/v2/pkg/engine/resolve/context.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "io" + "maps" "net/http" "sort" "time" @@ -45,6 +46,14 @@ type Context struct { SubgraphHeadersBuilder SubgraphHeadersBuilder + // Debug enables enrichment of context with debug metadata (e.g., cache fetch info). + // Zero overhead when disabled (production default). Tests opt in via engine.WithDebugMode(). + Debug bool + + // cacheAnalytics collects detailed cache analytics when EnableCacheAnalytics is true. + // Nil when analytics is disabled. Use cacheAnalyticsEnabled() as a fast guard. + cacheAnalytics *CacheAnalyticsCollector + // ActualListSizes is populated by the resolver after resolution completes, // before the response body is written. Maps JSON path to actual list size. // Used to compute the actual cost. @@ -126,6 +135,77 @@ type ExecutionOptions struct { // However, if you're benchmarking internals of the engine, it can be helpful to switch it off // When disabled (set to true) the code becomes a no-op DisableInboundRequestDeduplication bool + // Caching configures L1 (per-request) and L2 (external) entity caching. + Caching CachingOptions + // ErrorBehavior controls error handling during resolution. + // Only effective when OnErrorEnabled is true in ResolverOptions. + // Default is ErrorBehaviorPropagate for backward compatibility. + ErrorBehavior ErrorBehavior +} + +// CachingOptions configures the L1/L2 entity caching behavior. +// +// L1 Cache (Per-Request, In-Memory): +// - Stored in Loader as sync.Map +// - Lifecycle: Single GraphQL request +// - Key format: Entity cache key WITHOUT subgraph header prefix +// - Thread-safe via sync.Map for parallel fetch support +// - Purpose: Prevents redundant fetches for same entity at different paths +// - IMPORTANT: Only used for entity fetches, NOT root fetches. +// Root fields have no prior entity data to look up. +// +// L2 Cache (External, Cross-Request): +// - Uses LoaderCache interface implementations (e.g., Redis) +// - Lifecycle: Configured TTL, shared across requests +// - Key format: Entity cache key WITH optional subgraph header prefix +// - Purpose: Reduces subgraph load by caching across requests +// - Applies to both root fetches and entity fetches +// +// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch +// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1) +// L2CacheKeyInterceptorInfo provides metadata about the cache key being transformed. +type L2CacheKeyInterceptorInfo struct { + SubgraphName string + CacheName string +} + +// L2CacheKeyInterceptor transforms L2 cache key strings before they are used +// for cache lookups and writes. Called once per cache key during key preparation. +// The ctx parameter is the request's context.Context, allowing access to +// request-scoped values (e.g., tenant ID from middleware). +type L2CacheKeyInterceptor func(ctx context.Context, key string, info L2CacheKeyInterceptorInfo) string + +type CachingOptions struct { + // EnableL1Cache enables per-request in-memory entity caching. + // L1 prevents redundant fetches for the same entity within a single request. + // Only applies to entity fetches (not root queries) since root queries + // have no prior entity data to use as a cache key. + // Default: false (must be explicitly enabled) + EnableL1Cache bool + // EnableL2Cache enables external cache lookups (e.g., Redis). + // L2 allows sharing entity data across requests. + // Default: false (must be explicitly enabled) + // Note: When false, existing FetchCacheConfiguration.Enabled still controls + // per-fetch L2 behavior for backward compatibility. + EnableL2Cache bool + // EnableCacheAnalytics enables detailed cache analytics collection. + // When true, per-key cache events, write events, field value hashes, + // entity counts, and partial hit tracking are recorded. + // When false (default), GetCacheStats() returns an empty snapshot. + // The analytics collector is nil-guarded so the disabled path has zero overhead. + EnableCacheAnalytics bool + // L2CacheKeyInterceptor, when set, transforms L2 cache key strings before + // they are used for lookups, writes, and deletions. This allows library users + // to add custom prefixes/suffixes (e.g., tenant isolation) without modifying + // graphql-go-tools internals. Does not affect L1 cache keys. + // Default: nil (no transformation) + L2CacheKeyInterceptor L2CacheKeyInterceptor + // GlobalCacheKeyPrefix is prepended to all L2 cache keys (before header hash prefix). + // Use this for schema versioning: set to a schema hash so that schema changes + // automatically separate cache entries without requiring a cache flush. + // Format: "{prefix}:{rest_of_key}". Empty string means no prefix. + // Applied in order: global prefix → header hash prefix → interceptor. + GlobalCacheKeyPrefix string } type FieldValue struct { @@ -251,6 +331,36 @@ func (c *Context) appendSubgraphErrors(ds DataSourceInfo, errs ...error) { c.subgraphErrors[ds.Name] = errors.Join(c.subgraphErrors[ds.Name], errors.Join(errs...)) } +// GetCacheStats returns a snapshot of the cache statistics for the current request +// and releases the collector back to the pool. After this call, cacheAnalyticsEnabled() +// returns false and further Record* calls are no-ops. Callers must take the snapshot +// exactly once per request; all downstream analytics consumers operate on the returned +// CacheAnalyticsSnapshot (a plain value that holds its own copies). +func (c *Context) GetCacheStats() CacheAnalyticsSnapshot { + if c.cacheAnalytics != nil { + snap := c.cacheAnalytics.Snapshot() + ReleaseCacheAnalyticsCollector(c.cacheAnalytics) + c.cacheAnalytics = nil + return snap + } + return CacheAnalyticsSnapshot{} +} + +// cacheAnalyticsEnabled returns true if the cache analytics collector is active. +// Used as a fast nil-pointer guard throughout the instrumentation code. +func (c *Context) cacheAnalyticsEnabled() bool { + return c.cacheAnalytics != nil +} + +// initCacheAnalytics obtains a pooled analytics collector if EnableCacheAnalytics is set. +// The collector is returned to the pool by Context.Free(). +// Called once at the start of LoadGraphQLResponseData. +func (c *Context) initCacheAnalytics() { + if c.ExecutionOptions.Caching.EnableCacheAnalytics { + c.cacheAnalytics = AcquireCacheAnalyticsCollector() + } +} + type Request struct { ID uint64 Header http.Header @@ -281,26 +391,22 @@ func (c *Context) WithContext(ctx context.Context) *Context { func (c *Context) clone(ctx context.Context) *Context { cpy := *c cpy.ctx = ctx - if c.Variables != nil { - variablesData := c.Variables.MarshalTo(nil) - cpy.Variables = astjson.MustParseBytes(variablesData) - } + // DeepCopy with a nil arena returns a heap-allocated deep copy, isolating + // the clone from the source arena's *astjson.Value. Returns nil when input + // is nil, so no separate guard is needed. + cpy.Variables = astjson.DeepCopy(nil, c.Variables) cpy.Files = append([]*httpclient.FileUpload(nil), c.Files...) cpy.Request.Header = c.Request.Header.Clone() cpy.RenameTypeNames = append([]RenameTypeName(nil), c.RenameTypeNames...) if c.RemapVariables != nil { cpy.RemapVariables = make(map[string]string, len(c.RemapVariables)) - for k, v := range c.RemapVariables { - cpy.RemapVariables[k] = v - } + maps.Copy(cpy.RemapVariables, c.RemapVariables) } if c.subgraphErrors != nil { cpy.subgraphErrors = make(map[string]error, len(c.subgraphErrors)) - for k, v := range c.subgraphErrors { - cpy.subgraphErrors[k] = v - } + maps.Copy(cpy.subgraphErrors, c.subgraphErrors) } return &cpy @@ -318,6 +424,10 @@ func (c *Context) Free() { c.subgraphErrors = nil c.authorizer = nil c.LoaderHooks = nil + if c.cacheAnalytics != nil { + ReleaseCacheAnalyticsCollector(c.cacheAnalytics) + c.cacheAnalytics = nil + } c.GetDeduplicationData = nil c.SetDeduplicationData = nil c.ActualListSizes = nil diff --git a/v2/pkg/engine/resolve/entity_cache_hit_bench_test.go b/v2/pkg/engine/resolve/entity_cache_hit_bench_test.go new file mode 100644 index 0000000000..031e8e311f --- /dev/null +++ b/v2/pkg/engine/resolve/entity_cache_hit_bench_test.go @@ -0,0 +1,319 @@ +package resolve + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +func BenchmarkEntityCacheHitPath(b *testing.B) { + providesData := benchArticleProvidesData(2) + + for _, entityCount := range []int{1, 32} { + b.Run("entities="+strconv.Itoa(entityCount), func(b *testing.B) { + for _, tracing := range []bool{false, true} { + tracingLabel := "tracing=off" + if tracing { + tracingLabel = "tracing=on" + } + + b.Run("L1/"+tracingLabel, func(b *testing.B) { + benchTryL1CacheLoadHitPath(b, entityCount, tracing, providesData) + }) + b.Run("L2/"+tracingLabel, func(b *testing.B) { + benchTryL2CacheLoadHitPath(b, entityCount, tracing, providesData) + }) + } + }) + } +} + +func benchTryL1CacheLoadHitPath(b *testing.B, entityCount int, tracing bool, providesData *Object) { + requestArena := arena.NewMonotonicArena(arena.WithMinBufferSize(128 * 1024)) + // Cache-backing arena: holds cached *astjson.Value across benchmark + // iterations. We never Reset it so stored pointers stay valid. + cacheArena := arena.NewMonotonicArena(arena.WithMinBufferSize(128 * 1024)) + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.TracingOptions.Enable = tracing + + loader := &Loader{ + jsonArena: requestArena, + ctx: ctx, + l1Cache: map[string]*astjson.Value{}, + } + + cacheKeys := make([]*CacheKey, 0, entityCount) + for i := range entityCount { + id := "article-" + strconv.Itoa(i) + cacheKey := "Article:" + id + parsed, err := astjson.ParseBytesWithArena(cacheArena, benchArticleJSON(id)) + if err != nil { + b.Fatalf("parse bench article: %v", err) + } + loader.l1Cache[cacheKey] = parsed + cacheKeys = append(cacheKeys, &CacheKey{ + Keys: []string{cacheKey}, + }) + } + + info := &FetchInfo{ + OperationType: ast.OperationTypeQuery, + DataSourceName: "bench-subgraph", + RootFields: []GraphCoordinate{ + {TypeName: "Article", FieldName: "_entities"}, + }, + ProvidesData: providesData, + } + + res := &result{} + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + requestArena.Reset() + resetCacheKeyState(cacheKeys) + resetCacheResult(res) + if !loader.tryL1CacheLoad(info, cacheKeys, res) { + b.Fatal("expected complete L1 cache hit") + } + } +} + +func benchTryL2CacheLoadHitPath(b *testing.B, entityCount int, tracing bool, providesData *Object) { + requestArena := arena.NewMonotonicArena(arena.WithMinBufferSize(128 * 1024)) + cache := newBenchCache() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.TracingOptions.Enable = tracing + + loader := &Loader{ + jsonArena: requestArena, + ctx: ctx, + } + + l1Keys := make([]*CacheKey, 0, entityCount) + l2Keys := make([]*CacheKey, 0, entityCount) + for i := range entityCount { + id := "article-" + strconv.Itoa(i) + cacheKey := "Article:" + id + cache.storage[cacheKey] = benchArticleJSON(id) + l1Keys = append(l1Keys, &CacheKey{ + Keys: []string{cacheKey}, + }) + l2Keys = append(l2Keys, &CacheKey{ + Keys: []string{cacheKey}, + }) + } + + info := &FetchInfo{ + OperationType: ast.OperationTypeQuery, + DataSourceName: "bench-subgraph", + RootFields: []GraphCoordinate{ + {TypeName: "Article", FieldName: "_entities"}, + }, + ProvidesData: providesData, + } + + res := &result{ + cache: cache, + cacheConfig: FetchCacheConfiguration{TTL: time.Minute}, + l1CacheKeys: l1Keys, + l2CacheKeys: l2Keys, + } + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + requestArena.Reset() + resetCacheKeyState(l1Keys) + resetCacheKeyState(l2Keys) + resetCacheResult(res) + res.cache = cache + res.cacheConfig = FetchCacheConfiguration{TTL: time.Minute} + res.l1CacheKeys = l1Keys + res.l2CacheKeys = l2Keys + skipFetch, err := loader.tryL2CacheLoad(context.Background(), info, res) + if err != nil { + b.Fatal(err) + } + if !skipFetch { + b.Fatal("expected complete L2 cache hit") + } + } +} + +func resetCacheKeyState(keys []*CacheKey) { + for _, key := range keys { + if key == nil { + continue + } + key.FromCache = nil + key.missingKeys = nil + key.cachedData = cachedData{} + } +} + +func resetCacheResult(res *result) { + res.cachedItemIndices = nil + res.fetchItemIndices = nil + res.cacheSkipFetch = false + res.cacheMustBeUpdated = false + res.cacheTraceDurationSinceStartNano = 0 + res.cacheTraceDurationNano = 0 + res.cacheTraceEntityCount = 0 + res.cacheTraceL2GetAttempted = false + res.cacheTraceL2SetAttempted = false + res.cacheTraceL2SetNegAttempted = false + res.cacheTraceL2GetDuration = 0 + res.cacheTraceL2SetDuration = 0 + res.cacheTraceL2SetNegDuration = 0 + res.cacheTraceL2GetError = "" + res.cacheTraceL2SetError = "" + res.cacheTraceL2SetNegError = "" + res.cacheTraceL1Hits = 0 + res.cacheTraceL1Misses = 0 + res.cacheTraceRequestScopedHits = 0 + res.cacheTraceL2Hits = 0 + res.cacheTraceL2Misses = 0 + res.cacheTraceNegativeHits = 0 + res.cacheTraceShadowHit = false + res.cacheTraceEntityDetails = nil +} + +func benchArticleProvidesData(relatedDepth int) *Object { + viewer := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Nullable: true}}, + {Name: []byte("name"), Value: &Scalar{Nullable: true}}, + {Name: []byte("email"), Value: &Scalar{Nullable: true}}, + }, + } + + article := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &Scalar{Nullable: true}}, + {Name: []byte("id"), Value: &Scalar{Nullable: true}}, + {Name: []byte("title"), Value: &Scalar{Nullable: true}}, + {Name: []byte("body"), Value: &Scalar{Nullable: true}}, + {Name: []byte("tags"), Value: &Array{Nullable: true, Item: &Scalar{Nullable: true}}}, + {Name: []byte("viewCount"), Value: &Scalar{Nullable: true}}, + {Name: []byte("rating"), Value: &Scalar{Nullable: true}}, + {Name: []byte("reviewSummary"), Value: &Scalar{Nullable: true}}, + {Name: []byte("personalizedRecommendation"), Value: &Scalar{Nullable: true}}, + {Name: []byte("currentViewer"), Value: viewer}, + }, + } + + if relatedDepth > 0 { + article.Fields = append(article.Fields, &Field{ + Name: []byte("relatedArticles"), + Value: &Array{ + Nullable: true, + Item: benchArticleProvidesData(relatedDepth - 1), + }, + }) + } + + ComputeHasAliases(article) + return article +} + +func benchArticleJSON(id string) []byte { + return []byte(`{ + "__typename":"Article", + "id":"` + id + `", + "title":"Title ` + id + `", + "body":"Body for ` + id + `", + "tags":["graphql","cache","router"], + "viewCount":12345, + "rating":4.7, + "reviewSummary":"Strong engagement and stable recommendation quality.", + "personalizedRecommendation":"Recommended because the current viewer follows router performance topics.", + "currentViewer":{ + "id":"viewer-1", + "name":"Alice", + "email":"alice@example.com" + }, + "relatedArticles":[ + { + "__typename":"Article", + "id":"` + id + `-rel-1", + "title":"Related 1", + "body":"Nested body 1", + "tags":["perf"], + "viewCount":7, + "rating":4.2, + "reviewSummary":"Nested review 1", + "personalizedRecommendation":"Nested recommendation 1", + "currentViewer":{ + "id":"viewer-1", + "name":"Alice", + "email":"alice@example.com" + }, + "relatedArticles":[ + { + "__typename":"Article", + "id":"` + id + `-rel-1a", + "title":"Nested 1A", + "body":"Deep body 1A", + "tags":["deep"], + "viewCount":3, + "rating":4.0, + "reviewSummary":"Deep review 1A", + "personalizedRecommendation":"Deep recommendation 1A", + "currentViewer":{ + "id":"viewer-1", + "name":"Alice", + "email":"alice@example.com" + } + } + ] + }, + { + "__typename":"Article", + "id":"` + id + `-rel-2", + "title":"Related 2", + "body":"Nested body 2", + "tags":["entity"], + "viewCount":9, + "rating":4.4, + "reviewSummary":"Nested review 2", + "personalizedRecommendation":"Nested recommendation 2", + "currentViewer":{ + "id":"viewer-1", + "name":"Alice", + "email":"alice@example.com" + }, + "relatedArticles":[ + { + "__typename":"Article", + "id":"` + id + `-rel-2a", + "title":"Nested 2A", + "body":"Deep body 2A", + "tags":["deep"], + "viewCount":4, + "rating":4.1, + "reviewSummary":"Deep review 2A", + "personalizedRecommendation":"Deep recommendation 2A", + "currentViewer":{ + "id":"viewer-1", + "name":"Alice", + "email":"alice@example.com" + } + } + ] + } + ] + }`) +} diff --git a/v2/pkg/engine/resolve/entity_cache_partial_writeback_regression_test.go b/v2/pkg/engine/resolve/entity_cache_partial_writeback_regression_test.go new file mode 100644 index 0000000000..fe485bf0ad --- /dev/null +++ b/v2/pkg/engine/resolve/entity_cache_partial_writeback_regression_test.go @@ -0,0 +1,420 @@ +package resolve + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// TestEntityFetchWritebackPreservesExistingCachedFields verifies that partial entity fetches +// merge new fields into existing cached entries instead of overwriting them. +// Without this, a narrow projection (e.g. only "brand") would wipe previously cached fields (e.g. "title"). +func TestEntityFetchWritebackPreservesExistingCachedFields(t *testing.T) { + cache := NewFakeLoaderCache() + productKey := `{"__typename":"Product","key":{"id":"prod-1"}}` + + // Seed the shared Product entity key with one partial projection. + out1 := runSingleProductEntityFieldRequest(t, cache, []productFieldSpec{ + {name: "title", value: "Alpha Widget"}, + }) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","title":"Alpha Widget"}}}`, out1) + assert.Equal(t, `{"__typename":"Product","id":"prod-1","title":"Alpha Widget"}`, string(cache.GetValue(productKey))) + + cache.ClearLog() + + // Re-fetch the same entity through the same cache key, but with a narrower projection. + // The response should still only contain `brand`, while the cache writeback must merge + // that fresh field into the previously cached `title` payload instead of replacing it. + out2 := runSingleProductEntityFieldRequest(t, cache, []productFieldSpec{ + {name: "brand", value: "Acme Corp"}, + }) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","brand":"Acme Corp"}}}`, out2) + assert.Equal(t, []CacheLogEntry{ + // L2 hit on the existing entity entry. + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + // Writeback merges the new projection into the cached object under the same key. + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, + }, cache.GetLog()) + assert.Equal(t, `{"__typename":"Product","id":"prod-1","title":"Alpha Widget","brand":"Acme Corp"}`, string(cache.GetValue(productKey))) + + cache.ClearLog() + + // A later request for both fields should now be a pure cache hit. If the previous + // writeback had overwritten `title`, this request would have to fetch again. + out3 := runSingleProductEntityFieldRequest(t, cache, []productFieldSpec{ + {name: "title", value: "Alpha Widget"}, + {name: "brand", value: "Acme Corp"}, + }) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","title":"Alpha Widget","brand":"Acme Corp"}}}`, out3) + assert.Equal(t, []CacheLogEntry{ + // No writeback on the final request: the merged cache entry is already complete. + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + }, cache.GetLog()) +} + +// TestRootFieldEntityCacheEntrySurvivesLaterPartialEntityFetch verifies that a root field's +// cache entry (stored via EntityKeyMappings) is not overwritten when a later entity fetch +// writes a narrower projection to the same shared entity key. +func TestRootFieldEntityCacheEntrySurvivesLaterPartialEntityFetch(t *testing.T) { + cache := NewFakeLoaderCache() + productKey := `{"__typename":"Product","key":{"id":"prod-1"}}` + + // First populate the shared Product entity key from a root-field cache write. + out1 := runProductByIDRootRequest(t, cache) + assert.Equal(t, `{"data":{"productById":{"__typename":"Product","id":"prod-1","sku":"ABC","title":"Alpha Widget"}}}`, out1) + assert.Equal(t, `{"__typename":"Product","id":"prod-1","sku":"ABC","title":"Alpha Widget"}`, string(cache.GetValue(productKey))) + + cache.ClearLog() + + // Then resolve the same entity through a different root field that only asks the entity + // subgraph for `brand`. This reproduces the cross-path regression: the narrower entity + // fetch must extend the existing shared entry instead of wiping out `sku` and `title`. + out2 := runProductBySKUWithBrandRequest(t, cache) + assert.Equal(t, `{"data":{"productBySku":{"__typename":"Product","id":"prod-1","brand":"Acme Corp"}}}`, out2) + assert.Equal(t, []CacheLogEntry{ + // Read the shared entity key created by the first root-field request. + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: true}}}, + // Rewrite that same key with the merged view of old root-field data plus new entity data. + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, + }, cache.GetLog()) + assert.Equal(t, `{"__typename":"Product","id":"prod-1","sku":"ABC","title":"Alpha Widget","brand":"Acme Corp"}`, string(cache.GetValue(productKey))) +} + +type productFieldSpec struct { + name string + value string +} + +func runSingleProductEntityFieldRequest(t *testing.T, cache LoaderCache, fields []productFieldSpec) string { + t.Helper() + + // The root fetch only contributes the entity identity. The second fetch requests the + // actual field projection and is the one that exercises partial entity-cache writeback. + rootDS := &staticDataSource{data: []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`)} + entityDS := &staticDataSource{data: productEntityResponse(fields)} + response := buildSingleProductFieldResponse(rootDS, entityDS, fields) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + return fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) +} + +func buildSingleProductFieldResponse(rootDS, entityDS DataSource, fields []productFieldSpec) *GraphQLResponse { + fieldInfos := make([]GraphCoordinate, 0, len(fields)) + responseFields := make([]*Field, 0, len(fields)+1) + providesFields := make([]*Field, 0, len(fields)) + + responseFields = append(responseFields, &Field{ + Name: []byte("id"), + Value: &String{Path: []string{"id"}}, + }) + + for _, field := range fields { + fieldInfos = append(fieldInfos, GraphCoordinate{TypeName: "Product", FieldName: field.name}) + providesFields = append(providesFields, &Field{ + Name: []byte(field.name), + Value: &Scalar{Path: []string{field.name}, Nullable: false}, + }) + responseFields = append(responseFields, &Field{ + Name: []byte(field.name), + Value: &String{Path: []string{field.name}}, + }) + } + + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "product"}}, + OperationType: ast.OperationTypeQuery, + }, + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"..."}}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "details", + DataSourceName: "details", + RootFields: fieldInfos, + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{Fields: providesFields}, + }, + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: responseFields, + }, + }, + }, + }, + } +} + +func productEntityResponse(fields []productFieldSpec) []byte { + var payload strings.Builder + payload.WriteString(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1"`) + for _, field := range fields { + payload.WriteString(`,"`) + payload.WriteString(field.name) + payload.WriteString(`":"`) + payload.WriteString(field.value) + payload.WriteString(`"`) + } + payload.WriteString(`}]}}`) + return []byte(payload.String()) +} + +func runProductByIDRootRequest(t *testing.T, cache LoaderCache) string { + t.Helper() + + // This root query caches a full Product object and maps it onto the shared Product + // entity key, which lets later entity fetches hit and update the same cache entry. + rootDS := &staticDataSource{data: []byte(`{"data":{"productById":{"__typename":"Product","id":"prod-1","sku":"ABC","title":"Alpha Widget"}}}`)} + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: NewRootQueryCacheKeyTemplate( + []QueryField{{ + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "productById"}, + ResponseKey: "productById", + Args: []FieldArgument{{ + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }}, + }}, + []EntityKeyMappingConfig{{ + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{{ + EntityKeyField: "id", + ArgumentPath: []string{"id"}, + }}, + }}, + ), + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "items", + DataSourceName: "items", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "productById"}}, + OperationType: ast.OperationTypeQuery, + }, + }, "query"), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte("productById"), + Value: &Object{ + Path: []string{"productById"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("sku"), Value: &String{Path: []string{"sku"}}}, + {Name: []byte("title"), Value: &String{Path: []string{"title"}}}, + }, + }, + }}, + }, + } + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"prod-1"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + return fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) +} + +func runProductBySKUWithBrandRequest(t *testing.T, cache LoaderCache) string { + t.Helper() + + // The root fetch finds the entity identity by SKU. The follow-up entity fetch asks only + // for `brand`, which is enough to reproduce the bug if writeback overwrites the cache. + rootDS := &staticDataSource{data: []byte(`{"data":{"productBySku":{"__typename":"Product","id":"prod-1"}}}`)} + entityDS := &staticDataSource{data: []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","brand":"Acme Corp"}]}}`)} + rootFieldEntityTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Path: []string{"productBySku"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + UseL1Cache: true, + RootFieldL1EntityCacheKeyTemplates: map[string]CacheKeyTemplate{ + "productBySku:Product": rootFieldEntityTemplate, + }, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "items", + DataSourceName: "items", + RootFields: []GraphCoordinate{{TypeName: "Query", FieldName: "productBySku"}}, + OperationType: ast.OperationTypeQuery, + }, + }, "query"), + SingleWithPath(&EntityFetch{ + Input: EntityInput{ + Header: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {brand}}}","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }}, + Item: InputTemplate{Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + }}, + Footer: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + SkipErrItem: true, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "details", + DataSourceName: "details", + RootFields: []GraphCoordinate{{TypeName: "Product", FieldName: "brand"}}, + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("brand"), Value: &Scalar{Path: []string{"brand"}, Nullable: false}}, + }, + }, + }, + }, "query.productBySku", ObjectPath("productBySku")), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte("productBySku"), + Value: &Object{ + Path: []string{"productBySku"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("brand"), Value: &String{Path: []string{"brand"}}}, + }, + }, + }}, + }, + } + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"sku":"ABC","region":"US"}`)) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + return fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) +} diff --git a/v2/pkg/engine/resolve/entity_merge_path_test.go b/v2/pkg/engine/resolve/entity_merge_path_test.go new file mode 100644 index 0000000000..37fce5c01d --- /dev/null +++ b/v2/pkg/engine/resolve/entity_merge_path_test.go @@ -0,0 +1,914 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +// TestEntityMergePath tests the EntityMergePath mechanism, which enables cache +// sharing between root field fetches and entity fetches. +// +// Problem: A root field fetch (e.g. Query.user(id:"1234")) returns response-level +// data like {"user":{"id":"1234","username":"Me"}}. An entity fetch for the same +// entity returns entity-level data like {"id":"1234","username":"Me"} (no wrapper). +// When both use the same cache key (derived entity key), the stored format must be +// consistent so either fetch type can read the other's cache entries. +// +// Solution: EntityMergePath records the JSON path (e.g. ["user"]) at which the +// entity data is nested in the root field response. On store, cacheKeysToEntries +// strips the wrapper. On load, tryL2CacheLoad re-wraps the entity data. +func TestEntityMergePath_AllPathVariants(t *testing.T) { + + // Group 1: prepareCacheKeys — EntityMergePath assignment + + t.Run("prepareCacheKeys", func(t *testing.T) { + t.Run("root field with EntityKeyMappings single field sets EntityMergePath from field name", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"1234"}`)) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + } + + cfg := FetchCacheConfiguration{ + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + item := astjson.MustParseBytes([]byte(`{"user":{"id":"1234","username":"Me"}}`)) + inputItems := []*astjson.Value{item} + res := &result{} + + isEntity, err := loader.prepareCacheKeys(&FetchInfo{}, cfg, inputItems, res) + require.NoError(t, err) + assert.False(t, isEntity) + require.Equal(t, 1, len(res.l1CacheKeys)) + assert.Equal(t, []string{"user"}, res.l1CacheKeys[0].EntityMergePath) + }) + + t.Run("root field with EntityKeyMappings sets EntityMergePath from explicit MergePath", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"1234"}`)) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + } + + cfg := FetchCacheConfiguration{ + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + item := astjson.MustParseBytes([]byte(`{"data":{"user":{"id":"1234"}}}`)) + inputItems := []*astjson.Value{item} + res := &result{ + postProcessing: PostProcessingConfiguration{ + MergePath: []string{"data", "user"}, + }, + } + + isEntity, err := loader.prepareCacheKeys(&FetchInfo{}, cfg, inputItems, res) + require.NoError(t, err) + assert.False(t, isEntity) + require.Equal(t, 1, len(res.l1CacheKeys)) + assert.Equal(t, []string{"data", "user"}, res.l1CacheKeys[0].EntityMergePath) + }) + + t.Run("root field without EntityKeyMappings does not set EntityMergePath", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"1234"}`)) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + } + + cfg := FetchCacheConfiguration{ + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + }, + // No EntityKeyMappings + }, + } + + item := astjson.MustParseBytes([]byte(`{"user":{"id":"1234"}}`)) + inputItems := []*astjson.Value{item} + res := &result{} + + _, err := loader.prepareCacheKeys(&FetchInfo{}, cfg, inputItems, res) + require.NoError(t, err) + require.Equal(t, 1, len(res.l1CacheKeys)) + assert.Equal(t, []string(nil), res.l1CacheKeys[0].EntityMergePath) + }) + + t.Run("entity fetch template does not set EntityMergePath", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + } + + cfg := FetchCacheConfiguration{ + CacheKeyTemplate: &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + } + + item := astjson.MustParseBytes([]byte(`{"__typename":"User","id":"1234"}`)) + inputItems := []*astjson.Value{item} + res := &result{} + + isEntity, err := loader.prepareCacheKeys(&FetchInfo{}, cfg, inputItems, res) + require.NoError(t, err) + assert.True(t, isEntity) + require.Equal(t, 1, len(res.l1CacheKeys)) + assert.Equal(t, []string(nil), res.l1CacheKeys[0].EntityMergePath) + }) + + // When there are multiple root fields, EntityMergePath cannot be derived from a single + // field name (ambiguous), so it falls back to res.postProcessing.MergePath if available. + t.Run("multiple root fields without MergePath does not set EntityMergePath", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"1234"}`)) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + } + + cfg := FetchCacheConfiguration{ + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "account"}, + ResponseKey: "account", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + item := astjson.MustParseBytes([]byte(`{"user":{"id":"1234"}}`)) + inputItems := []*astjson.Value{item} + res := &result{} + + _, err := loader.prepareCacheKeys(&FetchInfo{}, cfg, inputItems, res) + require.NoError(t, err) + require.Equal(t, 1, len(res.l1CacheKeys)) + assert.Equal(t, []string(nil), res.l1CacheKeys[0].EntityMergePath) + }) + + t.Run("multiple root fields with MergePath sets EntityMergePath", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"1234"}`)) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + } + + cfg := FetchCacheConfiguration{ + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "account"}, + ResponseKey: "account", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + item := astjson.MustParseBytes([]byte(`{"user":{"id":"1234"}}`)) + inputItems := []*astjson.Value{item} + res := &result{ + postProcessing: PostProcessingConfiguration{ + MergePath: []string{"user"}, + }, + } + + _, err := loader.prepareCacheKeys(&FetchInfo{}, cfg, inputItems, res) + require.NoError(t, err) + require.Equal(t, 1, len(res.l1CacheKeys)) + assert.Equal(t, []string{"user"}, res.l1CacheKeys[0].EntityMergePath) + }) + }) + + // Group 2: cacheKeysToEntries — Extract entity data for storage + + t.Run("cacheKeysToEntries", func(t *testing.T) { + t.Run("EntityMergePath set extracts entity data only", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + item := astjson.MustParseBytes([]byte(`{"user":{"id":"1234","username":"Me"}}`)) + cacheKeys := []*CacheKey{ + { + Item: item, + Keys: []string{`{"__typename":"User","key":{"id":"1234"}}`}, + EntityMergePath: []string{"user"}, + }, + } + + entries, err := loader.cacheKeysToEntries(ar, cacheKeys) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, entries[0].Key) + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(entries[0].Value)) + }) + + t.Run("EntityMergePath not set stores full response", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + item := astjson.MustParseBytes([]byte(`{"user":{"id":"1234","username":"Me"}}`)) + cacheKeys := []*CacheKey{ + { + Item: item, + Keys: []string{`root:user:1234`}, + }, + } + + entries, err := loader.cacheKeysToEntries(ar, cacheKeys) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + assert.Equal(t, `root:user:1234`, entries[0].Key) + assert.Equal(t, `{"user":{"id":"1234","username":"Me"}}`, string(entries[0].Value)) + }) + + t.Run("EntityMergePath set but data not found at path stores full response", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + item := astjson.MustParseBytes([]byte(`{"user":{"id":"1234"}}`)) + cacheKeys := []*CacheKey{ + { + Item: item, + Keys: []string{`key1`}, + EntityMergePath: []string{"nonexistent"}, + }, + } + + entries, err := loader.cacheKeysToEntries(ar, cacheKeys) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + assert.Equal(t, `{"user":{"id":"1234"}}`, string(entries[0].Value)) + }) + + t.Run("multi-segment EntityMergePath extracts at nested path", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + item := astjson.MustParseBytes([]byte(`{"data":{"user":{"id":"1234"}}}`)) + cacheKeys := []*CacheKey{ + { + Item: item, + Keys: []string{`key1`}, + EntityMergePath: []string{"data", "user"}, + }, + } + + entries, err := loader.cacheKeysToEntries(ar, cacheKeys) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + assert.Equal(t, `{"id":"1234"}`, string(entries[0].Value)) + }) + + t.Run("partial writeback merges cached entity fields before storing", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + cacheKeys := []*CacheKey{ + { + Item: astjson.MustParseBytes([]byte(`{"id":"1234","brand":"Acme"}`)), + FromCache: astjson.MustParseBytes([]byte(`{"id":"1234","name":"Table","sku":"sku-1234"}`)), + Keys: []string{`{"__typename":"Product","key":{"id":"1234"}}`}, + }, + } + + entries, err := loader.cacheKeysToEntries(ar, cacheKeys) + require.NoError(t, err) + assert.Equal(t, []*CacheEntry{ + { + Key: `{"__typename":"Product","key":{"id":"1234"}}`, + Value: []byte(`{"id":"1234","name":"Table","sku":"sku-1234","brand":"Acme"}`), + }, + }, entries) + }) + }) + + // Group 3: tryL2CacheLoad — Wrap cached entity data on load + + t.Run("tryL2CacheLoad wrapping", func(t *testing.T) { + t.Run("EntityMergePath set and cache hit wraps entity data", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cache := NewFakeLoaderCache() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + } + + // Pre-populate cache with entity-level data (as stored by cacheKeysToEntries with EntityMergePath) + cacheKey := `{"__typename":"User","key":{"id":"1234"}}` + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"id":"1234","username":"Me"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + + // Set up result with L2 cache keys that have EntityMergePath + res := &result{ + cache: cache, + l2CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + EntityMergePath: []string{"user"}, + }, + }, + l1CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + EntityMergePath: []string{"user"}, + }, + }, + } + + // Call tryL2CacheLoad + // ProvidesData must match the wrapped response shape for validation to pass + skipFetch, err := loader.tryL2CacheLoad(context.Background(), &FetchInfo{ + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("user"), Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}}}, + }, + }}, + }, + }, + }, res) + require.NoError(t, err) + assert.True(t, skipFetch, "all items cached, should skip fetch") + + // Verify the L2 cache key's FromCache was wrapped + require.NotNil(t, res.l2CacheKeys[0].FromCache) + wrapped := string(res.l2CacheKeys[0].FromCache.MarshalTo(nil)) + assert.Equal(t, `{"user":{"id":"1234","username":"Me"}}`, wrapped) + + // Verify L1 cache key also received the wrapped value (L2-to-L1 copy) + require.NotNil(t, res.l1CacheKeys[0].FromCache) + l1Wrapped := string(res.l1CacheKeys[0].FromCache.MarshalTo(nil)) + assert.Equal(t, `{"user":{"id":"1234","username":"Me"}}`, l1Wrapped) + + // L2 events are accumulated on res.l2AnalyticsEvents (merged to ctx in main resolve loop only) + }) + + t.Run("EntityMergePath not set and cache hit returns data as-is", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cache := NewFakeLoaderCache() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + } + + cacheKey := `root:user:1234` + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"user":{"id":"1234","username":"Me"}}`)}, + }, 30*time.Second)) + require.NoError(t, err) + + res := &result{ + cache: cache, + l2CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + // No EntityMergePath + }, + }, + l1CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + }, + }, + } + + skipFetch, err := loader.tryL2CacheLoad(context.Background(), &FetchInfo{ + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("user"), Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}}}, + }, + }}, + }, + }, + }, res) + require.NoError(t, err) + assert.True(t, skipFetch, "all items cached, should skip fetch") + + require.NotNil(t, res.l2CacheKeys[0].FromCache) + unwrapped := string(res.l2CacheKeys[0].FromCache.MarshalTo(nil)) + assert.Equal(t, `{"user":{"id":"1234","username":"Me"}}`, unwrapped) + + // Verify L1 cache key also received the value (L2-to-L1 copy) + require.NotNil(t, res.l1CacheKeys[0].FromCache) + l1Value := string(res.l1CacheKeys[0].FromCache.MarshalTo(nil)) + assert.Equal(t, `{"user":{"id":"1234","username":"Me"}}`, l1Value) + + // L2 events are accumulated on res.l2AnalyticsEvents (merged to ctx in main resolve loop only) + }) + + t.Run("EntityMergePath set but cache miss stays nil", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cache := NewFakeLoaderCache() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + } + + // Don't populate cache — miss + + res := &result{ + cache: cache, + l2CacheKeys: []*CacheKey{ + { + Keys: []string{`{"__typename":"User","key":{"id":"9999"}}`}, + EntityMergePath: []string{"user"}, + }, + }, + l1CacheKeys: []*CacheKey{ + { + Keys: []string{`{"__typename":"User","key":{"id":"9999"}}`}, + EntityMergePath: []string{"user"}, + }, + }, + } + + skipFetch, err := loader.tryL2CacheLoad(context.Background(), &FetchInfo{ + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + }, + }, + }, res) + require.NoError(t, err) + assert.False(t, skipFetch, "cache miss, should not skip fetch") + + assert.Nil(t, res.l2CacheKeys[0].FromCache) + + // L2 events are accumulated on res.l2AnalyticsEvents (merged to ctx in main resolve loop only) + }) + + t.Run("multi-segment EntityMergePath wraps at each level", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cache := NewFakeLoaderCache() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + } + + cacheKey := `key1` + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"id":"1234"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + + res := &result{ + cache: cache, + l2CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + EntityMergePath: []string{"data", "user"}, + }, + }, + l1CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + EntityMergePath: []string{"data", "user"}, + }, + }, + } + + skipFetch, err := loader.tryL2CacheLoad(context.Background(), &FetchInfo{ + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("data"), Value: &Object{ + Path: []string{"data"}, + Fields: []*Field{ + {Name: []byte("user"), Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + }, + }}, + }, + }}, + }, + }, + }, res) + require.NoError(t, err) + assert.True(t, skipFetch, "all items cached, should skip fetch") + + require.NotNil(t, res.l2CacheKeys[0].FromCache) + wrapped := string(res.l2CacheKeys[0].FromCache.MarshalTo(nil)) + assert.Equal(t, `{"data":{"user":{"id":"1234"}}}`, wrapped) + + // Verify L1 cache key also received the wrapped value (L2-to-L1 copy) + require.NotNil(t, res.l1CacheKeys[0].FromCache) + l1Wrapped := string(res.l1CacheKeys[0].FromCache.MarshalTo(nil)) + assert.Equal(t, `{"data":{"user":{"id":"1234"}}}`, l1Wrapped) + + // L2 events are accumulated on res.l2AnalyticsEvents (merged to ctx in main resolve loop only) + }) + }) + + // Group 4: Roundtrip consistency + + t.Run("roundtrip", func(t *testing.T) { + t.Run("store then load via EntityMergePath produces original data", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cache := NewFakeLoaderCache() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + } + + originalJSON := `{"user":{"id":"1234","username":"Me"}}` + item := astjson.MustParseBytes([]byte(originalJSON)) + + // Step 1: Create cache keys with EntityMergePath and convert to entries (store) + cacheKey := `{"__typename":"User","key":{"id":"1234"}}` + storeKeys := []*CacheKey{ + { + Item: item, + Keys: []string{cacheKey}, + EntityMergePath: []string{"user"}, + }, + } + + entries, err := loader.cacheKeysToEntries(ar, storeKeys) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + // Verify it stored entity-level data + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(entries[0].Value)) + + // Step 2: Store in L2 cache + err = cache.Set(context.Background(), withCacheEntryTTL(entries, 30*time.Second)) + require.NoError(t, err) + + // Step 3: Load from L2 cache with EntityMergePath wrapping + loadRes := &result{ + cache: cache, + l2CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + EntityMergePath: []string{"user"}, + }, + }, + l1CacheKeys: []*CacheKey{ + { + Keys: []string{cacheKey}, + EntityMergePath: []string{"user"}, + }, + }, + } + + _, err = loader.tryL2CacheLoad(context.Background(), &FetchInfo{ + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}}}, + }, + }, + }, loadRes) + require.NoError(t, err) + + // Verify roundtrip: loaded data should match original + require.NotNil(t, loadRes.l2CacheKeys[0].FromCache) + loaded := string(loadRes.l2CacheKeys[0].FromCache.MarshalTo(nil)) + assert.Equal(t, originalJSON, loaded) + }) + + t.Run("root field store is loadable by entity fetch using same derived key", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + cache := NewFakeLoaderCache() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.Variables = astjson.MustParseBytes([]byte(`{"id":"1234"}`)) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + } + + // Step 1: Root field fetch produces response with wrapper + rootItem := astjson.MustParseBytes([]byte(`{"user":{"__typename":"User","id":"1234","username":"Me"}}`)) + + // prepareCacheKeys for root field with EntityKeyMappings + rootCfg := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + RootFields: []QueryField{ + { + Coordinate: GraphCoordinate{TypeName: "Query", FieldName: "user"}, + ResponseKey: "user", + Args: []FieldArgument{ + { + Name: "id", + Variable: &ContextVariable{ + Path: []string{"id"}, + Renderer: NewPlainVariableRenderer(), + }, + }, + }, + }, + }, + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "User", + FieldMappings: []EntityFieldMappingConfig{ + {EntityKeyField: "id", ArgumentPath: []string{"id"}}, + }, + }, + }, + }, + } + + rootRes := &result{} + _, err := loader.prepareCacheKeys(&FetchInfo{}, rootCfg, []*astjson.Value{rootItem}, rootRes) + require.NoError(t, err) + require.Equal(t, 1, len(rootRes.l1CacheKeys)) + assert.Equal(t, []string{"user"}, rootRes.l1CacheKeys[0].EntityMergePath) + + // Store: cacheKeysToEntries should extract entity-level data + entries, err := loader.cacheKeysToEntries(ar, rootRes.l1CacheKeys) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + // Entity-level data (stripped of the "user" wrapper) + assert.Equal(t, `{"__typename":"User","id":"1234","username":"Me"}`, string(entries[0].Value)) + + // Store in L2 + err = cache.Set(context.Background(), withCacheEntryTTL(entries, 30*time.Second)) + require.NoError(t, err) + + // Step 2: Entity fetch tries to load from cache using same key format + // Entity fetches use EntityQueryCacheKeyTemplate which produces the same key + entityItem := astjson.MustParseBytes([]byte(`{"__typename":"User","id":"1234"}`)) + entityCfg := FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + } + + entityRes := &result{} + isEntity, err := loader.prepareCacheKeys(&FetchInfo{}, entityCfg, []*astjson.Value{entityItem}, entityRes) + require.NoError(t, err) + assert.True(t, isEntity) + require.Equal(t, 1, len(entityRes.l1CacheKeys)) + // Entity fetch should NOT have EntityMergePath + assert.Equal(t, []string(nil), entityRes.l1CacheKeys[0].EntityMergePath) + + // Verify key format matches between root (derived entity key) and entity fetch + rootKeyStr := rootRes.l1CacheKeys[0].Keys[0] + entityKeyStr := entityRes.l1CacheKeys[0].Keys[0] + assert.Equal(t, rootKeyStr, entityKeyStr, "root field derived entity key should match entity fetch key") + + // The entity fetch can now find the cache entry stored by the root field + cacheEntries, err := cache.Get(context.Background(), []string{entityKeyStr}) + require.NoError(t, err) + require.Equal(t, 1, len(cacheEntries)) + require.NotNil(t, cacheEntries[0]) + assert.Equal(t, `{"__typename":"User","id":"1234","username":"Me"}`, string(cacheEntries[0].Value)) + }) + }) +} diff --git a/v2/pkg/engine/resolve/error_behavior.go b/v2/pkg/engine/resolve/error_behavior.go new file mode 100644 index 0000000000..3a0a668556 --- /dev/null +++ b/v2/pkg/engine/resolve/error_behavior.go @@ -0,0 +1,53 @@ +package resolve + +import "strings" + +// ErrorBehavior controls how errors are handled during GraphQL resolution. +// This implements the proposed GraphQL spec change from PR #1163. +type ErrorBehavior int + +const ( + // ErrorBehaviorPropagate is the default behavior (traditional null bubbling). + // When a non-nullable field returns null due to an error, the null value + // propagates up to the nearest nullable parent. + ErrorBehaviorPropagate ErrorBehavior = iota + + // ErrorBehaviorNull stops null propagation at the error site. + // Even non-nullable fields return null without bubbling up. + // Errors are still recorded but don't cause parent nullification. + ErrorBehaviorNull + + // ErrorBehaviorHalt stops execution on the first error. + // The entire data field becomes null, and only the first error is returned. + ErrorBehaviorHalt +) + +// String returns the string representation of the ErrorBehavior. +func (e ErrorBehavior) String() string { + switch e { + case ErrorBehaviorPropagate: + return "PROPAGATE" + case ErrorBehaviorNull: + return "NULL" + case ErrorBehaviorHalt: + return "HALT" + default: + return "PROPAGATE" + } +} + +// ParseErrorBehavior parses a string into an ErrorBehavior. +// Returns the parsed value and true if valid, or ErrorBehaviorPropagate and false if invalid. +// The parsing is case-insensitive. +func ParseErrorBehavior(s string) (ErrorBehavior, bool) { + switch strings.ToUpper(strings.TrimSpace(s)) { + case "PROPAGATE": + return ErrorBehaviorPropagate, true + case "NULL": + return ErrorBehaviorNull, true + case "HALT": + return ErrorBehaviorHalt, true + default: + return ErrorBehaviorPropagate, false + } +} diff --git a/v2/pkg/engine/resolve/error_behavior_test.go b/v2/pkg/engine/resolve/error_behavior_test.go new file mode 100644 index 0000000000..927593cd27 --- /dev/null +++ b/v2/pkg/engine/resolve/error_behavior_test.go @@ -0,0 +1,377 @@ +package resolve + +import ( + "bytes" + "context" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +func compactJSONForAssert(t testing.TB, input string) string { + t.Helper() + + var value any + err := json.Unmarshal([]byte(input), &value) + assert.NoError(t, err) + + normalized, err := json.Marshal(value) + assert.NoError(t, err) + return string(normalized) +} + +// TestParseErrorBehavior verifies case-insensitive parsing of error behavior +// strings, including whitespace trimming and unknown value rejection. +func TestParseErrorBehavior(t *testing.T) { + tests := []struct { + input string + expected ErrorBehavior + ok bool + }{ + {"PROPAGATE", ErrorBehaviorPropagate, true}, + {"propagate", ErrorBehaviorPropagate, true}, + {"Propagate", ErrorBehaviorPropagate, true}, + {" PROPAGATE ", ErrorBehaviorPropagate, true}, + {"NULL", ErrorBehaviorNull, true}, + {"null", ErrorBehaviorNull, true}, + {"Null", ErrorBehaviorNull, true}, + {"HALT", ErrorBehaviorHalt, true}, + {"halt", ErrorBehaviorHalt, true}, + {"Halt", ErrorBehaviorHalt, true}, + {"", ErrorBehaviorPropagate, false}, + {"INVALID", ErrorBehaviorPropagate, false}, + {"nullify", ErrorBehaviorPropagate, false}, + } + + for _, tc := range tests { + t.Run(tc.input, func(t *testing.T) { + result, ok := ParseErrorBehavior(tc.input) + assert.Equal(t, tc.expected, result) + assert.Equal(t, tc.ok, ok) + }) + } +} + +// TestErrorBehaviorString verifies String() output for all error behavior +// values, including the default for unknown values. +func TestErrorBehaviorString(t *testing.T) { + assert.Equal(t, "PROPAGATE", ErrorBehaviorPropagate.String()) + assert.Equal(t, "NULL", ErrorBehaviorNull.String()) + assert.Equal(t, "HALT", ErrorBehaviorHalt.String()) + assert.Equal(t, "PROPAGATE", ErrorBehavior(99).String()) // unknown defaults to PROPAGATE +} + +// TestErrorBehaviorPropagate verifies PROPAGATE mode (default): a null +// non-nullable field bubbles up to the nearest nullable parent. +func TestErrorBehaviorPropagate(t *testing.T) { + data := `{"user":{"name":null}}` + res := NewResolvable(nil, ResolvableOptions{}) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.ErrorBehavior = ErrorBehaviorPropagate + + err := res.Init(ctx, []byte(data), ast.OperationTypeQuery) + assert.NoError(t, err) + + // user is nullable, name is non-nullable + // When name is null, user should become null (bubbling) + object := &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: true, + TypeName: "User", + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + } + + out := &bytes.Buffer{} + err = res.Resolve(context.Background(), object, nil, out) + assert.NoError(t, err) + + // In PROPAGATE mode, the null bubbles up to user + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]}],"data":{"user":null}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, out.String())) +} + +// TestErrorBehaviorNull verifies NULL mode: non-nullable fields return null +// at the error site without bubbling up to the parent. +func TestErrorBehaviorNull(t *testing.T) { + data := `{"user":{"name":null}}` + res := NewResolvable(nil, ResolvableOptions{}) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.ErrorBehavior = ErrorBehaviorNull + + err := res.Init(ctx, []byte(data), ast.OperationTypeQuery) + assert.NoError(t, err) + + // user is nullable, name is non-nullable + // In NULL mode, name returns null but user should NOT become null + object := &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: true, + TypeName: "User", + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + } + + out := &bytes.Buffer{} + err = res.Resolve(context.Background(), object, nil, out) + assert.NoError(t, err) + + // In NULL mode, the null does NOT bubble up - user has a name field with null + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]}],"data":{"user":{"name":null}}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, out.String())) +} + +// TestErrorBehaviorHalt verifies HALT mode: the first null non-nullable +// field makes the entire data field null. +func TestErrorBehaviorHalt(t *testing.T) { + data := `{"user":{"name":null}}` + res := NewResolvable(nil, ResolvableOptions{}) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.ErrorBehavior = ErrorBehaviorHalt + + err := res.Init(ctx, []byte(data), ast.OperationTypeQuery) + assert.NoError(t, err) + + // user is nullable, name is non-nullable + // In HALT mode, data becomes null on the first error + object := &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: true, + TypeName: "User", + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + } + + out := &bytes.Buffer{} + err = res.Resolve(context.Background(), object, nil, out) + assert.NoError(t, err) + + // In HALT mode, data becomes null + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]}],"data":null}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, out.String())) +} + +// TestErrorBehaviorNullWithMultipleFields verifies NULL mode collects +// multiple errors from different non-nullable fields without propagating +// any of them to the parent object. +func TestErrorBehaviorNullWithMultipleFields(t *testing.T) { + data := `{"user":{"name":null,"email":"test@example.com","age":null}}` + res := NewResolvable(nil, ResolvableOptions{}) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.ErrorBehavior = ErrorBehaviorNull + + err := res.Init(ctx, []byte(data), ast.OperationTypeQuery) + assert.NoError(t, err) + + object := &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: true, + TypeName: "User", + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: false, // non-nullable but null -> error, no bubbling in NULL mode + }, + }, + { + Name: []byte("email"), + Value: &String{ + Path: []string{"email"}, + Nullable: true, + }, + }, + { + Name: []byte("age"), + Value: &Integer{ + Path: []string{"age"}, + Nullable: false, // non-nullable but null -> error, no bubbling in NULL mode + }, + }, + }, + }, + }, + }, + } + + out := &bytes.Buffer{} + err = res.Resolve(context.Background(), object, nil, out) + assert.NoError(t, err) + + // In NULL mode, the user object should still exist with both errors collected + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.name'.","path":["user","name"]},{"message":"Cannot return null for non-nullable field 'Query.user.age'.","path":["user","age"]}],"data":{"user":{"name":null,"email":"test@example.com","age":null}}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, out.String())) +} + +// TestErrorBehaviorWithNestedObjects verifies NULL mode with deeply nested +// objects: the null stays at the leaf and does not bubble through +// intermediate nullable parents. +func TestErrorBehaviorWithNestedObjects(t *testing.T) { + data := `{"user":{"profile":{"address":{"city":null}}}}` + res := NewResolvable(nil, ResolvableOptions{}) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.ErrorBehavior = ErrorBehaviorNull + + err := res.Init(ctx, []byte(data), ast.OperationTypeQuery) + assert.NoError(t, err) + + object := &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: true, + TypeName: "User", + Fields: []*Field{ + { + Name: []byte("profile"), + Value: &Object{ + Path: []string{"profile"}, + Nullable: true, + TypeName: "Profile", + Fields: []*Field{ + { + Name: []byte("address"), + Value: &Object{ + Path: []string{"address"}, + Nullable: true, + TypeName: "Address", + Fields: []*Field{ + { + Name: []byte("city"), + Value: &String{ + Path: []string{"city"}, + Nullable: false, // non-nullable at deep level + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + out := &bytes.Buffer{} + err = res.Resolve(context.Background(), object, nil, out) + assert.NoError(t, err) + + // In NULL mode, the null doesn't bubble up through address, profile, or user + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.user.profile.address.city'.","path":["user","profile","address","city"]}],"data":{"user":{"profile":{"address":{"city":null}}}}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, out.String())) +} + +// TestErrorBehaviorWithArrays verifies NULL mode with arrays: a null +// non-nullable field in one array item does not affect other items. +func TestErrorBehaviorWithArrays(t *testing.T) { + data := `{"users":[{"name":"Alice"},{"name":null},{"name":"Charlie"}]}` + res := NewResolvable(nil, ResolvableOptions{}) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.ErrorBehavior = ErrorBehaviorNull + + err := res.Init(ctx, []byte(data), ast.OperationTypeQuery) + assert.NoError(t, err) + + object := &Object{ + Fields: []*Field{ + { + Name: []byte("users"), + Value: &Array{ + Path: []string{"users"}, + Nullable: true, + Item: &Object{ + Nullable: true, + TypeName: "User", + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: false, // non-nullable + }, + }, + }, + }, + }, + }, + }, + } + + out := &bytes.Buffer{} + err = res.Resolve(context.Background(), object, nil, out) + assert.NoError(t, err) + + // In NULL mode, the array should still contain all items + // The second item's name will be null (error) but the item itself should remain + expected := `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.users.name'.","path":["users",1,"name"]}],"data":{"users":[{"name":"Alice"},{"name":null},{"name":"Charlie"}]}}` + assert.Equal(t, compactJSONForAssert(t, expected), compactJSONForAssert(t, out.String())) +} + +// TestHaltExecution verifies the HaltExecution flag on Resolvable: set by +// HALT mode on first error, cleared by Reset(). +func TestHaltExecution(t *testing.T) { + res := NewResolvable(nil, ResolvableOptions{}) + assert.False(t, res.HaltExecution()) + + res.haltExecution = true + assert.True(t, res.HaltExecution()) + + // Reset should clear the flag + res.Reset() + assert.False(t, res.HaltExecution()) +} diff --git a/v2/pkg/engine/resolve/extensions_cache_invalidation_test.go b/v2/pkg/engine/resolve/extensions_cache_invalidation_test.go new file mode 100644 index 0000000000..11983b01e1 --- /dev/null +++ b/v2/pkg/engine/resolve/extensions_cache_invalidation_test.go @@ -0,0 +1,523 @@ +package resolve + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// TestExtensionsCacheInvalidation verifies that subgraph cacheInvalidation extensions +// correctly delete L2 entries, with the optimization that same-entity deletes are +// skipped when updateL2Cache will immediately write fresh data for that key. +func TestExtensionsCacheInvalidation(t *testing.T) { + // ------------------------------------------------------------------------- + // Delete-before-set optimization: when the invalidated entity is the SAME + // entity being fetched, the L2 delete is skipped because updateL2Cache + // will immediately set it with fresh data. + // ------------------------------------------------------------------------- + + t.Run("same entity fetched and invalidated — delete skipped", func(t *testing.T) { + // User:1 is fetched AND invalidated in the same response. + // updateL2Cache will set User:1, so the delete is redundant and skipped. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "delete skipped — same key about to be set by updateL2Cache") + }) + + t.Run("same entity with header prefix — delete still skipped", func(t *testing.T) { + // Same optimization applies even when keys are prefixed (e.g. "33333:User:1"). + // Both the invalidation key and the L2 set key go through the same prefix transform. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`, + withExtInvHeaderPrefix(33333), + ) + env.run() + assert.False(t, env.hasDeletes(), "delete skipped — prefixed key also about to be set") + }) + + t.Run("same entity with L2CacheKeyInterceptor — delete still skipped", func(t *testing.T) { + // Same optimization applies when keys are transformed by an interceptor. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`, + withExtInvInterceptor(func(_ context.Context, key string, _ L2CacheKeyInterceptorInfo) string { + return "tenant-X:" + key + }), + ) + env.run() + assert.False(t, env.hasDeletes(), "delete skipped — intercepted key also about to be set") + }) + + t.Run("same entity with both prefix and interceptor — delete still skipped", func(t *testing.T) { + // Both transforms applied: prefix + interceptor. Delete is still redundant. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`, + withExtInvHeaderPrefix(33333), + withExtInvInterceptor(func(_ context.Context, key string, _ L2CacheKeyInterceptorInfo) string { + return "tenant-X:" + key + }), + ) + env.run() + assert.False(t, env.hasDeletes(), "delete skipped — both prefix and interceptor applied, key still about to be set") + }) + + // ------------------------------------------------------------------------- + // Different entity invalidated: the delete MUST happen because the key + // being invalidated is NOT the same key being set by updateL2Cache. + // ------------------------------------------------------------------------- + + t.Run("different entity invalidated — only that entity deleted", func(t *testing.T) { + // Invalidation targets User:1 (same as fetched → skipped) AND User:2 (different → deleted). + // This proves the optimization is per-key, not all-or-nothing. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}},{"typename":"User","key":{"id":"2"}}]}}}`, + ) + env.run() + + deleteKeys := env.deleteKeys() + require.Len(t, deleteKeys, 1, "User:1 skipped (about to be set), User:2 deleted") + assert.Equal(t, `{"__typename":"User","key":{"id":"2"}}`, deleteKeys[0]) + }) + + t.Run("composite key fields — different key shape is not skipped", func(t *testing.T) { + // Invalidation key has composite fields {id:"1", orgId:"42"} which differs + // from the fetched entity key {id:"1"}. No match → delete happens. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1","orgId":"42"}}]}}}`, + ) + env.run() + + deleteKeys := env.deleteKeys() + require.Len(t, deleteKeys, 1, "composite key differs from fetch key — delete not skipped") + assert.Equal(t, `{"__typename":"User","key":{"id":"1","orgId":"42"}}`, deleteKeys[0]) + }) + + // ------------------------------------------------------------------------- + // No-op cases: various scenarios where no delete should happen. + // ------------------------------------------------------------------------- + + t.Run("no extensions in response — no delete", func(t *testing.T) { + // Response has no extensions at all. Nothing to invalidate. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "no extensions → no invalidation") + }) + + t.Run("extensions without cacheInvalidation key — no delete", func(t *testing.T) { + // Extensions present but contain only tracing data, not cacheInvalidation. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"tracing":{"version":1}}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "no cacheInvalidation key → no invalidation") + }) + + t.Run("empty keys array — no delete", func(t *testing.T) { + // cacheInvalidation present but keys array is empty. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[]}}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "empty keys array → no invalidation") + }) + + t.Run("unknown typename — silently skipped, no delete", func(t *testing.T) { + // Typename "UnknownType" has no entity cache config → skipped. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"UnknownType","key":{"id":"1"}}]}}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "unknown typename has no cache config → skipped") + }) + + t.Run("L2 cache disabled — no delete", func(t *testing.T) { + // With L2 disabled, processExtensionsCacheInvalidation returns early. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`, + withExtInvL2Disabled(), + ) + env.run() + assert.False(t, env.hasDeletes(), "L2 disabled → invalidation skipped entirely") + }) + + // ------------------------------------------------------------------------- + // Malformed extensions: gracefully handled, no panics, no deletes. + // ------------------------------------------------------------------------- + + t.Run("malformed — keys not an array", func(t *testing.T) { + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":"invalid"}}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "malformed keys field → gracefully ignored") + }) + + t.Run("malformed — entry missing typename", func(t *testing.T) { + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"key":{"id":"1"}}]}}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "missing typename → entry skipped") + }) + + t.Run("malformed — entry missing key", func(t *testing.T) { + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User"}]}}}`, + ) + env.run() + assert.False(t, env.hasDeletes(), "missing key → entry skipped") + }) + + // ------------------------------------------------------------------------- + // Interceptor metadata: verify the L2CacheKeyInterceptor receives correct + // SubgraphName and CacheName for both regular cache operations and + // invalidation key construction. + // ------------------------------------------------------------------------- + + t.Run("interceptor receives correct SubgraphName and CacheName", func(t *testing.T) { + // The interceptor is called twice: once for the L2 cache set (regular flow) + // and once for the invalidation key construction. + var capturedInfos []L2CacheKeyInterceptorInfo + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`, + withExtInvInterceptor(func(_ context.Context, key string, info L2CacheKeyInterceptorInfo) string { + capturedInfos = append(capturedInfos, info) + return key + }), + ) + env.run() + + require.Len(t, capturedInfos, 2, "interceptor called for L2 set + invalidation key") + assert.Equal(t, L2CacheKeyInterceptorInfo{SubgraphName: "accounts", CacheName: "default"}, capturedInfos[0]) + assert.Equal(t, L2CacheKeyInterceptorInfo{SubgraphName: "accounts", CacheName: "default"}, capturedInfos[1]) + }) +} + +func TestExtensionsCacheInvalidationAnalytics(t *testing.T) { + t.Run("records MutationEvent for extension-driven delete", func(t *testing.T) { + // newExtInvEnv fetches User:1; invalidating User:2 targets a different key, + // so the delete is not deduped as "about to be set" and analytics records it. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"2"}}]}}}`, + ) + env.ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + env.run() + stats := env.ctx.GetCacheStats() + + assert.Equal(t, []MutationEvent{ + { + EntityType: "User", // Extension entry invalidates typename User + EntityCacheKey: `{"__typename":"User","key":{"id":"2"}}`, // User:2 is the key that survives dedupe and is deleted + HadCachedValue: false, // Extension invalidation does not issue an L2 Get + IsStale: false, // No cached-vs-fresh comparison is performed + Source: CacheSourceQuery, // Emitted from a query response, not a mutation + }, + }, stats.MutationEvents) + }) + + t.Run("records no MutationEvent when extension delete is skipped", func(t *testing.T) { + // newExtInvEnv fetches User:1; invalidating User:1 is skipped before the + // analytics call because updateL2Cache is about to write the same key. + env := newExtInvEnv(t, + `{"data":{"_entities":[{"__typename":"User","id":"1","username":"Alice"}]},"extensions":{"cacheInvalidation":{"keys":[{"typename":"User","key":{"id":"1"}}]}}}`, + ) + env.ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + env.run() + stats := env.ctx.GetCacheStats() + + // Snapshot's slices.Clone returns nil when the underlying slice is nil + // (no events appended). Assert the count rather than DeepEqual against + // []MutationEvent{}, which would mismatch a nil slice. + assert.Equal(t, 0, len(stats.MutationEvents)) + }) +} + +// --------------------------------------------------------------------------- +// Schema building blocks for User entity tests +// --------------------------------------------------------------------------- + +// newUserCacheKeyTemplate returns a cache key template for User entities with @key(fields: "id"). +func newUserCacheKeyTemplate() *EntityQueryCacheKeyTemplate { + return &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } +} + +// newUserProvidesData describes the fields provided by a User entity fetch. +func newUserProvidesData() *Object { + return &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + } +} + +// newUserEntityFetchSegments returns the input template segments for a User _entities fetch. +func newUserEntityFetchSegments() []TemplateSegment { + return []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://accounts.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on User {id username}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + } +} + +// --------------------------------------------------------------------------- +// extInvOption — functional options for extInvEnv configuration +// --------------------------------------------------------------------------- + +type extInvOption func(*extInvConfig) + +type extInvConfig struct { + enableHeaderPrefix bool + headerHash uint64 + l2KeyInterceptor func(context.Context, string, L2CacheKeyInterceptorInfo) string + disableL2 bool +} + +// withExtInvHeaderPrefix enables IncludeSubgraphHeaderPrefix on the entity cache config +// and fetch configuration, and sets up a mockSubgraphHeadersBuilder with the given hash. +func withExtInvHeaderPrefix(hash uint64) extInvOption { + return func(c *extInvConfig) { + c.enableHeaderPrefix = true + c.headerHash = hash + } +} + +// withExtInvInterceptor sets an L2CacheKeyInterceptor on the caching options. +func withExtInvInterceptor(fn func(context.Context, string, L2CacheKeyInterceptorInfo) string) extInvOption { + return func(c *extInvConfig) { + c.l2KeyInterceptor = fn + } +} + +// withExtInvL2Disabled disables L2 caching. +func withExtInvL2Disabled() extInvOption { + return func(c *extInvConfig) { + c.disableL2 = true + } +} + +// --------------------------------------------------------------------------- +// extInvEnv — test environment for extensions cache invalidation unit tests +// --------------------------------------------------------------------------- + +// extInvEnv encapsulates all test infrastructure for a single invalidation test. +// Tests only need to specify the entity response (with/without extensions) and +// any configuration options — all boilerplate is handled here. +type extInvEnv struct { + t *testing.T + loader *Loader + ctx *Context + response *GraphQLResponse + cache *FakeLoaderCache +} + +// newExtInvEnv creates a standard test environment: one root fetch returning +// User:1, one entity fetch returning the given entityResponse. +func newExtInvEnv(t *testing.T, entityResponse string, opts ...extInvOption) *extInvEnv { + t.Helper() + + var cfg extInvConfig + for _, opt := range opts { + opt(&cfg) + } + + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, _ any, _ []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, _ any, _ []byte) ([]byte, error) { + return []byte(entityResponse), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{user {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newUserCacheKeyTemplate(), + UseL1Cache: true, + IncludeSubgraphHeaderPrefix: cfg.enableHeaderPrefix, + }, + }, + InputTemplate: InputTemplate{Segments: newUserEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "accounts", + DataSourceName: "accounts", + OperationType: ast.OperationTypeQuery, + ProvidesData: newUserProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.user", ObjectPath("user")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &String{Path: []string{"username"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + entityCacheConfigs: map[string]map[string]*EntityCacheInvalidationConfig{ + "accounts": { + "User": {CacheName: "default", IncludeSubgraphHeaderPrefix: cfg.enableHeaderPrefix}, + }, + }, + } + + ctx := NewContext(t.Context()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = !cfg.disableL2 + + if cfg.enableHeaderPrefix { + ctx.SubgraphHeadersBuilder = &mockSubgraphHeadersBuilder{ + hashes: map[string]uint64{"accounts": cfg.headerHash}, + } + } + if cfg.l2KeyInterceptor != nil { + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = cfg.l2KeyInterceptor + } + + return &extInvEnv{ + t: t, + loader: loader, + ctx: ctx, + response: response, + cache: cache, + } +} + +// run executes the loader and returns the GraphQL response string. +func (e *extInvEnv) run() string { + e.t.Helper() + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(e.ctx, nil, ast.OperationTypeQuery) + require.NoError(e.t, err) + + err = e.loader.LoadGraphQLResponseData(e.ctx, e.response, resolvable) + require.NoError(e.t, err) + + return fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) +} + +// deleteKeys returns all keys that were passed to cache.Delete() calls. +func (e *extInvEnv) deleteKeys() []string { + var keys []string + for _, entry := range e.cache.GetLog() { + if entry.Operation == "delete" { + for _, item := range entry.Items { + keys = append(keys, item.Key) + } + } + } + return keys +} + +// hasDeletes returns true if any cache.Delete() calls were recorded. +func (e *extInvEnv) hasDeletes() bool { + for _, entry := range e.cache.GetLog() { + if entry.Operation == "delete" { + return true + } + } + return false +} + +// --------------------------------------------------------------------------- +// mockSubgraphHeadersBuilder — test mock for SubgraphHeadersBuilder +// --------------------------------------------------------------------------- + +type mockSubgraphHeadersBuilder struct { + hashes map[string]uint64 +} + +func (m *mockSubgraphHeadersBuilder) HeadersForSubgraph(subgraphName string) (http.Header, uint64) { + return nil, m.hashes[subgraphName] +} + +func (m *mockSubgraphHeadersBuilder) HashAll() uint64 { + return 0 +} + +var _ SubgraphHeadersBuilder = (*mockSubgraphHeadersBuilder)(nil) diff --git a/v2/pkg/engine/resolve/fetch.go b/v2/pkg/engine/resolve/fetch.go index 622e731c4b..6b7f7955c8 100644 --- a/v2/pkg/engine/resolve/fetch.go +++ b/v2/pkg/engine/resolve/fetch.go @@ -4,6 +4,7 @@ import ( "encoding/json" "slices" "strings" + "time" "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" ) @@ -165,6 +166,7 @@ type BatchEntityFetch struct { DataSourceIdentifier []byte Trace *DataSourceLoadTrace Info *FetchInfo + Caching FetchCacheConfiguration } func (b *BatchEntityFetch) Dependencies() *FetchDependencies { @@ -205,6 +207,7 @@ type EntityFetch struct { DataSourceIdentifier []byte Trace *DataSourceLoadTrace Info *FetchInfo + Caching FetchCacheConfiguration } func (e *EntityFetch) Dependencies() *FetchDependencies { @@ -271,13 +274,15 @@ type FetchConfiguration struct { // OperationName is non-empty when the operation name is propagated to the upstream subgraph fetch. OperationName string + + Caching FetchCacheConfiguration } -func (fc *FetchConfiguration) Equals(other *FetchConfiguration) bool { - if fc.Input != other.Input { +func (f *FetchConfiguration) Equals(other *FetchConfiguration) bool { + if f.Input != other.Input { return false } - if !slices.EqualFunc(fc.Variables, other.Variables, func(a, b Variable) bool { + if !slices.EqualFunc(f.Variables, other.Variables, func(a, b Variable) bool { return a.Equals(b) }) { return false @@ -285,22 +290,212 @@ func (fc *FetchConfiguration) Equals(other *FetchConfiguration) bool { // Note: we do not compare datasources, as they will always be a different instance. - if fc.RequiresEntityFetch != other.RequiresEntityFetch { + if f.RequiresEntityFetch != other.RequiresEntityFetch { return false } - if fc.RequiresEntityBatchFetch != other.RequiresEntityBatchFetch { + if f.RequiresEntityBatchFetch != other.RequiresEntityBatchFetch { return false } - if !fc.PostProcessing.Equals(&other.PostProcessing) { + if !f.PostProcessing.Equals(&other.PostProcessing) { return false } - if fc.SetTemplateOutputToNullOnVariableNull != other.SetTemplateOutputToNullOnVariableNull { + if f.SetTemplateOutputToNullOnVariableNull != other.SetTemplateOutputToNullOnVariableNull { return false } + return f.Caching.Equals(&other.Caching) +} +func (f *FetchCacheConfiguration) Equals(other *FetchCacheConfiguration) bool { + if f.Enabled != other.Enabled { + return false + } + if f.CacheName != other.CacheName { + return false + } + if f.TTL != other.TTL { + return false + } + if f.IncludeSubgraphHeaderPrefix != other.IncludeSubgraphHeaderPrefix { + return false + } + if f.EnablePartialCacheLoad != other.EnablePartialCacheLoad { + return false + } + if f.ShadowMode != other.ShadowMode { + return false + } + if f.EnableMutationL2CachePopulation != other.EnableMutationL2CachePopulation { + return false + } + if f.MutationCacheTTLOverride != other.MutationCacheTTLOverride { + return false + } + if f.NegativeCacheTTL != other.NegativeCacheTTL { + return false + } + if f.PartialBatchLoad != other.PartialBatchLoad { + return false + } + if !slices.Equal(f.BatchEntityKeyArgumentPathHint, other.BatchEntityKeyArgumentPathHint) { + return false + } return true } +type FetchCacheConfiguration struct { + // Enabled indicates if L2 caching is enabled for this fetch. + // L1 caching is controlled separately via ctx.ExecutionOptions.Caching.EnableL1Cache. + Enabled bool + // CacheName is the name of the cache to use for this fetch + CacheName string + // TTL is the time to live which will be set for new cache entries + TTL time.Duration + // CacheKeyTemplate can be used to render a cache key for the fetch. + // In case of a root fetch, the variables will be one or more field arguments + // For entity fetches, the variables will be a single Object Variable with only @key fields + CacheKeyTemplate CacheKeyTemplate + // IncludeSubgraphHeaderPrefix indicates if cache keys should be prefixed with the subgraph header hash. + // The prefix format is "id:cacheKey" where id is the hash from HeadersForSubgraph. + // Defaults to true. + IncludeSubgraphHeaderPrefix bool + // RootFieldL1EntityCacheKeyTemplates holds L1 cache key templates for entities returned by root fields. + RootFieldL1EntityCacheKeyTemplates map[string]CacheKeyTemplate + + // EnablePartialCacheLoad enables fetching only cache-missed entities. + // When true and some entities are cached while others are not, only the missing + // entities are fetched from the subgraph. Cached entities are served directly. + // This is propagated from EntityCacheConfiguration during planning. + EnablePartialCacheLoad bool + + // UseL1Cache controls whether this fetch uses L1 (per-request) cache. + // Set by postprocessor based on whether a prior fetch can populate L1 + // for this entity type. Defaults to true for backward compatibility. + UseL1Cache bool + + // HashAnalyticsKeys controls whether entity keys are hashed (true) or stored raw (false) + // in cache analytics EntityFieldHash entries. Propagated from EntityCacheConfiguration. + HashAnalyticsKeys bool + + // KeyFields holds the full @key structure, pre-extracted at plan time. + // Used for entity source tracking during cache analytics. + KeyFields []KeyField + + // ShadowMode enables shadow caching for this fetch. + // When true, L2 cache reads and writes still occur, but cached data is never served. + // Fresh data is always fetched from the subgraph and compared against the cached value + // to detect staleness. L1 cache works normally (not affected by shadow mode). + ShadowMode bool + + // MutationEntityImpactConfig is set when this fetch is a mutation that returns a cached entity. + // Used by detectMutationEntityImpact() to proactively compare mutation response with L2 cache. + MutationEntityImpactConfig *MutationEntityImpactConfig + + // EnableMutationL2CachePopulation allows mutation entity fetches to write + // to the L2 cache. Propagated from MutationFieldCacheConfiguration. + // By default, mutations do NOT populate L2. + EnableMutationL2CachePopulation bool + + // MutationCacheTTLOverride overrides the entity TTL for mutation-triggered L2 writes. + // Propagated from MutationFieldCacheConfiguration.TTL. + // When zero, the entity's default TTL is used. + MutationCacheTTLOverride time.Duration + + // NegativeCacheTTL is the TTL for caching null entity results (entity not found). + // When > 0, null responses (entity returned null without errors) are cached to avoid + // repeated subgraph lookups for non-existent entities. + // When 0 (default), null entities are not cached. + NegativeCacheTTL time.Duration + + // PartialBatchLoad enables partial fetch mode for batch arguments (ArgumentIsEntityKey + list). + // When false (default), batch cache is all-or-nothing: any miss fetches the full list. + // When true, only missing IDs are fetched; cached entities are served directly. + PartialBatchLoad bool + // BatchEntityKeyArgumentPathHint describes the root-field argument that acts as the entity key list. + // This enables batch short-circuiting and partial variable filtering even when cache reads are disabled. + BatchEntityKeyArgumentPathHint []string + + // RequestScopedFields lists fields annotated with @requestScoped whose values are + // identical for all entities in a request. Each field participates in per-request + // L1 caching symmetrically: it can be injected from L1 (skipping the fetch) AND + // exported to L1 (populating the cache after a fetch). + RequestScopedFields []RequestScopedField +} + +// RequestScopedField describes a field that participates in per-request L1 caching. +// +// Symmetric model: every @requestScoped field is both a reader (inject from L1 +// before fetch) and a writer (export to L1 after fetch). There is no separate +// hint/export distinction. +// +// The L1 cache stores values in normalized form (schema field names + arg hashes). +// ProvidesData describes the shape the query expects AT THIS FETCH LOCATION, +// using response-side field names (aliases). The resolver uses ProvidesData for: +// - Injection: `validateItemHasRequiredData` + `structuralCopyProjected` +// - Export: `structuralCopyNormalized` (alias → schema name, arg → arg-hash) +type RequestScopedField struct { + // FieldName is the response key at the entity-fetch location (alias if present, + // else the schema field name). Used when writing the injected value onto entity items. + FieldName string + // FieldPath is the path in the response data (e.g. ["currentViewer"]). + // Uses response keys (aliases) as they appear in the current fetch's output. + FieldPath []string + // L1Key is the coordinate-based L1 cache key (e.g. "viewer.Personalized.currentViewer"). + L1Key string + // ProvidesData describes the field's value shape at this fetch location, + // including nested sub-fields, aliases, and arg variants. + ProvidesData *Object +} + +func (f FetchCacheConfiguration) isEntityFetch() bool { + if f.CacheKeyTemplate == nil { + return false + } + return f.CacheKeyTemplate.IsEntityFetch() +} + +func (f FetchCacheConfiguration) batchEntityKeyArgumentPath() []string { + if len(f.BatchEntityKeyArgumentPathHint) > 0 { + return f.BatchEntityKeyArgumentPathHint + } + if f.CacheKeyTemplate == nil { + return nil + } + return f.CacheKeyTemplate.BatchEntityKeyArgumentPath() +} + +func (f FetchCacheConfiguration) hasBatchEntityKey() bool { + return len(f.batchEntityKeyArgumentPath()) > 0 +} + +func (f FetchCacheConfiguration) entityMergePath(postProcessing PostProcessingConfiguration) []string { + if f.CacheKeyTemplate == nil { + return nil + } + return f.CacheKeyTemplate.EntityMergePath(postProcessing) +} + +// MutationEntityImpactConfig holds information for detecting entity cache changes from mutations. +// Set at plan time when a mutation returns a federation entity with L2 caching configured. +type MutationEntityImpactConfig struct { + EntityTypeName string // "User" + KeyFields []KeyField // [{Name: "id"}] + CacheName string // "default" + IncludeSubgraphHeaderPrefix bool + // InvalidateCache when true causes the L2 cache entry for this entity to be deleted + // after the mutation completes. Configured per mutation field via MutationCacheInvalidationConfiguration. + InvalidateCache bool + // PopulateCache when true causes the L2 cache entry for this entity to be written + // directly from the mutation response payload after the mutation completes. Use case: + // `@cachePopulate` on a single-subgraph mutation that returns the full entity, where + // no follow-up entity fetch exists to inherit EnableMutationL2CachePopulation. + // Mutually informative with InvalidateCache (a single mutation field is annotated with + // only one or the other in composition). + PopulateCache bool + // PopulateTTL is the TTL to use when writing under PopulateCache. When zero the cache + // implementation's default TTL applies. + PopulateTTL time.Duration +} + // FetchDependency explains how a GraphCoordinate depends on other GraphCoordinates from other fetches type FetchDependency struct { // Coordinate is the type+field which depends on one or more FetchDependencyOrigin @@ -363,6 +558,7 @@ type FetchInfo struct { // with the request to the subgraph as part of the "fetch_reason" extension. // Specifically, it is created only for fields stored in the DataSource.RequireFetchReasons(). PropagatedFetchReasons []FetchReason + ProvidesData *Object } type GraphCoordinate struct { @@ -384,6 +580,7 @@ type DataSourceLoadTrace struct { SingleFlightSharedResponse bool `json:"single_flight_shared_response"` LoadSkipped bool `json:"load_skipped"` LoadStats *LoadStats `json:"load_stats,omitempty"` + CacheTrace *CacheTrace `json:"cache_trace,omitempty"` Path string `json:"-"` } diff --git a/v2/pkg/engine/resolve/fetch_configuration_equals_test.go b/v2/pkg/engine/resolve/fetch_configuration_equals_test.go new file mode 100644 index 0000000000..396eba15f9 --- /dev/null +++ b/v2/pkg/engine/resolve/fetch_configuration_equals_test.go @@ -0,0 +1,125 @@ +package resolve + +import ( + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// TestFetchConfigurationEquals_CachingDifference verifies that FetchCacheConfiguration.Equals +// detects differences in every compared field. The field count guard ensures that adding a new +// field to FetchCacheConfiguration forces an update to both Equals() and this test. +func TestFetchConfigurationEquals_CachingDifference(t *testing.T) { + base := FetchConfiguration{ + Input: `{"query":"{ user { id } }"}`, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + IncludeSubgraphHeaderPrefix: true, + EnablePartialCacheLoad: true, + ShadowMode: false, + EnableMutationL2CachePopulation: false, + MutationCacheTTLOverride: 0, + NegativeCacheTTL: 0, + }, + } + + tests := []struct { + name string + mutate func(fc *FetchConfiguration) + }{ + { + name: "Enabled differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.Enabled = false + }, + }, + { + name: "CacheName differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.CacheName = "other" + }, + }, + { + name: "TTL differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.TTL = 60 * time.Second + }, + }, + { + name: "IncludeSubgraphHeaderPrefix differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.IncludeSubgraphHeaderPrefix = false + }, + }, + { + name: "EnablePartialCacheLoad differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.EnablePartialCacheLoad = false + }, + }, + { + name: "ShadowMode differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.ShadowMode = true + }, + }, + { + name: "EnableMutationL2CachePopulation differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.EnableMutationL2CachePopulation = true + }, + }, + { + name: "MutationCacheTTLOverride differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.MutationCacheTTLOverride = 10 * time.Second + }, + }, + { + name: "NegativeCacheTTL differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.NegativeCacheTTL = 5 * time.Second + }, + }, + { + name: "PartialBatchLoad differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.PartialBatchLoad = true + }, + }, + { + name: "BatchEntityKeyArgumentPathHint differs", + mutate: func(fc *FetchConfiguration) { + fc.Caching.BatchEntityKeyArgumentPathHint = []string{"upcs"} + }, + }, + } + + // Fields intentionally not compared by Equals (not relevant for fetch deduplication): + // CacheKeyTemplate, RootFieldL1EntityCacheKeyTemplates, UseL1Cache, + // HashAnalyticsKeys, KeyFields, MutationEntityImpactConfig, + // RequestScopedFields + skippedFields := 7 + + totalFields := reflect.TypeFor[FetchCacheConfiguration]().NumField() + assert.Equal(t, totalFields, len(tests)+skippedFields, + "FetchCacheConfiguration has %d fields but test covers %d and skips %d — update this test and Equals() for new fields", + totalFields, len(tests), skippedFields) + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + other := base // copy + tc.mutate(&other) + assert.False(t, base.Equals(&other), "expected Equals to return false when %s", tc.name) + }) + } + + t.Run("identical configs are equal", func(t *testing.T) { + other := base // copy + assert.True(t, base.Equals(&other)) + }) +} diff --git a/v2/pkg/engine/resolve/inbound_request_singleflight_test.go b/v2/pkg/engine/resolve/inbound_request_singleflight_test.go index 8198b8723d..805e9dfb00 100644 --- a/v2/pkg/engine/resolve/inbound_request_singleflight_test.go +++ b/v2/pkg/engine/resolve/inbound_request_singleflight_test.go @@ -37,7 +37,7 @@ func TestInboundSingleFlight_ConcurrentFollowerTimeout(t *testing.T) { var wg sync.WaitGroup wg.Add(numFollowers) - for i := 0; i < numFollowers; i++ { + for range numFollowers { go func() { defer wg.Done() ctx, cancel := context.WithCancel(context.Background()) @@ -78,10 +78,8 @@ func TestInboundSingleFlight_FollowerReceivesLeaderError(t *testing.T) { // The follower calls GetOrCreate which blocks on inflight.Done. // We wait for followerCount to confirm it has entered before calling FinishErr. var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { followerCtx := NewContext(context.Background()) followerCtx.Request.ID = 2 @@ -89,7 +87,7 @@ func TestInboundSingleFlight_FollowerReceivesLeaderError(t *testing.T) { if followerErr == nil { t.Error("expected error from follower after leader FinishErr") } - }() + }) // Poll until the follower has actually registered inside GetOrCreate. deadline := time.After(3 * time.Second) diff --git a/v2/pkg/engine/resolve/l1_cache_normalize_test.go b/v2/pkg/engine/resolve/l1_cache_normalize_test.go new file mode 100644 index 0000000000..f41c1b8626 --- /dev/null +++ b/v2/pkg/engine/resolve/l1_cache_normalize_test.go @@ -0,0 +1,762 @@ +package resolve + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +// TestL1Cache_ValidateFieldDataWithAliases verifies that field validation uses the +// original (non-aliased) name when checking normalized cache data. +func TestL1Cache_ValidateFieldDataWithAliases(t *testing.T) { + t.Run("validates using original name on normalized data", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + field := &Field{ + Name: []byte("userName"), + OriginalName: []byte("username"), + Value: &Scalar{}, + } + + // Cache data is normalized (uses original name "username") + item := mustParseJSON(ar, `{"username":"Alice"}`) + + result := loader.validateFieldData(item, field) + // Validates using original name from normalized cache data + assert.True(t, result) + }) + + t.Run("fails when original name missing from cached data", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + field := &Field{ + Name: []byte("userName"), + OriginalName: []byte("username"), + Value: &Scalar{}, + } + + // Cache data doesn't have "username" + item := mustParseJSON(ar, `{"realName":"Alice"}`) + + result := loader.validateFieldData(item, field) + // Missing original field name in cache data + assert.False(t, result) + }) +} + +// TestL1Cache_ProjectedCopyWithAliases verifies that projected copy reads from the +// original field name in cache and writes to the alias name in the output. +func TestL1Cache_ProjectedCopyWithAliases(t *testing.T) { + t.Run("reads original name writes alias", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + } + + obj := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("userName"), OriginalName: []byte("username"), Value: &Scalar{}}, + }, + } + + // Cache stores data with original field name + cached := mustParseJSON(ar, `{"username":"Alice"}`) + result := loader.structuralCopyProjected(cached, obj) + + resultJSON := string(result.MarshalTo(nil)) + assert.Equal(t, `{"userName":"Alice"}`, resultJSON) + }) +} + +// TestL1Cache_ComputeHasAliases verifies detection of aliased fields at any depth +// in the response plan tree, used to decide if normalize/denormalize is needed. +func TestL1Cache_ComputeHasAliases(t *testing.T) { + t.Run("no aliases", func(t *testing.T) { + obj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + result := ComputeHasAliases(obj) + assert.False(t, result) + assert.False(t, obj.HasAliases) + }) + + t.Run("direct alias", func(t *testing.T) { + obj := &Object{ + Fields: []*Field{ + {Name: []byte("myId"), OriginalName: []byte("id"), Value: &Scalar{}}, + }, + } + result := ComputeHasAliases(obj) + assert.True(t, result) + assert.True(t, obj.HasAliases) + }) + + t.Run("nested alias", func(t *testing.T) { + innerObj := &Object{ + Fields: []*Field{ + {Name: []byte("n"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("product"), Value: innerObj}, + }, + } + result := ComputeHasAliases(obj) + assert.True(t, result) + assert.True(t, obj.HasAliases) + assert.True(t, innerObj.HasAliases) + }) + + t.Run("alias in array item", func(t *testing.T) { + innerObj := &Object{ + Fields: []*Field{ + {Name: []byte("n"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("items"), Value: &Array{Item: innerObj}}, + }, + } + result := ComputeHasAliases(obj) + assert.True(t, result) + assert.True(t, obj.HasAliases) + }) +} + +// TestPopulateL1CacheForRootFieldEntities_MissingKeyFields verifies that root field +// entity population skips entities that are missing @key fields. +// When the client's query doesn't select the @key fields (e.g., "id"), RenderCacheKeys +// produces a key with empty key object (e.g., {"__typename":"Product","key":{}}). +// These degraded keys would collide for all entities of the same type, so we skip storage. +func TestL1Cache_PopulateRootFieldEntities_MissingKeyFields(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.Variables = astjson.MustParse(`{}`) + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + // Set response data: entity with __typename but missing @key field "id" + resolvable.data, err = astjson.ParseBytesWithArena(ar, []byte(`{"topProducts":[{"__typename":"Product","name":"Widget"}]}`)) + require.NoError(t, err) + + l1Cache := map[string]*astjson.Value{} + + l := &Loader{ + jsonArena: ar, + ctx: ctx, + resolvable: resolvable, + l1Cache: l1Cache, + } + + // Template expects @key field "id" which is NOT in the entity data. + // Path points to where entities live in the response. + entityTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Path: []string{"topProducts"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + fetchItem := &FetchItem{ + Fetch: &SingleFetch{ + FetchConfiguration: FetchConfiguration{ + Caching: FetchCacheConfiguration{ + Enabled: true, + UseL1Cache: true, + RootFieldL1EntityCacheKeyTemplates: map[string]CacheKeyTemplate{ + "topProducts:Product": entityTemplate, + }, + }, + }, + Info: &FetchInfo{ + RootFields: []GraphCoordinate{ + {TypeName: "Query", FieldName: "topProducts"}, + }, + }, + }, + } + + l.populateL1CacheForRootFieldEntities(fetchItem) + + // Entity should NOT be stored because key fields are missing. + // A degraded key like {"__typename":"Product","key":{}} would collide for all + // Product entities, so populateL1CacheForRootFieldEntities skips storage. + degradedKey := `{"__typename":"Product","key":{}}` + _, loaded := l1Cache[degradedKey] + // Entity with missing @key fields should not be stored + assert.False(t, loaded) + + // A proper entity cache key won't find anything either + _, loaded = l1Cache[`{"__typename":"Product","key":{"id":"123"}}`] + // Proper entity key should not find the degraded entry + assert.False(t, loaded) +} + +func mustParseJSON(a arena.Arena, jsonStr string) *astjson.Value { + v, err := astjson.ParseBytesWithArena(a, []byte(jsonStr)) + if err != nil { + panic(err) + } + return v +} + +// --- P1: validateItemHasRequiredData unit tests --- + +// TestL1Cache_ValidateItemHasRequiredData exercises all branches of field validation: +// missing fields, null on nullable/non-nullable, nested objects, arrays, and CacheArgs. +// Without correct validation, stale or incomplete cache entries would be served. +func TestL1Cache_ValidateItemHasRequiredData(t *testing.T) { + t.Run("nil item returns false", func(t *testing.T) { + loader := &Loader{} + obj := &Object{Fields: []*Field{{Name: []byte("id"), Value: &Scalar{}}}} + assert.False(t, loader.validateItemHasRequiredData(nil, obj)) + }) + + t.Run("all required scalar fields present", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + item := mustParseJSON(ar, `{"id":"1","name":"Alice"}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("missing required field", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + item := mustParseJSON(ar, `{"id":"1"}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("null value for non-nullable scalar", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Nullable: false}}, + }, + } + item := mustParseJSON(ar, `{"id":null}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("null value for nullable scalar", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("email"), Value: &Scalar{Nullable: true}}, + }, + } + item := mustParseJSON(ar, `{"email":null}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("nested object with all fields", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + innerObj := &Object{ + Fields: []*Field{ + {Name: []byte("street"), Value: &Scalar{}}, + }, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("address"), Value: innerObj}, + }, + } + item := mustParseJSON(ar, `{"address":{"street":"Main St"}}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("nested object missing required field", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + innerObj := &Object{ + Fields: []*Field{ + {Name: []byte("street"), Value: &Scalar{}}, + {Name: []byte("city"), Value: &Scalar{}}, + }, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("address"), Value: innerObj}, + }, + } + item := mustParseJSON(ar, `{"address":{"street":"Main St"}}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("null for non-nullable object", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + innerObj := &Object{ + Nullable: false, + Fields: []*Field{{Name: []byte("street"), Value: &Scalar{}}}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("address"), Value: innerObj}, + }, + } + item := mustParseJSON(ar, `{"address":null}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("null for nullable object", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + innerObj := &Object{ + Nullable: true, + Fields: []*Field{{Name: []byte("street"), Value: &Scalar{}}}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("address"), Value: innerObj}, + }, + } + item := mustParseJSON(ar, `{"address":null}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("non-object value for object field", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + innerObj := &Object{ + Fields: []*Field{{Name: []byte("street"), Value: &Scalar{}}}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("address"), Value: innerObj}, + }, + } + item := mustParseJSON(ar, `{"address":"not-an-object"}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("array with all valid items", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{ + Item: &Scalar{}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":["a","b","c"]}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("array with invalid item - non-nullable scalar null", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{ + Item: &Scalar{Nullable: false}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":["a",null,"c"]}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("array with nullable items allows null", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{ + Item: &Scalar{Nullable: true}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":["a",null,"c"]}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("null for non-nullable array", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{ + Nullable: false, + Item: &Scalar{}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":null}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("null for nullable array", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{ + Nullable: true, + Item: &Scalar{}, + } + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":null}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("non-array value for array field", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{Item: &Scalar{}} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":"not-an-array"}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("empty array is valid", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{Item: &Scalar{}} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":[]}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("array of objects with valid items", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + itemObj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + }, + } + arr := &Array{Item: itemObj} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("items"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"items":[{"id":"1"},{"id":"2"}]}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("array of objects with invalid item", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + itemObj := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + arr := &Array{Item: itemObj} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("items"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"items":[{"id":"1","name":"ok"},{"id":"2"}]}`) + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("field with CacheArgs uses suffixed name for lookup", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"first":"5"}`)) + loader := &Loader{jsonArena: ar, ctx: ctx} + + // Field has CacheArgs, so validation should look for "friends_" not "friends" + field := &Field{ + Name: []byte("friends"), + Value: &Scalar{}, + CacheArgs: []CacheFieldArg{ + {ArgName: "first", VariableName: "first"}, + }, + } + + // Compute expected suffixed name + suffix := loader.computeArgSuffix(field.CacheArgs) + expectedKey := "friends" + suffix + + // Item has the suffixed field name (as normalize would produce) + itemJSON := `{"` + expectedKey + `":"value"}` + item := mustParseJSON(ar, itemJSON) + + obj := &Object{Fields: []*Field{field}} + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("field with CacheArgs fails when only base name present", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"first":"5"}`)) + loader := &Loader{jsonArena: ar, ctx: ctx} + + field := &Field{ + Name: []byte("friends"), + Value: &Scalar{}, + CacheArgs: []CacheFieldArg{ + {ArgName: "first", VariableName: "first"}, + }, + } + + // Item has only the base name "friends" without suffix + item := mustParseJSON(ar, `{"friends":"value"}`) + + obj := &Object{Fields: []*Field{field}} + assert.False(t, loader.validateItemHasRequiredData(item, obj)) + }) + + t.Run("array with nil Item spec is valid if array exists", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + arr := &Array{Item: nil} + obj := &Object{ + Fields: []*Field{ + {Name: []byte("tags"), Value: arr}, + }, + } + item := mustParseJSON(ar, `{"tags":["a","b"]}`) + assert.True(t, loader.validateItemHasRequiredData(item, obj)) + }) +} + +// --- P3: computeArgSuffix unit tests --- + +// TestL1Cache_ComputeArgSuffix verifies that field argument hashing produces +// deterministic, collision-resistant suffixes for cache key disambiguation. +// Without this, different argument values would share the same cache entry. +func TestL1Cache_ComputeArgSuffix(t *testing.T) { + t.Run("single arg produces deterministic suffix", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"a":"5"}`)) + loader := &Loader{jsonArena: ar, ctx: ctx} + + suffix1 := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "a"}}) + suffix2 := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "a"}}) + + assert.Equal(t, suffix1, suffix2) + assert.Equal(t, 17, len(suffix1)) + assert.Equal(t, byte('_'), suffix1[0]) + }) + + t.Run("different values produce different suffixes", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"a":"5","b":"10"}`)) + loader := &Loader{jsonArena: ar, ctx: ctx} + + suffix1 := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "a"}}) + suffix2 := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "b"}}) + + assert.NotEqual(t, suffix1, suffix2) + }) + + t.Run("null variable produces null in hash", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{}`)) + loader := &Loader{jsonArena: ar, ctx: ctx} + + // Variable "missing" doesn't exist, so argValue is nil → "null" written + suffix := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "missing"}}) + assert.Equal(t, 17, len(suffix)) + }) + + t.Run("null variable differs from string null", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"a":null,"b":"null"}`)) + loader := &Loader{jsonArena: ar, ctx: ctx} + + suffixNull := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "a"}}) + suffixMissing := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "missing"}}) + + // Both json null and missing variable produce "null" in the hash, + // so they should be equal + // Both json null and missing variable produce "null" in the hash + assert.Equal(t, suffixNull, suffixMissing) + }) + + t.Run("unsorted args get sorted before hashing", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"a":"1","b":"2"}`)) + loader := &Loader{jsonArena: ar, ctx: ctx} + + sorted := []CacheFieldArg{ + {ArgName: "alpha", VariableName: "a"}, + {ArgName: "beta", VariableName: "b"}, + } + unsorted := []CacheFieldArg{ + {ArgName: "beta", VariableName: "b"}, + {ArgName: "alpha", VariableName: "a"}, + } + + suffixSorted := loader.computeArgSuffix(sorted) + suffixUnsorted := loader.computeArgSuffix(unsorted) + + assert.Equal(t, suffixSorted, suffixUnsorted) + }) + + t.Run("RemapVariables applied before lookup", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"original":"42"}`)) + ctx.RemapVariables = map[string]string{"remapped": "original"} + loader := &Loader{jsonArena: ar, ctx: ctx} + + // "remapped" maps to "original" which has value "42" + suffixRemapped := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "remapped"}}) + // "original" has value "42" directly + suffixDirect := loader.computeArgSuffix([]CacheFieldArg{{ArgName: "first", VariableName: "original"}}) + + assert.Equal(t, suffixRemapped, suffixDirect) + }) + + t.Run("object arg produces deterministic hash regardless of key order", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx1 := NewContext(t.Context()) + ctx1.Variables = astjson.MustParseBytes([]byte(`{"filter":{"name":"Alice","age":30}}`)) + loader1 := &Loader{jsonArena: ar, ctx: ctx1} + + ctx2 := NewContext(t.Context()) + ctx2.Variables = astjson.MustParseBytes([]byte(`{"filter":{"age":30,"name":"Alice"}}`)) + loader2 := &Loader{jsonArena: ar, ctx: ctx2} + + suffix1 := loader1.computeArgSuffix([]CacheFieldArg{{ArgName: "filter", VariableName: "filter"}}) + suffix2 := loader2.computeArgSuffix([]CacheFieldArg{{ArgName: "filter", VariableName: "filter"}}) + + // Object key order should not affect hash (canonical JSON) + assert.Equal(t, suffix1, suffix2) + }) +} + +// --- P4: mergeEntityFields unit tests --- + +// TestL1Cache_MergeEntityFields verifies that merging entity data from a new fetch +// into an existing L1 cache entry adds new fields without overwriting existing ones. +func TestL1Cache_MergeEntityFields(t *testing.T) { + t.Run("new field added to existing entity", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + + dst := mustParseJSON(ar, `{"id":"1","name":"Alice"}`) + src := mustParseJSON(ar, `{"id":"1","email":"alice@example.com"}`) + + loader.mergeEntityFields(dst, src) + + resultJSON := string(dst.MarshalTo(nil)) + assert.Equal(t, `{"id":"1","name":"Alice","email":"alice@example.com"}`, resultJSON) + }) + + t.Run("existing field preserved not overwritten", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + + dst := mustParseJSON(ar, `{"id":"1","name":"Alice"}`) + src := mustParseJSON(ar, `{"id":"1","name":"Bob"}`) + + loader.mergeEntityFields(dst, src) + + resultJSON := string(dst.MarshalTo(nil)) + // Existing field preserved, not overwritten + assert.Equal(t, `{"id":"1","name":"Alice"}`, resultJSON) + }) + + t.Run("nil dst is no-op", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + src := mustParseJSON(ar, `{"id":"1"}`) + // Should not panic + loader.mergeEntityFields(nil, src) + }) + + t.Run("nil src is no-op", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + dst := mustParseJSON(ar, `{"id":"1"}`) + loader.mergeEntityFields(dst, nil) + resultJSON := string(dst.MarshalTo(nil)) + assert.Equal(t, `{"id":"1"}`, resultJSON) + }) + + t.Run("non-object type is no-op", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + dst := mustParseJSON(ar, `"string-value"`) + src := mustParseJSON(ar, `{"id":"1"}`) + // Should not panic + loader.mergeEntityFields(dst, src) + }) + + t.Run("multiple new and existing fields coexist", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{jsonArena: ar} + + dst := mustParseJSON(ar, `{"id":"1","name":"Alice","age":30}`) + src := mustParseJSON(ar, `{"id":"1","email":"a@b.com","role":"admin","name":"Bob"}`) + + loader.mergeEntityFields(dst, src) + + result := dst + // Existing fields preserved + assert.Equal(t, `"1"`, string(result.Get("id").MarshalTo(nil))) + assert.Equal(t, `"Alice"`, string(result.Get("name").MarshalTo(nil))) + assert.Equal(t, `30`, string(result.Get("age").MarshalTo(nil))) + // New fields added + assert.Equal(t, `"a@b.com"`, string(result.Get("email").MarshalTo(nil))) + assert.Equal(t, `"admin"`, string(result.Get("role").MarshalTo(nil))) + }) +} diff --git a/v2/pkg/engine/resolve/l1_cache_test.go b/v2/pkg/engine/resolve/l1_cache_test.go new file mode 100644 index 0000000000..47f2c524ab --- /dev/null +++ b/v2/pkg/engine/resolve/l1_cache_test.go @@ -0,0 +1,1538 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// TestL1Cache tests the L1 (per-request, in-memory) entity cache functionality. +// L1 cache stores pointers to entities in the jsonArena, allowing reuse within a single request. +// It only applies to entity fetches (not root fetches) since root fields have no prior entity data. + +// TestL1Cache_SameEntityDeduplication verifies that when the same entity is fetched +// twice within a single request, the second fetch is served from L1 cache. +// Without this, duplicate entity fetches would hit the subgraph unnecessarily. +func TestL1Cache_SameEntityDeduplication(t *testing.T) { + t.Run("L1 hit - same entity fetched twice in same request", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Root datasource - returns initial data + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // First entity fetch - should be called + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + // Second entity fetch - should NOT be called (L1 hit) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // L1 should prevent this call + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + // Root fetch + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // First entity fetch - populates L1 cache + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + + // Second entity fetch for SAME entity - should hit L1 cache + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + + // Create loader WITHOUT L2 cache - only L1 + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + // L2 disabled - testing L1 only + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + }) + + t.Run("L1 disabled - each entity fetch goes to subgraph", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Root datasource + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // Entity fetch - should be called TWICE (no L1 cache) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(2) // Called twice because L1 is disabled + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // First entity fetch + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + + // Second entity fetch - should also be called (L1 disabled) + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false // L1 DISABLED + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + }) + + t.Run("L1 partial data - fetch needed when missing required fields", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Root datasource + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // First entity fetch - only returns id and name + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + // Second entity fetch needs price field - L1 has partial data, so fetch is needed + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One","price":99.99}]}}`), nil + }).Times(1) // Should be called because L1 doesn't have price field + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesDataIdName := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + providesDataIdNamePrice := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // First entity fetch - provides id, name + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesDataIdName, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + + // Second entity fetch - needs id, name, price (partial miss) + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesDataIdNamePrice, // Needs price field + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Float{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One","price":99.99}}}`, out) + }) +} + +// TestL1Cache_PartialLoading verifies that with EnablePartialCacheLoad=true, +// only cache-missed entities are fetched from the subgraph. +// Without this, a single cache miss would refetch ALL entities in the batch. +func TestL1Cache_PartialLoading(t *testing.T) { + t.Run("partial cache loading with L2 - only missing entities fetched", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Pre-populate cache with prod-1 only (prod-2 and prod-3 are NOT cached) + prod1Data := `{"__typename":"Product","id":"prod-1","name":"Cached Product One"}` + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(prod1Data)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Root datasource - returns 3 products + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"topProducts":[{"__typename":"Product","id":"prod-1"},{"__typename":"Product","id":"prod-2"},{"__typename":"Product","id":"prod-3"}]}}`), nil + }).Times(1) + + // Batch entity fetch - WITH partial cache loading enabled + // Only prod-2 and prod-3 should be fetched (prod-1 is in L2 cache) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + // Verify exact input - only prod-2 and prod-3, NOT prod-1 (cached) + expectedInput := `{"method":"POST","body":{"query":"...","variables":{"representations":[{"__typename":"Product","id":"prod-2"},{"__typename":"Product","id":"prod-3"}]}}}` + assert.Equal(t, expectedInput, string(input)) + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-2","name":"Fetched Product Two"},{"__typename":"Product","id":"prod-3","name":"Fetched Product Three"}]}}`), nil + }).Times(1) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + // Root fetch + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // Batch entity fetch - WITH EnablePartialCacheLoad + // Should only fetch prod-2 and prod-3 (prod-1 is in cache) + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}, + }, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + EnablePartialCacheLoad: true, // KEY: Enable partial loading + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.topProducts", ArrayPath("topProducts")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("topProducts"), + Value: &Array{ + Path: []string{"topProducts"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + + // All 3 products should be in the result + // prod-1 should have the cached name, prod-2 and prod-3 should have fetched names + expectedOutput := `{"data":{"topProducts":[{"__typename":"Product","id":"prod-1","name":"Cached Product One"},{"__typename":"Product","id":"prod-2","name":"Fetched Product Two"},{"__typename":"Product","id":"prod-3","name":"Fetched Product Three"}]}}` + assert.Equal(t, expectedOutput, out) + }) + + t.Run("partial cache loading disabled with L2 - all entities fetched", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Pre-populate cache with prod-1 only + prod1Data := `{"__typename":"Product","id":"prod-1","name":"Cached Product One"}` + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(prod1Data)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + // Root datasource - returns 3 products + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"topProducts":[{"__typename":"Product","id":"prod-1"},{"__typename":"Product","id":"prod-2"},{"__typename":"Product","id":"prod-3"}]}}`), nil + }).Times(1) + + // Batch entity fetch - WITHOUT partial cache loading (default) + // ALL 3 entities should be fetched + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + // Verify exact input - all 3 entities (partial loading disabled) + expectedInput := `{"method":"POST","body":{"query":"...","variables":{"representations":[{"__typename":"Product","id":"prod-1"},{"__typename":"Product","id":"prod-2"},{"__typename":"Product","id":"prod-3"}]}}}` + assert.Equal(t, expectedInput, string(input)) + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Fetched Product One"},{"__typename":"Product","id":"prod-2","name":"Fetched Product Two"},{"__typename":"Product","id":"prod-3","name":"Fetched Product Three"}]}}`), nil + }).Times(1) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}, + }, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + EnablePartialCacheLoad: false, // KEY: Partial loading DISABLED (default) + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.topProducts", ArrayPath("topProducts")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("topProducts"), + Value: &Array{ + Path: []string{"topProducts"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err = resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + + // All 3 products should be in the result with fetched names (not cached) + expectedOutput := `{"data":{"topProducts":[{"__typename":"Product","id":"prod-1","name":"Fetched Product One"},{"__typename":"Product","id":"prod-2","name":"Fetched Product Two"},{"__typename":"Product","id":"prod-3","name":"Fetched Product Three"}]}}` + assert.Equal(t, expectedOutput, out) + }) +} + +// TestL1CachePartialLoadingL1Only tests partial cache loading using only L1 cache (no L2). +// This tests a realistic scenario where a batch entity fetch for nested entities +// encounters some entities that are already in L1 cache from a previous fetch. +// TestL1Cache_PartialLoadingL1Only verifies L1-only partial loading with duplicate +// nested entities. Duplicate authors across reviews should be served from L1 cache +// instead of re-fetching from the subgraph. +func TestL1Cache_PartialLoadingL1Only(t *testing.T) { + t.Run("L1 partial cache loading - duplicate entities from nested fetch", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Root datasource - returns products with reviews + // Each review has an author reference, some authors appear multiple times + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + // Product has 3 reviews: 2 by author-1, 1 by author-2 + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1","reviews":[{"body":"Great!","author":{"__typename":"User","id":"author-1"}},{"body":"Love it!","author":{"__typename":"User","id":"author-1"}},{"body":"Nice!","author":{"__typename":"User","id":"author-2"}}]}}}`), nil + }).Times(1) + + // First batch entity fetch - fetches ALL authors (author-1, author-1, author-2) + // This populates L1 cache with author-1 and author-2 + // Note: Due to deduplication in batch, author-1 appears once in the actual request + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + // Verify exact input - deduplicated to 2 unique authors + expectedInput := `{"method":"POST","body":{"query":"first author fetch","variables":{"representations":[{"__typename":"User","id":"author-1"},{"__typename":"User","id":"author-2"}]}}}` + assert.Equal(t, expectedInput, string(input)) + // Response for unique authors (deduplicated) + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"author-1","username":"user1"},{"__typename":"User","id":"author-2","username":"user2"}]}}`), nil + }).Times(1) + + // Second batch entity fetch - WITH partial cache loading enabled + // This fetch requests all 3 author references again + // With partial loading: author-1 and author-2 are in L1 cache, no fetch needed + // Since ALL are cached, the fetch should be skipped entirely + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // Should NOT be called - all authors are in L1 cache + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + userProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + // Root fetch - gets product with reviews and author references + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // First batch entity fetch - for authors (populates L1 cache) + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"first author fetch","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}, + }, + }, + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + Info: &FetchInfo{ + DataSourceID: "users", + DataSourceName: "users", + OperationType: ast.OperationTypeQuery, + ProvidesData: userProvidesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + // First fetch does NOT have partial loading - fetches all + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product.reviews.author", ObjectPath("product"), ArrayPath("reviews"), ObjectPath("author")), + + // Second batch entity fetch - WITH EnablePartialCacheLoad + // Should skip fetch entirely (all authors already in L1 cache) + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"second author fetch","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}, + }, + }, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + Info: &FetchInfo{ + DataSourceID: "users", + DataSourceName: "users", + OperationType: ast.OperationTypeQuery, + ProvidesData: userProvidesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + EnablePartialCacheLoad: true, // KEY: Enable partial loading + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product.reviews.author", ObjectPath("product"), ArrayPath("reviews"), ObjectPath("author")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + { + Name: []byte("reviews"), + Value: &Array{ + Path: []string{"reviews"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("body"), Value: &String{Path: []string{"body"}}}, + { + Name: []byte("author"), + Value: &Object{ + Path: []string{"author"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &String{Path: []string{"username"}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + // NO L2 cache - testing L1 only + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = false // L2 disabled + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + + // All authors should be in the result with usernames from first fetch + expectedOutput := `{"data":{"product":{"__typename":"Product","id":"prod-1","reviews":[{"body":"Great!","author":{"__typename":"User","id":"author-1","username":"user1"}},{"body":"Love it!","author":{"__typename":"User","id":"author-1","username":"user1"}},{"body":"Nice!","author":{"__typename":"User","id":"author-2","username":"user2"}}]}}}` + assert.Equal(t, expectedOutput, out) + }) +} + +// TestL1Cache_NestedEntitiesInFetchResponse verifies that nested entities within a +// fetch response are NOT extracted and cached in L1. Only the top-level fetched +// entity is cached. Without this boundary, stale nested data could be served. +func TestL1Cache_NestedEntitiesInFetchResponse(t *testing.T) { + t.Run("nested entities in entity fetch response are not populated in L1", func(t *testing.T) { + // When entity fetch 1 returns User u1 whose response contains a nested User u3 + // (via bestFriend), only u1 is stored in L1. The nested u3 is NOT extracted and + // cached separately. A subsequent entity fetch 2 for u3 must call the subgraph. + // + // If nested entity L1 population were implemented, entityDS2 would be Times(0) + // because u3 would already be in L1 from fetch 1's response. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Root fetch - returns two user references at different paths + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"firstUser":{"__typename":"User","id":"u1"},"secondUser":{"__typename":"User","id":"u3"}}}`), nil + }).Times(1) + + // Entity fetch 1 - resolves User u1, response includes nested User u3 (bestFriend) + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"u1","name":"Alice","bestFriend":{"__typename":"User","id":"u3","name":"Charlie"}}]}}`), nil + }).Times(1) + + // Entity fetch 2 - resolves User u3 + // Called because u3 is NOT in L1 (only u1 was cached from fetch 1) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"u3","name":"Charlie"}]}}`), nil + }).Times(1) // Would be Times(0) if nested entity L1 population were implemented + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + userProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // Entity fetch 1: resolves u1 at firstUser path + // Response includes nested u3 as bestFriend, but only u1 is cached in L1 + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"first fetch","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}, + }, + }, + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + Info: &FetchInfo{ + DataSourceID: "users", + DataSourceName: "users", + OperationType: ast.OperationTypeQuery, + ProvidesData: userProvidesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.firstUser", ObjectPath("firstUser")), + + // Entity fetch 2: resolves u3 at secondUser path + // u3 appeared as nested entity in fetch 1's response but is NOT in L1 + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","body":{"query":"second fetch","variables":{"representations":[`), SegmentType: StaticSegmentType}, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}, + }, + }, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + Info: &FetchInfo{ + DataSourceID: "users", + DataSourceName: "users", + OperationType: ast.OperationTypeQuery, + ProvidesData: userProvidesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.secondUser", ObjectPath("secondUser")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("firstUser"), + Value: &Object{ + Path: []string{"firstUser"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + { + Name: []byte("bestFriend"), + Value: &Object{ + Path: []string{"bestFriend"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + }, + { + Name: []byte("secondUser"), + Value: &Object{ + Path: []string{"secondUser"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = false + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + expectedOutput := `{"data":{"firstUser":{"__typename":"User","id":"u1","name":"Alice","bestFriend":{"__typename":"User","id":"u3","name":"Charlie"}},"secondUser":{"__typename":"User","id":"u3","name":"Charlie"}}}` + assert.Equal(t, expectedOutput, out) + + // gomock verifies: entityDS1.Times(1) and entityDS2.Times(1) + // entityDS2 being called proves u3 (nested in fetch 1's response) was NOT cached in L1 + }) +} + +// TestL1Cache_UseL1CacheFlagDisabled verifies that UseL1Cache=false on a fetch +// bypasses L1 even when L1 is globally enabled. The postprocessor sets this flag +// when a fetch cannot benefit from L1 caching. +func TestL1Cache_UseL1CacheFlagDisabled(t *testing.T) { + t.Run("UseL1Cache=false bypasses L1 even when globally enabled", func(t *testing.T) { + // This test verifies that when UseL1Cache=false is set on a fetch, + // the L1 cache is bypassed even though L1 is globally enabled. + // This is the behavior set by the optimizeL1Cache postprocessor when + // a fetch cannot benefit from L1 caching. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Root datasource + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // Entity fetch - should be called TWICE because UseL1Cache=false + // even though L1 is globally enabled + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(2) // Called twice because UseL1Cache=false bypasses L1 + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // First entity fetch - UseL1Cache=false + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: false, // Explicitly disabled + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + + // Second entity fetch - UseL1Cache=false, should NOT hit L1 + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: false, // Explicitly disabled + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST"}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true // L1 globally ENABLED + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + // Verify L1 cache stats show no hits (both fetches went to subgraph) + stats := ctx.GetCacheStats() + // No L1 reads when UseL1Cache=false + assert.Equal(t, 0, len(stats.L1Reads)) + }) +} diff --git a/v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go b/v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go new file mode 100644 index 0000000000..41baa417ae --- /dev/null +++ b/v2/pkg/engine/resolve/l1_l2_cache_e2e_test.go @@ -0,0 +1,1636 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// TestL1L2CacheEndToEnd provides comprehensive end-to-end tests for the L1/L2 caching system. +// +// L1 Cache (Per-Request, In-Memory): +// - Stored in Loader as sync.Map +// - Lifecycle: Single GraphQL request +// - Only used for entity fetches (not root fetches) +// - Purpose: Prevents redundant fetches for same entity at different paths +// +// L2 Cache (External, Cross-Request): +// - Uses LoaderCache interface implementations +// - Lifecycle: Configured TTL, shared across requests +// - Applies to both root fetches and entity fetches +// +// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch +// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1) + +func TestL1L2CacheEndToEnd(t *testing.T) { + // ============================================================================= + // L1 CACHE ONLY TESTS + // ============================================================================= + + t.Run("L1 Only - entity reuse within same request", func(t *testing.T) { + // This test verifies that L1 cache prevents redundant entity fetches + // within a single request when the same entity appears at multiple paths. + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Root fetch - get product with minimal data + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // First entity fetch - should be called (L1 miss) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One","price":99.99}]}}`), nil + }).Times(1) + + // Second entity fetch for same entity - should NOT be called (L1 hit) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // L1 should prevent this call + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + // Root fetch + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products"}`), SegmentType: StaticSegmentType}}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + // First entity fetch + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"entity1","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.product", ObjectPath("product")), + // Second entity fetch (same entity at different path) + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"entity2","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.product.related", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Float{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = false // L1 only + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One","price":99.99}}}`, out) + }) + + t.Run("L1 Only - disabled means separate fetches", func(t *testing.T) { + // When L1 is disabled, same entity at different paths should trigger separate fetches + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + // Both entity fetches should be called when L1 is disabled + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil).Times(2) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{}`), SegmentType: StaticSegmentType}}}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"body":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, UseL1Cache: true}, + }, "query.product", ObjectPath("product")), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"body":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, UseL1Cache: true}, + }, "query.product.related", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + {Name: []byte("product"), Value: &Object{Path: []string{"product"}, Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }}}, + }, + }, + } + + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false // Disabled + ctx.ExecutionOptions.Caching.EnableL2Cache = false + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + }) + + // ============================================================================= + // L2 CACHE ONLY TESTS + // ============================================================================= + + t.Run("L2 Only - miss then hit across requests", func(t *testing.T) { + // This test verifies L2 cache works for cross-request caching + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root DS for first request + rootDS1 := NewMockDataSource(ctrl) + rootDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + // First request: entity DS called (cache miss) + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Cached Product"}]}}`), nil).Times(1) + + // Root DS for second request + rootDS2 := NewMockDataSource(ctrl) + rootDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + // Second request: entity DS NOT called (cache hit) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // Cache hit + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } + + createResponse := func(rootDS, entityDS DataSource) *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{}`), SegmentType: StaticSegmentType}}}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, TTL: time.Minute, UseL1Cache: true}, + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + {Name: []byte("product"), Value: &Object{Path: []string{"product"}, Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }}}, + }, + }, + } + } + + // First request (cache miss) + ctx1 := NewContext(context.Background()) + ctx1.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx1.ExecutionOptions.Caching.EnableL1Cache = false + ctx1.ExecutionOptions.Caching.EnableL2Cache = true + + loader1 := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar1 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable1 := NewResolvable(ar1, ResolvableOptions{}) + err := resolvable1.Init(ctx1, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader1.LoadGraphQLResponseData(ctx1, createResponse(rootDS1, entityDS1), resolvable1) + require.NoError(t, err) + + log := cache.GetLog() + wantFirstLog := []CacheLogEntry{ + // _entities(Product) — L2 miss, product not yet cached + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Hit: false}}}, + // _entities(Product) — store fetched product data in L2 + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, TTL: time.Minute}}}, + } + assert.Equal(t, wantFirstLog, log, "First request: L2 miss then set") + + // Second request (cache hit) — new loader but same L2 cache instance + cache.ClearLog() + ctx2 := NewContext(context.Background()) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL1Cache = false + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + + loader2 := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader2.LoadGraphQLResponseData(ctx2, createResponse(rootDS2, entityDS2), resolvable2) + require.NoError(t, err) + + log2 := cache.GetLog() + wantSecondLog := []CacheLogEntry{ + // _entities(Product) — L2 hit, product cached from first request; no DS call needed + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Hit: true}}}, + } + assert.Equal(t, wantSecondLog, log2, "Second request: L2 hit only") + }) + + t.Run("L2 Only - disabled means no cache operations", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root DS for both requests + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(2) + + // Entity DS called both times (no cache) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product"}]}}`), nil).Times(2) // Called both times + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }} + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{}`), SegmentType: StaticSegmentType}}}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, UseL1Cache: true}, + }, "query.product", ObjectPath("product")), + ), + Data: &Object{Fields: []*Field{{Name: []byte("product"), Value: &Object{Path: []string{"product"}, Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }}}}}, + } + + // Run twice with L2 disabled + for range 2 { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = false // Disabled + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + } + + // Verify no cache operations occurred + log := cache.GetLog() + assert.Empty(t, log, "No cache operations should occur when L2 is disabled") + }) + + // ============================================================================= + // L1 + L2 COMBINED TESTS + // ============================================================================= + + t.Run("L1+L2 - L1 hit prevents L2 lookup", func(t *testing.T) { + // When L1 has the data, L2 should not be consulted for entity fetches + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + // First entity fetch populates both L1 and L2 + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil).Times(1) + + // Second entity fetch should hit L1 (no DS call, no L2 lookup needed) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }} + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{}`), SegmentType: StaticSegmentType}}}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, TTL: time.Minute, UseL1Cache: true}, + }, "query.product", ObjectPath("product")), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, TTL: time.Minute, UseL1Cache: true}, + }, "query.product.related", ObjectPath("product")), + ), + Data: &Object{Fields: []*Field{{Name: []byte("product"), Value: &Object{Path: []string{"product"}, Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }}}}}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Two sequential entity fetches for the same product (prod-1): + // 1st fetch: L1 miss -> L2 miss -> DS call -> populate L1 + L2 + // 2nd fetch: L1 hit -> skip L2 and DS entirely + // So L2 only sees operations from the 1st fetch + log := cache.GetLog() + wantLog := []CacheLogEntry{ + // 1st _entities(Product) — L1 miss, L2 miss + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Hit: false}}}, + // 1st _entities(Product) — store fetched data in L2 (L1 also populated in-memory) + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, TTL: time.Minute}}}, + // 2nd _entities(Product) — no L2 operations: L1 hit short-circuits + } + assert.Equal(t, wantLog, log, "L1 hit should prevent second L2 lookup") + }) + + t.Run("L1+L2 - L1 miss, L2 hit provides data", func(t *testing.T) { + // When L1 misses but L2 has data, data should come from L2 + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Pre-populate L2 cache with correct key format: {"__typename":"Product","key":{"id":"prod-1"}} + _ = cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(`{"__typename":"Product","id":"prod-1","name":"L2 Cached Product"}`)}, + }, time.Minute)) + cache.ClearLog() // Clear the set log + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + // Entity DS should NOT be called (L2 hit) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }} + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{}`), SegmentType: StaticSegmentType}}}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, UseL1Cache: true}, + }, "query.product", ObjectPath("product")), + ), + Data: &Object{Fields: []*Field{{Name: []byte("product"), Value: &Object{Path: []string{"product"}, Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }}}}}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"L2 Cached Product"}}}`, out) + + log := cache.GetLog() + wantLog := []CacheLogEntry{ + // _entities(Product) — L1 miss (empty), L2 hit from pre-populated cache; no DS call needed + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Hit: true}}}, + } + assert.Equal(t, wantLog, log, "L2 hit: single get operation with hit") + }) + + t.Run("L1+L2 - cross-request: L1 isolated, L2 shared", func(t *testing.T) { + // L1 is per-request, L2 is shared across requests + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root DS for request 1 + rootDS1 := NewMockDataSource(ctrl) + rootDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + // Request 1: Cache miss, fetches from DS + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil).Times(1) + + // Root DS for request 2 + rootDS2 := NewMockDataSource(ctrl) + rootDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + // Request 2: L2 hit (L1 is fresh/empty for new request) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // L2 hit + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }} + + createResponse := func(rootDS, entityDS DataSource) *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{}`), SegmentType: StaticSegmentType}}}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, TTL: time.Minute, UseL1Cache: true}, + }, "query.product", ObjectPath("product")), + ), + Data: &Object{Fields: []*Field{{Name: []byte("product"), Value: &Object{Path: []string{"product"}, Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }}}}}, + } + } + + // Request 1 + ctx1 := NewContext(context.Background()) + ctx1.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx1.ExecutionOptions.Caching.EnableL1Cache = true + ctx1.ExecutionOptions.Caching.EnableL2Cache = true + + loader1 := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar1 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable1 := NewResolvable(ar1, ResolvableOptions{}) + err := resolvable1.Init(ctx1, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader1.LoadGraphQLResponseData(ctx1, createResponse(rootDS1, entityDS1), resolvable1) + require.NoError(t, err) + + // Request 2 (new context = new L1, but same L2) + cache.ClearLog() + ctx2 := NewContext(context.Background()) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL1Cache = true + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + + loader2 := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader2.LoadGraphQLResponseData(ctx2, createResponse(rootDS2, entityDS2), resolvable2) + require.NoError(t, err) + + // Request 2 uses a new Loader (new L1) but same L2 cache instance + log := cache.GetLog() + wantLog := []CacheLogEntry{ + // _entities(Product) — L1 miss (new request, empty L1), L2 hit from request 1; no DS call + {Operation: "get", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Hit: true}}}, + } + assert.Equal(t, wantLog, log, "Request 2: L2 hit (L1 is fresh/empty)") + }) + + t.Run("Both disabled - no cache operations", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product"}]}}`), nil).Times(1) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }} + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{}`), SegmentType: StaticSegmentType}}}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "products", OperationType: ast.OperationTypeQuery, ProvidesData: providesData}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: productCacheKeyTemplate, UseL1Cache: true}, + }, "query.product", ObjectPath("product")), + ), + Data: &Object{Fields: []*Field{{Name: []byte("product"), Value: &Object{Path: []string{"product"}, Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }}}}}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = false + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Verify no cache operations + log := cache.GetLog() + assert.Empty(t, log, "No cache operations should occur when both L1 and L2 are disabled") + }) +} + +// TestL1CacheSkipsParallelFetch verifies that parallel fetches are skipped when L1 cache has complete hits. +// This tests the optimization at loader.go:296 where goroutines are not spawned for parallel fetch nodes +// that have all entities already in L1 cache from a previous sequential fetch. +func TestL1CacheSkipsParallelFetch(t *testing.T) { + t.Run("parallel fetches skipped on L1 hit from previous fetch", func(t *testing.T) { + // This test sets up a sequence where: + // 1. Root fetch returns products + // 2. First entity fetch runs and populates L1 cache with all needed data + // 3. Parallel group runs - the fetch for same entities should be SKIPPED (L1 hit) + // + // The key behavior being tested: when L1 cache has a complete hit for all entities + // in a parallel fetch node, the goroutine is not spawned (line 295-296 in loader.go) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"products":[{"__typename":"Product","id":"prod-1"},{"__typename":"Product","id":"prod-2"}]}}`), nil + }).Times(1) + + // First entity fetch (sequential) - populates L1 with all fields including price + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One","price":99.99},{"__typename":"Product","id":"prod-2","name":"Product Two","price":49.99}]}}`), nil + }).Times(1) + + // Second entity fetch (in parallel group) - should NOT be called (L1 hit from entityDS1) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // L1 cache hit should skip this fetch entirely - THIS IS THE KEY ASSERTION + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + // First fetch provides both name AND price so L1 can satisfy second fetch + providesDataFull := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}, Nullable: false}}, + }, + } + + // Second fetch only needs price (subset of what first fetch provides) + providesDataPrice := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("price"), Value: &Scalar{Path: []string{"price"}, Nullable: false}}, + }, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + // Root fetch - get products + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products"}`), SegmentType: StaticSegmentType}}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + // First entity fetch - populates L1 with product entities (includes price) + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"names","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "products-names", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesDataFull, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.products", ArrayPath("products")), + // Parallel group with single fetch - should skip because L1 has all data + Parallel( + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://pricing","body":{"query":"prices","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{{Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, {Name: []byte("id"), Value: &String{Path: []string{"id"}}}}})}}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "pricing", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesDataPrice, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.products", ArrayPath("products")), + ), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Path: []string{"products"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("price"), Value: &Float{Path: []string{"price"}}}, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = false // L1 only for this test + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + // Output includes all data from L1 cache (merged from first fetch) + // __typename is included because the entity data from L1 cache includes it + assert.Equal(t, `{"data":{"products":[{"__typename":"Product","id":"prod-1","name":"Product One","price":99.99},{"__typename":"Product","id":"prod-2","name":"Product Two","price":49.99}]}}`, out) + + // Verify L1 stats: + // - 2 misses from first entity fetch (sequential, populates L1) + // - 2 hits from second entity fetch in parallel (same products, skipped via L1) + stats := ctx.GetCacheStats() + var l1Hits, l1Misses int + for _, ev := range stats.L1Reads { + if ev.Kind == CacheKeyHit { + l1Hits++ + } else { + l1Misses++ + } + } + assert.Equal(t, 2, l1Hits, "L1 should have 2 hits (parallel fetch for same entities skipped)") + assert.Equal(t, 2, l1Misses, "L1 should have 2 misses (first entity fetch)") + }) + +} + +func TestL1CacheFieldAccumulation(t *testing.T) { + t.Run("fields from fetch 1 survive fetch 2 merge and are available for fetch 3", func(t *testing.T) { + // Scenario: 3 sequential entity fetches for the same entity (User:1), + // each with different ProvidesData (different field sets). + // + // Fetch 1: ProvidesData = {name} + // → L1 MISS, calls subgraph, stores {__typename, id, name} in L1 + // + // Fetch 2: ProvidesData = {email} + // → L1 HIT but widening check fails (cached value lacks "email") + // → Calls subgraph, gets {__typename, id, email} + // → Merges into L1: {__typename, id, name, email} + // + // Fetch 3: ProvidesData = {name} + // → L1 HIT, widening check passes ("name" is in L1 from fetch 1) + // → Skips subgraph call + // + // This proves: + // 1. L1 passthrough write preserves all fields (including @key "id") + // 2. L1 merge accumulates fields across fetches + // 3. Fetch 1's "name" survives fetch 2's merge and is available for fetch 3 + // 4. Fetch 3 consumes a field that fetch 2 did NOT provide + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"1"}}}`), nil + }).Times(1) + + // Fetch 1: returns name only + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"1","name":"Alice"}]}}`), nil + }).Times(1) + + // Fetch 2: returns email only (NOT name — fetch 2's subgraph doesn't provide name) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"1","email":"alice@example.com"}]}}`), nil + }).Times(1) + + // Fetch 3: should NOT be called — "name" is in L1 from fetch 1 + entityDS3 := NewMockDataSource(ctrl) + entityDS3.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData1 := &Object{ + Fields: []*Field{ + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(providesData1) + + providesData2 := &Object{ + Fields: []*Field{ + {Name: []byte("email"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(providesData2) + + // Fetch 3 wants "name" — a field from fetch 1, NOT from fetch 2. + providesData3 := &Object{ + Fields: []*Field{ + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(providesData3) + + entityInput := BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://users","body":{"query":"q","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{ + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }}), + }}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://users"}`), SegmentType: StaticSegmentType}}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "users", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData1, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.user", ObjectPath("user")), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "users", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData2, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.user", ObjectPath("user")), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS3, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "users", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData3, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: userCacheKeyTemplate, + UseL1Cache: true, + }, + }, "query.user", ObjectPath("user")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("email"), Value: &String{Path: []string{"email"}}}, + }, + }, + }, + }, + }, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + l1Cache: map[string]*astjson.Value{}, + resolvable: resolvable, + caches: map[string]LoaderCache{"default": NewFakeLoaderCache()}, + } + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + // Extra fields (__typename, id) from L1 passthrough are present in the + // merged data tree but harmless — the render walk only outputs fields + // listed in the response plan. + assert.Equal(t, `{"data":{"user":{"__typename":"User","id":"1","name":"Alice","email":"alice@example.com"}}}`, string(out)) + + stats := ctx.GetCacheStats() + // Fetch 1: L1 miss → subgraph call → stores {name, id, __typename} + // Fetch 2: L1 hit but widening fails (no email) → subgraph call → merges email into L1 + // Fetch 3: L1 hit, widening passes (name present from fetch 1) → no subgraph call + var l1Hits, l1Misses int + for _, ev := range stats.L1Reads { + if ev.Kind == CacheKeyHit { + l1Hits++ + } else { + l1Misses++ + } + } + assert.Equal(t, 1, l1Hits, "Fetch 3 should hit L1 (name from fetch 1 survived fetch 2's merge)") + assert.Equal(t, 1, l1Misses, "Fetch 1 should miss L1 (cache empty)") + + // Verify the L1 cache entry contains ALL accumulated fields. + const cacheKey = `{"__typename":"User","key":{"id":"1"}}` + cached, ok := loader.l1Cache[cacheKey] + require.True(t, ok, "L1 should have User:1 entry") + cachedJSON := string(cached.MarshalTo(nil)) + assert.Equal(t, `{"__typename":"User","id":"1","name":"Alice","email":"alice@example.com"}`, cachedJSON, + "L1 entry must contain name (fetch 1), email (fetch 2 merge), and key fields (id, __typename) via passthrough") + }) + + t.Run("different aliases for same field across fetches", func(t *testing.T) { + // Fetch 1: ProvidesData = {nickname: name} (alias "nickname" for field "name") + // → L1 MISS, calls subgraph, stores {__typename, id, name} in L1 (normalized) + // + // Fetch 2: ProvidesData = {email} + // → L1 widening miss (no email), calls subgraph + // + // Fetch 3: ProvidesData = {displayName: name} (different alias for same field) + // → L1 HIT: L1 stores schema-name "name", denormalize maps it to "displayName" + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"1"}}}`), nil + }).Times(1) + + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + // Subgraph returns schema field name "name", response has alias "nickname" + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"1","nickname":"Alice"}]}}`), nil + }).Times(1) + + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"1","email":"alice@example.com"}]}}`), nil + }).Times(1) + + // Fetch 3 should NOT call subgraph — "name" is in L1 from fetch 1 + entityDS3 := NewMockDataSource(ctrl) + entityDS3.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + // Fetch 1: alias "nickname" → schema "name" + providesData1 := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("nickname"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + + providesData2 := &Object{ + Fields: []*Field{ + {Name: []byte("email"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(providesData2) + + // Fetch 3: alias "displayName" → schema "name" + providesData3 := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("displayName"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + + entityInput := BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://users","body":{"query":"q","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{ + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }}), + }}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://users"}`), SegmentType: StaticSegmentType}}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "users", OperationType: ast.OperationTypeQuery, ProvidesData: providesData1}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: userCacheKeyTemplate, UseL1Cache: true}, + }, "query.user", ObjectPath("user")), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "users", OperationType: ast.OperationTypeQuery, ProvidesData: providesData2}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: userCacheKeyTemplate, UseL1Cache: true}, + }, "query.user", ObjectPath("user")), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS3, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "users", OperationType: ast.OperationTypeQuery, ProvidesData: providesData3}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: userCacheKeyTemplate, UseL1Cache: true}, + }, "query.user", ObjectPath("user")), + ), + Data: &Object{ + Fields: []*Field{ + {Name: []byte("user"), Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("displayName"), Value: &String{Path: []string{"displayName"}}}, + {Name: []byte("email"), Value: &String{Path: []string{"email"}}}, + }, + }}, + }, + }, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + l1Cache: map[string]*astjson.Value{}, + resolvable: resolvable, + caches: map[string]LoaderCache{"default": NewFakeLoaderCache()}, + } + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"user":{"__typename":"User","id":"1","nickname":"Alice","email":"alice@example.com","displayName":"Alice"}}}`, string(out), + "fetch 3 should get name via different alias (displayName)") + + stats := ctx.GetCacheStats() + var l1Hits int + for _, ev := range stats.L1Reads { + if ev.Kind == CacheKeyHit { + l1Hits++ + } + } + assert.Equal(t, 1, l1Hits, "Fetch 3 should hit L1 (schema name 'name' stored by fetch 1, denormalized to 'displayName')") + }) + + t.Run("alias then no alias for same field", func(t *testing.T) { + // Fetch 1: ProvidesData = {nickname: name} (alias) + // → L1 MISS, stores normalized "name" in L1 + // + // Fetch 2: ProvidesData = {email} + // → L1 widening miss + // + // Fetch 3: ProvidesData = {name} (no alias, schema name) + // → L1 HIT: "name" is in L1 from fetch 1's normalized write + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"user":{"__typename":"User","id":"1"}}}`), nil + }).Times(1) + + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"1","nickname":"Alice"}]}}`), nil + }).Times(1) + + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"User","id":"1","email":"alice@example.com"}]}}`), nil + }).Times(1) + + entityDS3 := NewMockDataSource(ctrl) + entityDS3.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // L1 hit + + userCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + // Fetch 1: alias "nickname" → schema "name" + providesData1 := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("nickname"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + + providesData2 := &Object{ + Fields: []*Field{ + {Name: []byte("email"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(providesData2) + + // Fetch 3: no alias, uses schema name directly + providesData3 := &Object{ + Fields: []*Field{ + {Name: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(providesData3) + + entityInput := BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://users","body":{"query":"q","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{ + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }}), + }}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + } + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://users"}`), SegmentType: StaticSegmentType}}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "users", OperationType: ast.OperationTypeQuery, ProvidesData: providesData1}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: userCacheKeyTemplate, UseL1Cache: true}, + }, "query.user", ObjectPath("user")), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "users", OperationType: ast.OperationTypeQuery, ProvidesData: providesData2}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: userCacheKeyTemplate, UseL1Cache: true}, + }, "query.user", ObjectPath("user")), + SingleWithPath(&BatchEntityFetch{ + Input: entityInput, + DataSource: entityDS3, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{DataSourceName: "users", OperationType: ast.OperationTypeQuery, ProvidesData: providesData3}, + Caching: FetchCacheConfiguration{Enabled: true, CacheName: "default", CacheKeyTemplate: userCacheKeyTemplate, UseL1Cache: true}, + }, "query.user", ObjectPath("user")), + ), + Data: &Object{ + Fields: []*Field{ + {Name: []byte("user"), Value: &Object{ + Path: []string{"user"}, + Fields: []*Field{ + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("email"), Value: &String{Path: []string{"email"}}}, + }, + }}, + }, + }, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + loader := &Loader{ + ctx: ctx, + jsonArena: ar, + l1Cache: map[string]*astjson.Value{}, + resolvable: resolvable, + caches: map[string]LoaderCache{"default": NewFakeLoaderCache()}, + } + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"user":{"__typename":"User","id":"1","nickname":"Alice","email":"alice@example.com","name":"Alice"}}}`, string(out), + "fetch 3 should get name (no alias) from L1") + + stats := ctx.GetCacheStats() + var l1Hits int + for _, ev := range stats.L1Reads { + if ev.Kind == CacheKeyHit { + l1Hits++ + } + } + assert.Equal(t, 1, l1Hits, "Fetch 3 should hit L1 (schema name 'name' stored by fetch 1's alias normalize)") + }) +} diff --git a/v2/pkg/engine/resolve/l2_cache_key_interceptor_test.go b/v2/pkg/engine/resolve/l2_cache_key_interceptor_test.go new file mode 100644 index 0000000000..194b49c69b --- /dev/null +++ b/v2/pkg/engine/resolve/l2_cache_key_interceptor_test.go @@ -0,0 +1,808 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// helper functions to reduce boilerplate in interceptor tests + +func newProductCacheKeyTemplate() *EntityQueryCacheKeyTemplate { + return &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } +} + +func newProductProvidesData() *Object { + return &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}, Nullable: false}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } +} + +func newEntityFetchSegments() []TemplateSegment { + return []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Product {id name}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + } +} + +func newProductResponseData() *Object { + return &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + } +} + +// TestL2CacheKeyInterceptor verifies that L2CacheKeyInterceptor and GlobalCacheKeyPrefix +// transform L2 cache keys correctly without affecting L1 keys. +// Without this, tenant isolation or schema-versioned cache keys would silently break. +func TestL2CacheKeyInterceptor(t *testing.T) { + t.Run("interceptor transforms L2 keys for entity fetch", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root datasource + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // Entity datasource - called once (cache miss on first request) + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: newProductResponseData(), + } + + loader := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, _ L2CacheKeyInterceptorInfo) string { + return "tenant-abc:" + key + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + // First request: cache miss, fetches from datasource, stores in L2 + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + cacheLog := cache.GetLog() + + // Find set operation and verify keys have prefix + var setKeys []string + for _, entry := range cacheLog { + if entry.Operation == "set" { + for _, item := range entry.Items { + setKeys = append(setKeys, item.Key) + } + } + } + // Verify L2 set key has interceptor prefix + require.Equal(t, 1, len(setKeys)) + assert.Equal(t, `tenant-abc:{"__typename":"Product","key":{"id":"prod-1"}}`, setKeys[0]) + + // Now do a second request against the same cache — should get a cache hit + // Need a new root DS that returns the same data and a new entity DS that should NOT be called + cache.ClearLog() + + ctrl2 := gomock.NewController(t) + defer ctrl2.Finish() + + rootDS2 := NewMockDataSource(ctrl2) + rootDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS2 := NewMockDataSource(ctrl2) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // Should NOT be called — cache hit + + response2 := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: newProductResponseData(), + } + + loader2 := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + } + + ctx2 := NewContext(context.Background()) + ctx2.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx2.ExecutionOptions.Caching.EnableL2Cache = true + ctx2.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, _ L2CacheKeyInterceptorInfo) string { + return "tenant-abc:" + key + } + + ar2 := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable2 := NewResolvable(ar2, ResolvableOptions{}) + err = resolvable2.Init(ctx2, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader2.LoadGraphQLResponseData(ctx2, response2, resolvable2) + require.NoError(t, err) + + cacheLog2 := cache.GetLog() + var getHits []bool + var getKeys []string + for _, entry := range cacheLog2 { + if entry.Operation == "get" { + for _, item := range entry.Items { + getKeys = append(getKeys, item.Key) + getHits = append(getHits, item.Hit) + } + } + } + // Verify L2 get key has interceptor prefix and is a hit + require.Equal(t, 1, len(getKeys)) + assert.Equal(t, `tenant-abc:{"__typename":"Product","key":{"id":"prod-1"}}`, getKeys[0]) + assert.Equal(t, true, getHits[0]) + }) + + t.Run("interceptor does NOT affect L1 keys", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root datasource + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // First entity fetch - should be called (populates L1) + entityDS1 := NewMockDataSource(ctrl) + entityDS1.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + // Second entity fetch for SAME entity - should NOT be called (L1 hit) + entityDS2 := NewMockDataSource(ctrl) + entityDS2.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + Times(0) // L1 should prevent this call + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + // First entity fetch — populates L1 + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS1, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + // Second entity fetch for SAME entity — should hit L1 cache + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS2, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: newProductResponseData(), + } + + loader := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, _ L2CacheKeyInterceptorInfo) string { + return "tenant-xyz:" + key + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // L1 worked: entityDS2 was not called (Times(0) enforced by gomock) + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + // L2 keys have the prefix + cacheLog := cache.GetLog() + var setKeys []string + for _, entry := range cacheLog { + if entry.Operation == "set" { + for _, item := range entry.Items { + setKeys = append(setKeys, item.Key) + } + } + } + // L2 keys have the interceptor prefix; L1 was unaffected (entityDS2 not called) + require.Equal(t, 1, len(setKeys)) + assert.Equal(t, `tenant-xyz:{"__typename":"Product","key":{"id":"prod-1"}}`, setKeys[0]) + }) + + t.Run("interceptor receives correct SubgraphName and CacheName", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "product-cache", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products-ds", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: newProductResponseData(), + } + + loader := &Loader{ + caches: map[string]LoaderCache{"product-cache": cache}, + } + + var capturedInfos []L2CacheKeyInterceptorInfo + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, info L2CacheKeyInterceptorInfo) string { + capturedInfos = append(capturedInfos, info) + return key // pass through unchanged + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Verify interceptor received correct metadata + require.Equal(t, 1, len(capturedInfos)) + assert.Equal(t, L2CacheKeyInterceptorInfo{ + SubgraphName: "products", + CacheName: "product-cache", + }, capturedInfos[0]) + }) + + t.Run("global prefix is prepended to L2 keys", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: newProductResponseData(), + } + + loader := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix = "schema-v42" + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + cacheLog := cache.GetLog() + var setKeys []string + for _, entry := range cacheLog { + if entry.Operation == "set" { + for _, item := range entry.Items { + setKeys = append(setKeys, item.Key) + } + } + } + require.Equal(t, 1, len(setKeys)) + // L2 key should have global prefix prepended + assert.Equal(t, `schema-v42:{"__typename":"Product","key":{"id":"prod-1"}}`, setKeys[0]) + }) + + t.Run("global prefix combined with interceptor", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: newProductResponseData(), + } + + loader := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix = "schema-v42" + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, _ L2CacheKeyInterceptorInfo) string { + return "tenant-abc:" + key + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + cacheLog := cache.GetLog() + var setKeys []string + for _, entry := range cacheLog { + if entry.Operation == "set" { + for _, item := range entry.Items { + setKeys = append(setKeys, item.Key) + } + } + } + require.Equal(t, 1, len(setKeys)) + // Interceptor wraps the already-prefixed key: interceptor(global_prefix:entity_key) + assert.Equal(t, `tenant-abc:schema-v42:{"__typename":"Product","key":{"id":"prod-1"}}`, setKeys[0]) + }) + + t.Run("nil interceptor has no effect", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: newEntityFetchSegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: newProductProvidesData(), + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: newProductResponseData(), + } + + loader := &Loader{ + caches: map[string]LoaderCache{"default": cache}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + // L2CacheKeyInterceptor is nil (default) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Product One"}}}`, out) + + // Cache keys should be in standard format (no transformation) + cacheLog := cache.GetLog() + var setKeys []string + for _, entry := range cacheLog { + if entry.Operation == "set" { + for _, item := range entry.Items { + setKeys = append(setKeys, item.Key) + } + } + } + // No transformation applied — key is in standard format + require.Equal(t, 1, len(setKeys)) + assert.Equal(t, `{"__typename":"Product","key":{"id":"prod-1"}}`, setKeys[0]) + }) +} diff --git a/v2/pkg/engine/resolve/loader.go b/v2/pkg/engine/resolve/loader.go index 0e6d7090db..a6a2f020d9 100644 --- a/v2/pkg/engine/resolve/loader.go +++ b/v2/pkg/engine/resolve/loader.go @@ -133,6 +133,95 @@ type result struct { out []byte singleFlightStats *singleFlightStats tools *batchEntityTools + + fetchInfo *FetchInfo // Stored for updateL2Cache debug context enrichment + + cache LoaderCache + cacheMustBeUpdated bool + l1CacheKeys []*CacheKey // L1 cache keys (no prefix, used for merging) + l2CacheKeys []*CacheKey // L2 cache keys (with subgraph header prefix) + cacheSkipFetch bool + cacheConfig FetchCacheConfiguration + providesData *Object // ProvidesData for alias normalization in L2 cache storage + + // Partial cache loading fields + partialCacheEnabled bool // Whether partial loading is enabled for this fetch + cachedItemIndices []int // Indices of items fully served from cache + fetchItemIndices []int // Indices of items that need to be fetched + + // Batch entity key fields — set when ArgumentIsEntityKey + list argument + batchEntityKeyMode bool // Whether this fetch uses batch entity key cache lookup + batchMergePath []string // Path to merge the assembled array (e.g. ["products"]) + batchPartialFetchEnabled bool // Whether partial fetch mode is enabled for this batch + batchCachedIndices []int // BatchIndex values of cache-hit entities + batchMissedIndices []int // BatchIndex values of cache-miss entities + + // l2AnalyticsEvents accumulates L2 cache key events per-result for goroutine safety. + // Merged into the collector on the main thread after goroutines complete. + l2AnalyticsEvents []CacheKeyEvent + + // l2EntitySources accumulates entity source records in goroutines, merged on main thread. + l2EntitySources []entitySourceRecord + + // l2FetchTimings accumulates fetch timing events in goroutines, merged on main thread. + l2FetchTimings []FetchTimingEvent + + // l2ErrorEvents accumulates error events in goroutines, merged on main thread. + l2ErrorEvents []SubgraphErrorEvent + + // l2CacheOpErrors accumulates cache operation errors in goroutines, merged on main thread. + l2CacheOpErrors []CacheOperationError + + // analyticsEntityType caches the entity type name for analytics recording. + // Set during prepareCacheKeys, used by L2 write recording. + analyticsEntityType string + + // headerHash stores the subgraph header hash computed during prepareCacheKeys. + // Non-zero only when IncludeSubgraphHeaderPrefix is true and headers exist. + // Used by updateL2Cache to record HeaderImpactEvents. + headerHash uint64 + + // includeHeaderPrefix mirrors `cfg.IncludeSubgraphHeaderPrefix && SubgraphHeadersBuilder != nil` + // for the active fetch. The headerHash alone can't distinguish "no headers forwarded" + // (hash == 0 with header partitioning ON — must still produce a "0:" prefix so the WRITE + // matches the READ) from "header partitioning OFF" (hash == 0 — must NOT add a prefix). + // Set in tryL2CacheLoad alongside headerHash; read by rootFieldL2CachePrefix. + includeHeaderPrefix bool + + // Cache trace fields — populated during cache operations, consumed by buildCacheTrace. + // Written only from the goroutine owning this result (or main thread for sequential). + cacheTraceDurationSinceStartNano int64 // when cache processing started (nanos since trace start) + cacheTraceDurationNano int64 // total cache processing time (nanos) + cacheTraceEntityCount int // total entities involved in this fetch + cacheTraceL2GetAttempted bool + cacheTraceL2SetAttempted bool // Regular entries Set + cacheTraceL2SetNegAttempted bool // Negative entries Set + cacheTraceL2GetDuration time.Duration + cacheTraceL2SetDuration time.Duration // Regular entries Set + cacheTraceL2SetNegDuration time.Duration // Negative entries Set + cacheTraceL2GetError string + cacheTraceL2SetError string + cacheTraceL2SetNegError string + cacheTraceL1Hits int + cacheTraceL1Misses int + cacheTraceRequestScopedHits int // entities satisfied by @requestScoped coordinate L1 + cacheTraceL2Hits int + cacheTraceL2Misses int + cacheTraceNegativeHits int + cacheTraceShadowHit bool // L2 had data but shadow mode forced fetch + cacheTraceEntityDetails []CacheTraceEntity + + // shadowCachedValues stores cached L2 values when shadow mode is active. + // After fresh data arrives, these are compared to detect staleness. + // Key is the index into l1CacheKeys (entity fetches) or l2CacheKeys (root fetches). + shadowCachedValues map[int]shadowCacheEntry +} + +// shadowCacheEntry holds a cached value saved during shadow mode L2 lookup. +type shadowCacheEntry struct { + cachedValue *astjson.Value // saved from L2 cache hit + cacheKey string // for correlation + remainingTTL time.Duration // remaining TTL from L2 CacheEntry (0 = unknown) } func (r *result) init(postProcessing PostProcessingConfiguration, info *FetchInfo) { @@ -142,9 +231,25 @@ func (r *result) init(postProcessing PostProcessingConfiguration, info *FetchInf ID: info.DataSourceID, Name: info.DataSourceName, } + r.fetchInfo = info } } +func (l *Loader) createOrInitResult(res *result, postProcessing PostProcessingConfiguration, info *FetchInfo) *result { + if res == nil { + res = &result{} + } + res.postProcessing = postProcessing + if info != nil { + res.ds = DataSourceInfo{ + ID: info.DataSourceID, + Name: info.DataSourceName, + } + res.fetchInfo = info + } + return res +} + func IsIntrospectionDataSource(dataSourceID string) bool { return dataSourceID == IntrospectionSchemaTypeDataSourceID || dataSourceID == IntrospectionTypeFieldsDataSourceID || dataSourceID == IntrospectionTypeEnumValuesDataSourceID } @@ -154,6 +259,12 @@ type Loader struct { ctx *Context info *GraphQLResponseInfo + caches map[string]LoaderCache + + // entityCacheConfigs maps subgraphName → entityTypeName → config. + // Used by processExtensionsCacheInvalidation to look up cache settings at runtime. + entityCacheConfigs map[string]map[string]*EntityCacheInvalidationConfig + propagateSubgraphErrors bool propagateSubgraphStatusCodes bool subgraphErrorPropagationMode SubgraphErrorPropagationMode @@ -187,9 +298,94 @@ type Loader struct { // a heap *Value could be collected while still referenced. jsonArena arena.Arena + // parser is a re-usable astjson.Parser owned by this Loader and used ONLY + // from the main thread. Its scratch slabs (counts, containerSizes, counters, + // tokenLens) grow to the high-water mark of any JSON the Loader has parsed + // and are retained across calls. Same lifetime rule as jsonArena: never touch + // from a goroutine. + parser astjson.Parser + + // transformEntries is a reusable backing slice for building + // astjson.Transform descriptors. Resliced to [:0] before each use. + // Since transforms are built and consumed synchronously on the main + // thread (never stored), a single slice suffices. + transformEntries []astjson.TransformEntry + + // transforms is a reusable backing slice for astjson.Transform + // headers (child/array transforms). Same lifecycle as transformEntries. + transforms []astjson.Transform + + // transformMetas is a reusable backing slice for per-field staging data + // (fieldMeta) used while building a Transform tree. Pre-grown in + // resetTransformSlabs to avoid per-call heap allocations. Same lifecycle + // as transformEntries / transforms. + transformMetas []fieldMeta + // singleFlight is the SubgraphRequestSingleFlight object shared across all client requests. // It's thread safe and can be used to de-duplicate subgraph requests. singleFlight *SubgraphRequestSingleFlight + + // l1Cache is the per-request entity cache (L1). + // Key: cache key string (WITHOUT subgraph header prefix) + // Value: *astjson.Value pointing into l.jsonArena (StructuralCopy on both read and write). + // Only used for entity fetches, NOT root fetches (root fields have no prior entity data). + // + // MAIN-THREAD ONLY: plain map, NOT sync.Map. Every read and write happens on the + // resolver main thread: + // - reads: tryL1CacheLoad in resolveParallel Phase 1 / resolveSingle's tryCacheLoad + // - writes: populateL1Cache / populateL1CacheForRootFieldEntities, called from + // populateCachesAfterFetch via mergeResult (Phase 4 of resolveParallel + // and in resolveSingle after a successful subgraph fetch) + // Phase 2 HTTP goroutines never touch this map — bulkL2Lookup moved the L2 read + // to the main thread, and merge/cache-population run sequentially in Phase 4. + // If you add a new access site, it must also be on the main thread. + // + // IMPORTANT: L1 writes always StructuralCopy onto l.jsonArena (with normalize + // passthrough for alias/arg normalization). Reads also StructuralCopy + // to give the consumer a fresh, mutable value owned by the current request arena. + l1Cache map[string]*astjson.Value + + // requestScopedL1 is a per-request cache for @requestScoped field values. + // Key: coordinate string (e.g. "viewer.Personalized.currentViewer") + // Value: *astjson.Value pointer to the cached field value in jsonArena. + // Separate from l1Cache which is keyed by entity cache keys. + // + // MAIN-THREAD ONLY: same lifetime and threading rules as l1Cache. Reads happen in + // tryRequestScopedInjection (Phase 1.5 / Phase 3.5 / resolveSingle), writes in + // exportRequestScopedFields (invoked from the main thread after merge). + // The same arena-lifetime rule applies here: only detached values owned by + // l.jsonArena may be stored. + requestScopedL1 map[string]*astjson.Value + + // enableMutationL2CachePopulation is set per-mutation-field in resolveSingle + // when processing a root mutation fetch. Entity fetches that follow in the + // sequence inherit this flag, checked in updateL2Cache. + // By default false: mutations do NOT populate L2 cache. + // + // Inheritance is opaque from this declaration: the flag is assigned at the + // SingleFetch branch in (*Loader).resolveSingle (loader.go, case + // *SingleFetch with OperationType == Mutation) and consumed at + // (*Loader).updateL2Cache in loader_cache.go. The mutation root sets it; + // subsequent entity fetches in the same sequence observe it until the + // next mutation root reassigns or Loader.Free() zeroes it. + enableMutationL2CachePopulation bool + // mutationCacheTTLOverride overrides the entity TTL for mutation-triggered L2 writes. + // Set per-mutation-field alongside enableMutationL2CachePopulation. + // When zero, the entity's default TTL is used. + mutationCacheTTLOverride time.Duration + + // Parallel Phase 4 defers L2 Sets so all writes for the same cache instance + // can be sent in one bulk call after every fetch has merged. + deferL2CacheWrites bool + deferredL2CacheSets []*l2CacheSetContributor +} + +// cacheOperationSource returns the CacheOperationSource based on the current operation type. +func (l *Loader) cacheOperationSource() CacheOperationSource { + if l.info != nil && l.info.OperationType == ast.OperationTypeMutation { + return CacheSourceMutation + } + return CacheSourceQuery } func (l *Loader) Free() { @@ -197,13 +393,29 @@ func (l *Loader) Free() { l.ctx = nil l.resolvable = nil l.taintedObjs = nil + l.l1Cache = nil + l.requestScopedL1 = nil + l.jsonArena = nil + l.enableMutationL2CachePopulation = false + l.mutationCacheTTLOverride = 0 + l.deferL2CacheWrites = false + l.deferredL2CacheSets = nil + // l.parser is intentionally retained — it holds no arena references and its + // scratch slabs amortize across requests. } func (l *Loader) LoadGraphQLResponseData(ctx *Context, response *GraphQLResponse, resolvable *Resolvable) (err error) { + l.enableMutationL2CachePopulation = false + l.mutationCacheTTLOverride = 0 l.resolvable = resolvable l.ctx = ctx l.info = response.Info l.taintedObjs = make(taintedObjects) + l.l1Cache = make(map[string]*astjson.Value) + l.requestScopedL1 = make(map[string]*astjson.Value) + l.deferL2CacheWrites = false + l.deferredL2CacheSets = l.deferredL2CacheSets[:0] + ctx.initCacheAnalytics() return l.resolveFetchNode(response.Fetches) } @@ -235,29 +447,212 @@ func (l *Loader) resolveParallel(nodes []*FetchTreeNode) error { } }() itemsItems := make([][]*astjson.Value, len(nodes)) - g, ctx := errgroup.WithContext(l.ctx.ctx) + + // Phase 1: Prepare cache keys + L1 check on MAIN thread for ALL nodes + // L1 stats use non-atomic operations, so they MUST be on the main thread for i := range nodes { - i := i - results[i] = &result{} + results[i] = l.createOrInitResult(nil, getFetchPostProcessing(nodes[i].Item.Fetch), getFetchInfo(nodes[i].Item.Fetch)) itemsItems[i] = l.selectItemsForPath(nodes[i].Item.FetchPath) + f := nodes[i].Item.Fetch + info := getFetchInfo(f) + cfg := getFetchCaching(f) + + // Record cache trace start time + tracingCache := l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats + if tracingCache { + results[i].cacheTraceDurationSinceStartNano = GetDurationNanoSinceTraceStart(l.ctx.ctx) + } + + // Set partial loading flag BEFORE cache lookup so tracking arrays are populated + // Shadow mode forces partial loading off - all items always fetched + if cfg.ShadowMode { + results[i].partialCacheEnabled = false + } else { + results[i].partialCacheEnabled = cfg.EnablePartialCacheLoad + } + + // Prepare cache keys for L1 and L2 + isEntityFetch, err := l.prepareCacheKeys(info, cfg, itemsItems[i], results[i]) + if err != nil { + return errors.WithStack(err) + } + + // Set entity count from cache keys + if len(results[i].l2CacheKeys) > 0 { + for _, ck := range results[i].l2CacheKeys { + results[i].cacheTraceEntityCount += len(ck.Keys) + } + } else if len(results[i].l1CacheKeys) > 0 { + for _, ck := range results[i].l1CacheKeys { + results[i].cacheTraceEntityCount += len(ck.Keys) + } + } + + // L1 Check (main thread only - not thread-safe) + // UseL1Cache flag is set by postprocessor to optimize L1 usage + if isEntityFetch && l.ctx.ExecutionOptions.Caching.EnableL1Cache && cfg.UseL1Cache && len(results[i].l1CacheKeys) > 0 { + allComplete := l.tryL1CacheLoad(info, results[i].l1CacheKeys, results[i]) + if allComplete { + // All entities found in L1 - mark to skip goroutine + results[i].cacheSkipFetch = true + } else if results[i].partialCacheEnabled && len(results[i].cachedItemIndices) > 0 { + // Partial hit with partial loading enabled - keep FromCache values + // Continue to L2/fetch for remaining items + } else { + // All-or-nothing mode OR no hits - clear FromCache for L2 to try + for _, ck := range results[i].l1CacheKeys { + ck.FromCache = nil + } + results[i].cachedItemIndices = nil + results[i].fetchItemIndices = nil + } + } + + } + + // Phase 1.5: @requestScoped coordinate L1 injection (main thread, before fetches). + // + // Iterating synchronously here and checking tryRequestScopedInjection BEFORE launching + // Phase 2 goroutines lets us skip the entire subgraph round-trip when the per-request + // L1 already holds the hinted value (populated by an earlier fetch in the same plan). + // + // Without this step, each parallel batch entity fetch would still launch an HTTP call + // even when the data is already in requestScopedL1 — the post-fetch phase below would + // then mark the fetch as LoadSkipped, but the round-trip (and its artificial latency in + // demos) has already been paid. For a query with @requestScoped currentViewer selected + // at multiple nesting depths, that means N viewer fetches where 1 would suffice. + // + // Safety: injection mutates the fetch's own `items` slice. Each node in `nodes` has its + // own disjoint `items` (different entities in the response tree), so running this on the + // main thread in a loop is free of cross-node races. The post-fetch Phase 3.5 loop is + // kept as a fallback for hints that become satisfiable later (e.g., a hint depending on + // data an in-flight goroutine is still producing). + for i := range nodes { + res := results[i] + if res.cacheSkipFetch || res.fetchSkipped { + continue + } + cfg := getFetchCaching(nodes[i].Item.Fetch) + if l.tryRequestScopedInjection(res, cfg, itemsItems[i]) { + res.fetchSkipped = true + res.cacheTraceRequestScopedHits = res.cacheTraceEntityCount + if l.ctx.TracingOptions.Enable { + ensureFetchTrace(nodes[i].Item.Fetch).LoadSkipped = true + } + } + } + + // Phase 2L2: Bulk L2 lookup on the main thread. + // Replaces the per-fetch L2 read that previously happened inside Phase 2 goroutines. + // All L2 parsing happens here on l.jsonArena via l.parser. After this call: + // - res.cacheSkipFetch is set for fetches whose L2 hits cover all entities + // - res.l2CacheKeys[].FromCache is populated for partial hits + // - res.l2AnalyticsEvents / l2FetchTimings have been accumulated + // - attachCachedOutputToTrace has been called for each cache-skip fetch + // Goroutines launched in Phase 2 below run HTTP fetches only. + if l.ctx.ExecutionOptions.Caching.EnableL2Cache { + if err := l.bulkL2Lookup(l.ctx.ctx, nodes, results); err != nil { + return errors.WithStack(err) + } + } + + // Snapshot cacheTraceDurationNano for each parallel-path result at the end of + // main-thread cache work (Phase 1 + Phase 1.5 + Phase 2L2). After this point we + // run Phase 2 HTTP goroutines and Phase 3.5/4 merge — those are NOT cache work, + // so including them in the cache duration (as the lazy fallback in buildCacheTrace + // used to do) reported the entire batch wall-clock under the L1/requestScoped + // "duration_nanoseconds" field. The playground then displayed multi-millisecond + // "L1 hit" timings, even though the actual L1/coordinate-cache lookups on the main + // thread take a handful of microseconds. Capturing now gives every result an + // accurate cache-work duration regardless of how slow any sibling HTTP fetch is. + if l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats { + nowNs := GetDurationNanoSinceTraceStart(l.ctx.ctx) + for i := range results { + if results[i].cacheTraceDurationSinceStartNano > 0 && results[i].cacheTraceDurationNano == 0 { + results[i].cacheTraceDurationNano = nowNs - results[i].cacheTraceDurationSinceStartNano + } + } + } + + // Phase 2: Parallel HTTP fetches for nodes that didn't fully hit L1 or L2. + g, ctx := errgroup.WithContext(l.ctx.ctx) + for i := range nodes { f := nodes[i].Item.Fetch item := nodes[i].Item items := itemsItems[i] res := results[i] + + // Skip goroutine if L1 was a complete hit, or if Phase 1.5 already + // satisfied this fetch from the @requestScoped coordinate L1, or if + // bulkL2Lookup already satisfied the fetch. + if res.cacheSkipFetch || res.fetchSkipped { + continue + } + + // Goroutine thread-safety contract: the spawned goroutine does HTTP + // only — it must never allocate on l.jsonArena, parse JSON, or touch + // l.parser, l1Cache, or requestScopedL1. The arena is not + // thread-safe and has no goroutine-arena pool anymore; the raw + // subgraph []byte is stashed on *result and parsed on the main + // thread in Phase 4 via mergeResult → parseBytesWithArena. + // See v2/pkg/engine/resolve/CLAUDE.md §"Thread Safety Model". g.Go(func() error { - return l.loadFetch(ctx, f, item, items, res) + return l.loadFetchHTTP(ctx, f, item, items, res) }) } err := g.Wait() if err != nil { return errors.WithStack(err) } + + // Phase 3: Merge L2 analytics events and entity sources from goroutines (main thread) + if l.ctx.cacheAnalyticsEnabled() { + for i := range results { + if len(results[i].l2AnalyticsEvents) > 0 { + l.ctx.cacheAnalytics.MergeL2Events(results[i].l2AnalyticsEvents) + } + if len(results[i].l2EntitySources) > 0 { + l.ctx.cacheAnalytics.MergeEntitySources(results[i].l2EntitySources) + } + if len(results[i].l2FetchTimings) > 0 { + l.ctx.cacheAnalytics.MergeL2FetchTimings(results[i].l2FetchTimings) + } + if len(results[i].l2ErrorEvents) > 0 { + l.ctx.cacheAnalytics.MergeL2Errors(results[i].l2ErrorEvents) + } + if len(results[i].l2CacheOpErrors) > 0 { + l.ctx.cacheAnalytics.MergeL2CacheOpErrors(results[i].l2CacheOpErrors) + } + } + } + + // Phase 3.5: RequestScoped injection (main thread, after all fetches complete) + // This runs between fetch completion and merge so injected data doesn't interfere + // with other parallel fetches that are still populating entity items. + for i := range results { + if !results[i].cacheSkipFetch && !results[i].fetchSkipped { + cfg := getFetchCaching(nodes[i].Item.Fetch) + if l.tryRequestScopedInjection(results[i], cfg, itemsItems[i]) { + results[i].fetchSkipped = true + results[i].cacheTraceRequestScopedHits = results[i].cacheTraceEntityCount + if l.ctx.TracingOptions.Enable { + ensureFetchTrace(nodes[i].Item.Fetch).LoadSkipped = true + } + } + } + } + + // Phase 4: Merge results (main thread) + l.deferL2CacheWrites = true + l.deferredL2CacheSets = l.deferredL2CacheSets[:0] for i := range results { if results[i].nestedMergeItems != nil { for j := range results[i].nestedMergeItems { err = l.mergeResult(nodes[i].Item, results[i].nestedMergeItems[j], itemsItems[i][j:j+1]) l.callOnFinished(results[i].nestedMergeItems[j]) if err != nil { + l.deferL2CacheWrites = false + l.deferredL2CacheSets = nil return errors.WithStack(err) } } @@ -265,9 +660,20 @@ func (l *Loader) resolveParallel(nodes []*FetchTreeNode) error { err = l.mergeResult(nodes[i].Item, results[i], itemsItems[i]) l.callOnFinished(results[i]) if err != nil { + l.deferL2CacheWrites = false + l.deferredL2CacheSets = nil return errors.WithStack(err) } } + // Export requestScoped fields after merge (main thread) + l.exportRequestScopedFields(results[i], getFetchCaching(nodes[i].Item.Fetch), itemsItems[i]) + } + deferredL2CacheSets := l.deferredL2CacheSets + l.deferL2CacheWrites = false + l.deferredL2CacheSets = nil + l.writeL2CacheSetContributors(deferredL2CacheSets) + for i := range results { + l.attachCacheTrace(nodes[i].Item.Fetch, results[i], getFetchCaching(nodes[i].Item.Fetch)) } return nil } @@ -290,44 +696,365 @@ func (l *Loader) resolveSingle(item *FetchItem) error { switch f := item.Fetch.(type) { case *SingleFetch: - res := &result{} - err := l.loadSingleFetch(l.ctx.ctx, f, item, items, res) + // Propagate mutation field cache config to loader for child entity fetches. + // Each mutation root fetch updates this flag; subsequent entity fetches inherit it. + // This is the inheritance site for Loader.enableMutationL2CachePopulation + // (field declared in the Loader struct above); consumed in updateL2Cache. + if f.Info != nil && f.Info.OperationType == ast.OperationTypeMutation { + l.enableMutationL2CachePopulation = f.Caching.EnableMutationL2CachePopulation + l.mutationCacheTTLOverride = f.Caching.MutationCacheTTLOverride + } + // Empty list / null key short-circuit for batch entity key lookups. + // When ArgumentIsEntityKey is true and the argument is [] or null, + // return an empty response without calling the resolver or cache. + // This is a fetch-level optimization, not a caching feature. + if argPath := f.Caching.batchEntityKeyArgumentPath(); len(argPath) > 0 { + argValue := resolveArgumentValue(l.ctx, argPath) + if argValue == nil || argValue.Type() == astjson.TypeNull { + return l.mergeBatchEmptyResponse(item, f, items) + } + if argValue.Type() == astjson.TypeArray && len(argValue.GetArray()) == 0 { + return l.mergeBatchEmptyResponse(item, f, items) + } + } + res := l.createOrInitResult(nil, f.PostProcessing, f.Info) + skip, err := l.tryCacheLoad(l.ctx.ctx, f.Info, f.Caching, items, res) if err != nil { - return err + return errors.WithStack(err) + } + if !skip { + if l.tryRequestScopedInjection(res, f.Caching, items) { + res.cacheTraceRequestScopedHits = res.cacheTraceEntityCount + if l.ctx.TracingOptions.Enable { + ensureFetchTrace(f).LoadSkipped = true + } + l.exportRequestScopedFields(res, f.Caching, items) + l.attachCacheTrace(f, res, f.Caching) + return nil + } } + if !skip { + // Batch partial fetch filtering is handled inside loadSingleFetch + err = l.loadSingleFetch(l.ctx.ctx, f, item, items, res) + if err != nil { + return err + } + } else if l.ctx.TracingOptions.Enable { + // Cache hit covered everything — the subgraph was not called. Mirror the + // LoadSkipped reporting that the bulk-parallel paths (resolveParallel) and + // the @requestScoped injection branch above already do, so observability + // can distinguish "served from cache" from "fetched fresh". + ensureFetchTrace(f).LoadSkipped = true + } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) + l.exportRequestScopedFields(res, f.Caching, items) l.callOnFinished(res) + l.attachCacheTrace(f, res, f.Caching) return err case *BatchEntityFetch: - res := &result{} + res := l.createOrInitResult(nil, f.PostProcessing, f.Info) defer batchEntityToolPool.Put(res.tools) - err := l.loadBatchEntityFetch(l.ctx.ctx, item, f, items, res) + skip, err := l.tryCacheLoad(l.ctx.ctx, f.Info, f.Caching, items, res) if err != nil { return errors.WithStack(err) } + if !skip { + if l.tryRequestScopedInjection(res, f.Caching, items) { + // Data was injected directly onto items — skip fetch AND merge + res.cacheTraceRequestScopedHits = res.cacheTraceEntityCount + if l.ctx.TracingOptions.Enable { + ensureFetchTrace(f).LoadSkipped = true + } + l.attachCachedOutputToTrace(f, res) + l.exportRequestScopedFields(res, f.Caching, items) + l.attachCacheTrace(f, res, f.Caching) + return nil + } + } + if !skip { + err = l.loadBatchEntityFetch(l.ctx.ctx, item, f, items, res) + if err != nil { + return errors.WithStack(err) + } + } else { + l.attachCachedOutputToTrace(f, res) + if l.ctx.TracingOptions.Enable { + // Cache hit covered every entity in the batch — record LoadSkipped. + ensureFetchTrace(f).LoadSkipped = true + } + } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) + l.exportRequestScopedFields(res, f.Caching, items) l.callOnFinished(res) + l.attachCacheTrace(f, res, f.Caching) return err case *EntityFetch: - res := &result{} - err := l.loadEntityFetch(l.ctx.ctx, item, f, items, res) + res := l.createOrInitResult(nil, f.PostProcessing, f.Info) + skip, err := l.tryCacheLoad(l.ctx.ctx, f.Info, f.Caching, items, res) if err != nil { return errors.WithStack(err) } + if !skip { + if l.tryRequestScopedInjection(res, f.Caching, items) { + res.cacheTraceRequestScopedHits = res.cacheTraceEntityCount + if l.ctx.TracingOptions.Enable { + ensureFetchTrace(f).LoadSkipped = true + } + l.attachCachedOutputToTrace(f, res) + l.exportRequestScopedFields(res, f.Caching, items) + l.attachCacheTrace(f, res, f.Caching) + return nil + } + } + if !skip { + err = l.loadEntityFetch(l.ctx.ctx, item, f, items, res) + if err != nil { + return errors.WithStack(err) + } + } else { + l.attachCachedOutputToTrace(f, res) + if l.ctx.TracingOptions.Enable { + // Cache hit covered the entity — record LoadSkipped. + ensureFetchTrace(f).LoadSkipped = true + } + } + l.mergeResultAnalytics(res) err = l.mergeResult(item, res, items) + l.exportRequestScopedFields(res, f.Caching, items) l.callOnFinished(res) + l.attachCacheTrace(f, res, f.Caching) return err default: return nil } } +// mergeResultAnalytics merges analytics events accumulated on a result into the collector. +// In resolveParallel, this happens in bulk after all goroutines complete. +// In resolveSingle, we must call this per-result since there's no bulk merge phase. +func (l *Loader) mergeResultAnalytics(res *result) { + if !l.ctx.cacheAnalyticsEnabled() { + return + } + if len(res.l2FetchTimings) > 0 { + l.ctx.cacheAnalytics.MergeL2FetchTimings(res.l2FetchTimings) + } + if len(res.l2ErrorEvents) > 0 { + l.ctx.cacheAnalytics.MergeL2Errors(res.l2ErrorEvents) + } + if len(res.l2CacheOpErrors) > 0 { + l.ctx.cacheAnalytics.MergeL2CacheOpErrors(res.l2CacheOpErrors) + } +} + func (l *Loader) callOnFinished(res *result) { if l.ctx.LoaderHooks != nil && res.loaderHookContext != nil { l.ctx.LoaderHooks.OnFinished(res.loaderHookContext, res.ds, newResponseInfo(res, l.ctx.subgraphErrors)) } } +// buildCacheTrace constructs a CacheTrace from the result's accumulated cache trace fields. +// MUST be called AFTER mergeResult + populateCachesAfterFetch, when final state is known. +func (l *Loader) buildCacheTrace(res *result, cfg FetchCacheConfiguration) *CacheTrace { + if !l.ctx.TracingOptions.Enable || l.ctx.TracingOptions.ExcludeCacheStats { + return nil + } + if cfg.CacheKeyTemplate == nil { + return nil + } + + // Cache duration is captured inline: + // - Sequential path: tryCacheLoad's defer (loader_cache.go ~689) records start→end + // of all cache work for this fetch (prepareCacheKeys + L1 + L2). + // - Parallel path: resolveParallel snapshots duration after Phase 2L2 completes, + // i.e. at the end of main-thread cache work, BEFORE Phase 2 HTTP goroutines run. + // Both paths set res.cacheTraceDurationNano at a point where all cache work for the + // fetch is done, so there is no fallback-compute here. An earlier version of this + // function computed a wall-clock delta at attachCacheTrace time, which for parallel + // batches reported the slowest sibling's HTTP fetch as the "L1 hit duration". + + // Fold @requestScoped coordinate L1 hits into the L1 counters. + // Entities satisfied by requestScoped injection were recorded as L1 misses + // during Phase 1 (entity L1 check). Now that requestScoped has run, convert + // those misses to hits so the trace accurately reflects L1-level cache efficiency. + l1Hits := res.cacheTraceL1Hits + res.cacheTraceRequestScopedHits + l1Misses := max(res.cacheTraceL1Misses-res.cacheTraceRequestScopedHits, 0) + + ct := &CacheTrace{ + DurationSinceStartNano: res.cacheTraceDurationSinceStartNano, + DurationSinceStartPretty: time.Duration(res.cacheTraceDurationSinceStartNano).String(), + DurationNano: res.cacheTraceDurationNano, + DurationPretty: time.Duration(res.cacheTraceDurationNano).String(), + L1Enabled: cfg.UseL1Cache && l.ctx.ExecutionOptions.Caching.EnableL1Cache, + L2Enabled: cfg.Enabled && l.ctx.ExecutionOptions.Caching.EnableL2Cache && res.cache != nil, + CacheName: cfg.CacheName, + TTLSeconds: int64(cfg.TTL.Seconds()), + EntityCount: res.cacheTraceEntityCount, + L1Hit: l1Hits, + L1Miss: l1Misses, + L2Hit: res.cacheTraceL2Hits, + L2Miss: res.cacheTraceL2Misses, + NegativeCacheHits: res.cacheTraceNegativeHits, + PartialCacheLoad: cfg.EnablePartialCacheLoad, + ShadowMode: cfg.ShadowMode, + ShadowHit: res.cacheTraceShadowHit, + IncludeSubgraphHeaderPrefix: cfg.IncludeSubgraphHeaderPrefix, + } + + if res.cacheTraceL2GetDuration > 0 { + ct.L2GetDurationNano = res.cacheTraceL2GetDuration.Nanoseconds() + ct.L2GetDurationPretty = res.cacheTraceL2GetDuration.String() + } + if res.cacheTraceL2SetDuration > 0 { + ct.L2SetDurationNano = res.cacheTraceL2SetDuration.Nanoseconds() + ct.L2SetDurationPretty = res.cacheTraceL2SetDuration.String() + } + if res.cacheTraceL2SetNegDuration > 0 { + ct.L2SetNegativeDurationNano = res.cacheTraceL2SetNegDuration.Nanoseconds() + ct.L2SetNegativeDurationPretty = res.cacheTraceL2SetNegDuration.String() + } + + ct.L2GetError = res.cacheTraceL2GetError + ct.L2SetError = res.cacheTraceL2SetError + ct.L2SetNegativeError = res.cacheTraceL2SetNegError + + if len(res.cacheTraceEntityDetails) > 0 { + ct.Entities = res.cacheTraceEntityDetails + } + + if !l.ctx.TracingOptions.ExcludeRawInputData { + keys := res.l2CacheKeys + if len(keys) == 0 { + keys = res.l1CacheKeys + } + for _, ck := range keys { + ct.Keys = append(ct.Keys, ck.Keys...) + } + } + + if l.ctx.TracingOptions.EnablePredictableDebugTimings { + ct.DurationSinceStartNano = 1 + ct.DurationSinceStartPretty = "1ns" + ct.DurationNano = 1 + ct.DurationPretty = "1ns" + if res.cacheTraceL2GetAttempted { + ct.L2GetDurationNano = 1 + ct.L2GetDurationPretty = "1ns" + } + if res.cacheTraceL2SetAttempted { + ct.L2SetDurationNano = 1 + ct.L2SetDurationPretty = "1ns" + } + if res.cacheTraceL2SetNegAttempted { + ct.L2SetNegativeDurationNano = 1 + ct.L2SetNegativeDurationPretty = "1ns" + } + } + + return ct +} + +// ensureFetchTrace ensures the fetch has a Trace allocated. +// Required for cache-hit paths where load*Fetch is skipped. +func ensureFetchTrace(fetch Fetch) *DataSourceLoadTrace { + switch f := fetch.(type) { + case *SingleFetch: + if f.Trace == nil { + f.Trace = &DataSourceLoadTrace{} + } + return f.Trace + case *EntityFetch: + if f.Trace == nil { + f.Trace = &DataSourceLoadTrace{} + } + return f.Trace + case *BatchEntityFetch: + if f.Trace == nil { + f.Trace = &DataSourceLoadTrace{} + } + return f.Trace + } + return nil +} + +// attachCacheTrace builds and attaches CacheTrace to the fetch's trace. +// MUST be called AFTER mergeResult + populateCachesAfterFetch. +// Zero overhead when tracing is disabled or ExcludeCacheStats is true. +// attachCachedOutputToTrace serializes the cached entity values into a +// synthetic _entities response and stores it as trace output. This makes +// cache-hit fetch responses visible in ART traces so dev tools (like the +// playground's cache explorer) can show per-fetch response diffs between +// cached and uncached runs. Only called when tracing is enabled. +func (l *Loader) attachCachedOutputToTrace(fetch Fetch, res *result) { + if !l.ctx.TracingOptions.Enable || l.ctx.TracingOptions.ExcludeOutput { + return + } + trace := ensureFetchTrace(fetch) + if trace == nil { + return + } + // Build a synthetic {"data":{"_entities":[...]}} from cached values. + // After tryL2CacheLoad, cached values are copied to l1CacheKeys[].FromCache + // (line ~1094 in loader_cache.go). Check both arrays in case only one is + // populated. Also check l2CacheKeys directly for the L2-only path. + var entities [][]byte + for _, ck := range res.l1CacheKeys { + if ck.FromCache != nil { + entities = append(entities, ck.FromCache.MarshalTo(nil)) + } + } + if len(entities) == 0 { + for _, ck := range res.l2CacheKeys { + if ck.FromCache != nil { + entities = append(entities, ck.FromCache.MarshalTo(nil)) + } + } + } + if len(entities) == 0 { + return + } + if len(entities) == 0 { + return + } + // Assemble: {"data":{"_entities":[,,...]}} + // Each `e` came from astjson.Value.MarshalTo(nil), which emits compact, dedup-key + // JSON directly. No whitespace to compact, no duplicate keys to dedupe. So we + // emit the buffer as-is — skipping compactJSON's json.Compact + ParseBytes + + // DeduplicateObjectKeysRecursively + MarshalTo round-trip, which dominated + // Phase 2L2 cost on ART-traced cache-hit requests (measured ~1–2ms per parallel + // batch on a 32-entity L2 hit chain, vs. ~400µs with this step skipped). + totalLen := len(`{"data":{"_entities":[]}}`) + for _, e := range entities { + totalLen += len(e) + 1 // entity + separator + } + buf := make([]byte, 0, totalLen) + buf = append(buf, `{"data":{"_entities":[`...) + for i, e := range entities { + if i > 0 { + buf = append(buf, ',') + } + buf = append(buf, e...) + } + buf = append(buf, ']', '}', '}') + trace.Output = buf +} + +func (l *Loader) attachCacheTrace(fetch Fetch, res *result, cfg FetchCacheConfiguration) { + if !l.ctx.TracingOptions.Enable || l.ctx.TracingOptions.ExcludeCacheStats { + return + } + ct := l.buildCacheTrace(res, cfg) + if ct == nil { + return + } + trace := ensureFetchTrace(fetch) + if trace != nil { + trace.CacheTrace = ct + } +} + func (l *Loader) selectItemsForPath(path []FetchItemPathElement) []*astjson.Value { // Use arena allocation for the initial items slice items := arena.AllocateSlice[*astjson.Value](l.jsonArena, 1, 1) @@ -416,13 +1143,25 @@ func (l *Loader) itemsData(items []*astjson.Value) *astjson.Value { return arr } -func (l *Loader) loadFetch(ctx context.Context, fetch Fetch, fetchItem *FetchItem, items []*astjson.Value, res *result) error { +// loadFetchHTTP loads data assuming L1/L2 cache checks have already happened +// on the main thread. This function runs inside a goroutine and only performs +// HTTP I/O via the underlying DataSource. Response parsing happens later in +// mergeResult on the main thread. +func (l *Loader) loadFetchHTTP(ctx context.Context, fetch Fetch, fetchItem *FetchItem, items []*astjson.Value, res *result) error { + // If L1/L2 was a complete hit, skip everything. + if res.cacheSkipFetch { + return nil + } + // Perform actual fetch switch f := fetch.(type) { case *SingleFetch: + res = l.createOrInitResult(res, f.PostProcessing, f.Info) return l.loadSingleFetch(ctx, f, fetchItem, items, res) case *EntityFetch: + res = l.createOrInitResult(res, f.PostProcessing, f.Info) return l.loadEntityFetch(ctx, fetchItem, f, items, res) case *BatchEntityFetch: + res = l.createOrInitResult(res, f.PostProcessing, f.Info) return l.loadBatchEntityFetch(ctx, fetchItem, f, items, res) } return nil @@ -450,38 +1189,353 @@ func (e ErrMergeResult) Error() string { return fmt.Sprintf("unable to merge results from subgraph %s", e.Subgraph) } -func (l *Loader) mergeResult(fetchItem *FetchItem, res *result, items []*astjson.Value) error { - if res.err != nil { - return l.renderErrorsFailedToFetch(fetchItem, res, failedToFetchNoReason) +// mergeBatchCacheHit assembles cached entities into a JSON array and merges into items. +// Called when cacheSkipFetch=true and batchEntityKeyMode=true (all batch keys hit cache). +// The cache keys are in l2CacheKeys, each with FromCache pointing to entity-level data +// already wrapped at EntityMergePath during tryL2CacheLoad. +func (l *Loader) mergeBatchCacheHit(fetchItem *FetchItem, res *result, items []*astjson.Value) error { + // Determine the maximum batch index to size the result array + cacheKeys := res.l2CacheKeys + if len(cacheKeys) == 0 { + cacheKeys = res.l1CacheKeys } - if res.authorizationRejected { - err := l.renderAuthorizationRejectedErrors(fetchItem, res) - if err != nil { - return err + maxIndex := -1 + for _, ck := range cacheKeys { + if ck.BatchIndex > maxIndex { + maxIndex = ck.BatchIndex } - trueValue := astjson.TrueValue(l.jsonArena) - skipErrorsPath := make([]string, len(res.postProcessing.MergePath)+1) - copy(skipErrorsPath, res.postProcessing.MergePath) - skipErrorsPath[len(skipErrorsPath)-1] = "__skipErrors" - for _, item := range items { - astjson.SetValue(l.jsonArena, item, trueValue, skipErrorsPath...) + } + if maxIndex < 0 { + responseData := astjson.ObjectValue(l.jsonArena) + fieldName := "" + if res.fetchInfo != nil && len(res.fetchInfo.RootFields) > 0 { + fieldName = res.fetchInfo.RootFields[0].FieldName + } + if fieldName != "" { + // Preserve the subgraph response shape for an empty batch, e.g. {"products":[]}. + responseData.Set(l.jsonArena, fieldName, astjson.ArrayValue(l.jsonArena)) + } + if len(items) == 0 { + // Root-level merge: replace the response data directly. + l.resolvable.data = responseData + return nil + } + if len(items) == 1 { + var err error + // Nested merge: attach the empty shaped response at the configured batch merge path. + items[0], err = astjson.MergeValuesWithPath(l.jsonArena, items[0], responseData, res.batchMergePath...) + if err != nil { + return l.renderErrorsFailedToFetch(fetchItem, res, "batch cache merge failed") + } } return nil } - if res.rateLimitRejected { - err := l.renderRateLimitRejectedErrors(fetchItem, res) + + // Build the entity array. EntityMergePath wrapping was done during L2 load, + // so FromCache has the entity at the merge path (e.g. {"products": {...entity...}}). + // We need to extract entity-level data for the array. + entityArray := astjson.ArrayValue(l.jsonArena) + for i := 0; i <= maxIndex; i++ { + entityArray.SetArrayItem(l.jsonArena, i, astjson.NullValue) + } + // Determine the entity extraction path from EntityMergePath (set by prepareCacheKeys). + // This is the path used to extract/wrap entity data in the cache (e.g., ["products"]). + var entityMergePath []string + if len(cacheKeys) > 0 { + entityMergePath = cacheKeys[0].EntityMergePath + } + + for _, ck := range cacheKeys { + if ck.FromCache == nil { + continue + } + // Extract entity from the EntityMergePath wrapper applied during L2 load + entity := ck.FromCache + if len(entityMergePath) > 0 { + if inner := ck.FromCache.Get(entityMergePath...); inner != nil { + entity = inner + } + } + // Cached entities may be backed by a per-goroutine arena. Detach them onto + // the loader's response arena before splicing them into the final response tree. + entityArray.SetArrayItem(l.jsonArena, ck.BatchIndex, l.parser.StructuralCopy(l.jsonArena, entity)) + } + + // Build a response object that mirrors the subgraph response shape: + // {"fieldName": [entity1, entity2, ...]} + // Then merge it at MergePath into items. + responseData := astjson.ObjectValue(l.jsonArena) + if len(entityMergePath) > 0 { + // Set the array under the entity merge path (e.g., {"products": [...]}) + current := responseData + for i := 0; i < len(entityMergePath)-1; i++ { + next := astjson.ObjectValue(l.jsonArena) + current.Set(l.jsonArena, entityMergePath[i], next) + current = next + } + current.Set(l.jsonArena, entityMergePath[len(entityMergePath)-1], entityArray) + } + + responseValue := responseData + if selectsBatchEntityArrayResult(res.postProcessing.SelectResponseDataPath) || + slices.Equal(res.batchMergePath, entityMergePath) { + responseValue = entityArray + } + + if len(items) == 0 { + l.resolvable.data = responseData + } else if len(items) == 1 { + var err error + items[0], err = astjson.MergeValuesWithPath(l.jsonArena, items[0], responseValue, res.batchMergePath...) if err != nil { - return err + return l.renderErrorsFailedToFetch(fetchItem, res, "batch cache merge failed") } - trueValue := astjson.TrueValue(l.jsonArena) - skipErrorsPath := make([]string, len(res.postProcessing.MergePath)+1) - copy(skipErrorsPath, res.postProcessing.MergePath) - skipErrorsPath[len(skipErrorsPath)-1] = "__skipErrors" - for _, item := range items { - astjson.SetValue(l.jsonArena, item, trueValue, skipErrorsPath...) + } + + if res.cacheMustBeUpdated { + l.updateL2Cache(res) + } + return nil +} + +// populateBatchCacheKeysFromResponse extracts individual entities from the response array +// and sets each batch CacheKey's Item for cache population. +// Called after mergeResult for batch entity key fetches. +func (l *Loader) populateBatchCacheKeysFromResponse(res *result, items []*astjson.Value, info *FetchInfo) { + if !res.batchEntityKeyMode || len(items) == 0 { + return + } + + // Navigate to the response array. For root fields, the response is merged into items[0] + // at the MergePath, then the actual array is under the root field name. + // E.g., for products(upcs: ...), items[0] = {"products": [entity1, entity2, ...]} + var arrayPath []string + arrayPath = append(arrayPath, res.batchMergePath...) + if info != nil && len(info.RootFields) > 0 { + arrayPath = append(arrayPath, info.RootFields[0].FieldName) + } + + responseArray := items[0].Get(arrayPath...) + if responseArray == nil || responseArray.Type() != astjson.TypeArray { + return + } + elements := responseArray.GetArray() + + // In partial fetch mode, skip setting Items for cached indices. + // This ensures cacheKeysToEntriesBatch only writes fresh entities. + var cachedSet map[int]struct{} + if res.batchPartialFetchEnabled && len(res.batchCachedIndices) > 0 { + cachedSet = make(map[int]struct{}, len(res.batchCachedIndices)) + for _, idx := range res.batchCachedIndices { + cachedSet[idx] = struct{}{} + } + } + + // Set each CacheKey's Item to the corresponding array element + for _, ck := range res.l2CacheKeys { + if ck.BatchIndex >= 0 && ck.BatchIndex < len(elements) { + if cachedSet != nil { + if _, isCached := cachedSet[ck.BatchIndex]; isCached { + continue // Skip: already cached, don't re-write + } + } + ck.Item = elements[ck.BatchIndex] + // Clear EntityMergePath — Item already points to entity-level data within the array + ck.EntityMergePath = nil + } + } + for _, ck := range res.l1CacheKeys { + if ck.BatchIndex >= 0 && ck.BatchIndex < len(elements) { + if cachedSet != nil { + if _, isCached := cachedSet[ck.BatchIndex]; isCached { + continue + } + } + ck.Item = elements[ck.BatchIndex] + ck.EntityMergePath = nil + } + } +} + +// mergeBatchPartialResponse interleaves cached entities with fresh subgraph results +// for partial batch fetch. The subgraph response only contains the missed entities, +// and this function rebuilds the full array in original input order. +func (l *Loader) mergeBatchPartialResponse(res *result, items []*astjson.Value, info *FetchInfo) { + if len(items) == 0 { + return + } + + // Navigate to the response array in the merged items + var arrayPath []string + arrayPath = append(arrayPath, res.batchMergePath...) + if info != nil && len(info.RootFields) > 0 { + arrayPath = append(arrayPath, info.RootFields[0].FieldName) + } + + freshArray := items[0].Get(arrayPath...) + if freshArray == nil || freshArray.Type() != astjson.TypeArray { + return + } + freshElements := freshArray.GetArray() + + // Determine total array size from all batch indices + allIndices := append(res.batchCachedIndices, res.batchMissedIndices...) + maxIndex := -1 + for _, idx := range allIndices { + if idx > maxIndex { + maxIndex = idx + } + } + if maxIndex < 0 { + return + } + + // Build sets for cached and missed indices + cachedSet := make(map[int]struct{}, len(res.batchCachedIndices)) + for _, idx := range res.batchCachedIndices { + cachedSet[idx] = struct{}{} + } + + // Build the complete array + completeArray := astjson.ArrayValue(l.jsonArena) + freshIdx := 0 + for i := 0; i <= maxIndex; i++ { + if _, isCached := cachedSet[i]; isCached { + // Find the cached entity from L2 cache keys + var entity *astjson.Value + for _, ck := range res.l2CacheKeys { + if ck.BatchIndex == i && ck.FromCache != nil { + entity = ck.FromCache + break + } + } + if entity != nil { + completeArray.SetArrayItem(l.jsonArena, i, l.parser.StructuralCopy(l.jsonArena, entity)) + } else { + completeArray.SetArrayItem(l.jsonArena, i, astjson.NullValue) + } + } else { + // Fresh entity from subgraph response + if freshIdx < len(freshElements) { + completeArray.SetArrayItem(l.jsonArena, i, freshElements[freshIdx]) + freshIdx++ + } else { + completeArray.SetArrayItem(l.jsonArena, i, astjson.NullValue) + } + } + } + + // Replace the response array with the complete interleaved array + if len(arrayPath) > 0 { + parent := items[0] + for i := 0; i < len(arrayPath)-1; i++ { + parent = parent.Get(arrayPath[i]) + if parent == nil { + return + } + } + parent.Set(l.jsonArena, arrayPath[len(arrayPath)-1], completeArray) + } +} + +// filterBatchVariablesForPartialFetch builds a cloned resolve context whose batch +// list argument contains only the missed IDs for this partial fetch. +func (l *Loader) filterBatchVariablesForPartialFetch(res *result, f *SingleFetch) (*Context, error) { + argPath := f.Caching.batchEntityKeyArgumentPath() + if len(argPath) == 0 { + return nil, nil + } + + filteredVariables, err := cloneVariablesWithBatchIndices(l.ctx, argPath, res.batchMissedIndices) + if err != nil || filteredVariables == nil { + return nil, err + } + + renderCtx := l.ctx.clone(l.ctx.ctx) + renderCtx.Variables = filteredVariables + return renderCtx, nil +} + +// mergeBatchEmptyResponse handles the empty list / null key short-circuit for batch entity key lookups. +// Constructs a response with an empty array at the root field path and merges it into items. +func (l *Loader) mergeBatchEmptyResponse(_ *FetchItem, f *SingleFetch, items []*astjson.Value) error { + // Build a response object that mimics what the subgraph would return: + // For products(upcs: []), the subgraph would return {"products": []} + // After SelectResponseDataPath, this becomes the responseData. + // We need to produce the same shape for normal merge to work. + var fieldName string + if f.Info != nil && len(f.Info.RootFields) > 0 { + fieldName = f.Info.RootFields[0].FieldName + } + + emptyArray := astjson.ArrayValue(l.jsonArena) + if fieldName != "" { + // Build {"fieldName": []} and merge at MergePath + responseData := astjson.ObjectValue(l.jsonArena) + responseData.Set(l.jsonArena, fieldName, emptyArray) + if len(items) == 0 { + l.resolvable.data = responseData + } else if len(items) == 1 { + items[0], _ = astjson.MergeValuesWithPath(l.jsonArena, items[0], responseData, f.PostProcessing.MergePath...) + } + } else { + // No field name available — merge empty array at MergePath directly + if len(items) == 1 { + items[0], _ = astjson.MergeValuesWithPath(l.jsonArena, items[0], emptyArray, f.PostProcessing.MergePath...) + } + } + return nil +} + +func (l *Loader) mergeResult(fetchItem *FetchItem, res *result, items []*astjson.Value) error { + if res.err != nil { + return l.renderErrorsFailedToFetch(fetchItem, res, failedToFetchNoReason) + } + if rejected, err := l.evaluateRejected(fetchItem, res, items); err != nil || rejected { + return err + } + if res.cacheSkipFetch { + // Batch entity key cache hit: assemble cached entities into an array response. + if res.batchEntityKeyMode { + return l.mergeBatchCacheHit(fetchItem, res, items) + } + // Merge cached data into items + for _, key := range res.l1CacheKeys { + if key.FromCache == nil { + continue + } + // Negative cache hit: subgraph has nothing for this entity, skip merge. + // MergeValues(object, null) would discard the null anyway (astjson behavior). + if key.FromCache.Type() == astjson.TypeNull { + continue + } + // Merge cached data into item + _, err := astjson.MergeValues(l.jsonArena, key.Item, l.parser.StructuralCopy(l.jsonArena, key.FromCache)) + if err != nil { + return l.renderErrorsFailedToFetch(fetchItem, res, "invalid cache item") + } + } + if res.cacheMustBeUpdated { + l.updateL2Cache(res) } return nil } + + // Handle partial cache loading: merge cached items first + if res.partialCacheEnabled && len(res.cachedItemIndices) > 0 { + for _, idx := range res.cachedItemIndices { + if idx < len(res.l1CacheKeys) && res.l1CacheKeys[idx] != nil && res.l1CacheKeys[idx].FromCache != nil { + // Negative cache hit: skip merge (subgraph has nothing for this entity) + if res.l1CacheKeys[idx].FromCache.Type() == astjson.TypeNull { + continue + } + _, err := astjson.MergeValues(l.jsonArena, res.l1CacheKeys[idx].Item, l.parser.StructuralCopy(l.jsonArena, res.l1CacheKeys[idx].FromCache)) + if err != nil { + return l.renderErrorsFailedToFetch(fetchItem, res, "invalid cache item") + } + } + } + } if res.fetchSkipped { return nil } @@ -499,6 +1553,10 @@ func (l *Loader) mergeResult(fetchItem *FetchItem, res *result, items []*astjson return l.renderErrorsFailedToFetch(fetchItem, res, invalidGraphQLResponse) } + // Extract cache invalidation signal from subgraph response extensions. + // This is not restricted to mutations — any subgraph response can signal invalidation. + cacheInvalidation := response.Get("extensions", "cacheInvalidation") + var responseData *astjson.Value if res.postProcessing.SelectResponseDataPath != nil { responseData = response.Get(res.postProcessing.SelectResponseDataPath...) @@ -537,13 +1595,14 @@ func (l *Loader) mergeResult(fetchItem *FetchItem, res *result, items []*astjson } // Check if data needs processing. - if res.postProcessing.SelectResponseDataPath != nil && astjson.ValueIsNull(responseData) { - // First check if this is actually an entity null fetch, instead of a data null fetch. - // In this case we return early to avoid adding subgraph errors or merging this into items. - if isEmptyEntityFetch(fetchItem, response) { - return nil - } - + // For fetches selecting a specific _entities[index] item, a null responseData means + // the subgraph had no matching entity. That is a valid GraphQL response even when + // negative caching is disabled. + entityNull := len(items) > 0 && + responseData != nil && + responseData.Type() == astjson.TypeNull && + selectsSingleEntityResult(res.postProcessing.SelectResponseDataPath) + if res.postProcessing.SelectResponseDataPath != nil && astjson.ValueIsNull(responseData) && !entityNull { // When: // - No errors or data are present // - Status code is not within the 2XX range @@ -567,27 +1626,61 @@ func (l *Loader) mergeResult(fetchItem *FetchItem, res *result, items []*astjson // no data return nil } - if len(items) == 0 { // If the data is set, it must be an object according to GraphQL over HTTP spec if responseData.Type() != astjson.TypeObject { return l.renderErrorsFailedToFetch(fetchItem, res, invalidGraphQLResponseShape) } l.resolvable.data = responseData + // Always run invalidation, even on partial-error responses. + l.runCacheInvalidation(fetchItem, res, responseData, cacheInvalidation) + // Only populate caches on success (no errors) + if !hasErrors { + l.populateCachesAfterFetch(fetchItem, res) + } return nil } if len(items) == 1 && res.batchStats == nil { - items[0], _, err = astjson.MergeValuesWithPath(l.jsonArena, items[0], responseData, res.postProcessing.MergePath...) - if err != nil { - return errors.WithStack(ErrMergeResult{ - Subgraph: res.ds.Name, - Reason: err, - Path: fetchItem.ResponsePath, - }) + if responseData != nil && responseData.Type() != astjson.TypeNull { + items[0], err = astjson.MergeValuesWithPath(l.jsonArena, items[0], responseData, res.postProcessing.MergePath...) + if err != nil { + return errors.WithStack(ErrMergeResult{ + Subgraph: res.ds.Name, + Reason: err, + Path: fetchItem.ResponsePath, + }) + } } if slices.Contains(taintedIndices, 0) { l.taintedObjs.add(items[0]) } + // Batch entity key mode: map individual entities from the response array to cache keys + if res.batchEntityKeyMode { + // For partial fetch: interleave cached + fresh entities before populating cache keys + if res.batchPartialFetchEnabled && len(res.batchCachedIndices) > 0 { + l.mergeBatchPartialResponse(res, items, getFetchInfo(fetchItem.Fetch)) + } + l.populateBatchCacheKeysFromResponse(res, items, getFetchInfo(fetchItem.Fetch)) + } else { + // Update cache key items to point to merged data for L1 and L2 caches + if len(res.l1CacheKeys) > 0 && res.l1CacheKeys[0] != nil { + res.l1CacheKeys[0].Item = items[0] + } + if len(res.l2CacheKeys) > 0 && res.l2CacheKeys[0] != nil { + res.l2CacheKeys[0].Item = items[0] + // Detect explicit null entity responses so regular cache writes are suppressed. + // Actual negative-sentinel persistence is still gated by NegativeCacheTTL in updateL2Cache. + if responseData != nil && responseData.Type() == astjson.TypeNull { + res.l2CacheKeys[0].NegativeCacheHit = true + } + } + } + // Always run invalidation, even on partial-error responses. + l.runCacheInvalidation(fetchItem, res, responseData, cacheInvalidation) + // Only populate caches on success (no errors) + if !hasErrors { + l.populateCachesAfterFetch(fetchItem, res) + } return nil } batch := responseData.GetArray() @@ -600,22 +1693,59 @@ func (l *Loader) mergeResult(fetchItem *FetchItem, res *result, items []*astjson return l.renderErrorsFailedToFetch(fetchItem, res, fmt.Sprintf(invalidBatchItemCount, len(res.batchStats), len(batch))) } + // Build a mapping from original item pointers to merged pointers + // This is needed because MergeValuesWithPath may return new objects + originalToMerged := make(map[*astjson.Value]*astjson.Value) + for batchIndex, targets := range res.batchStats { src := batch[batchIndex] - for _, target := range targets { - _, _, mErr := astjson.MergeValuesWithPath(l.jsonArena, target, src, res.postProcessing.MergePath...) - if mErr != nil { - return errors.WithStack(ErrMergeResult{ - Subgraph: res.ds.Name, - Reason: mErr, - Path: fetchItem.ResponsePath, - }) + for targetIdx, target := range targets { + mergedTarget := target + if src != nil && src.Type() != astjson.TypeNull { + var mErr error + mergedTarget, mErr = astjson.MergeValuesWithPath(l.jsonArena, target, src, res.postProcessing.MergePath...) + if mErr != nil { + return errors.WithStack(ErrMergeResult{ + Subgraph: res.ds.Name, + Reason: mErr, + Path: fetchItem.ResponsePath, + }) + } } + // Track the original to merged mapping + originalToMerged[target] = mergedTarget + // Update the target in batchStats with the merged result + res.batchStats[batchIndex][targetIdx] = mergedTarget if slices.Contains(taintedIndices, batchIndex) { - l.taintedObjs.add(target) + l.taintedObjs.add(mergedTarget) + } + } + } + // Update cache key items to point to merged data for L1 and L2 caches + for _, ck := range res.l1CacheKeys { + if ck != nil && ck.Item != nil { + if merged, ok := originalToMerged[ck.Item]; ok { + ck.Item = merged } } } + for _, ck := range res.l2CacheKeys { + if ck != nil && ck.Item != nil { + if merged, ok := originalToMerged[ck.Item]; ok { + ck.Item = merged + } + if batchIndex := ck.BatchIndex; batchIndex >= 0 && batchIndex < len(batch) && + batch[batchIndex] != nil && batch[batchIndex].Type() == astjson.TypeNull { + ck.NegativeCacheHit = true + } + } + } + // Always run invalidation, even on partial-error responses. + l.runCacheInvalidation(fetchItem, res, responseData, cacheInvalidation) + // Only populate caches on success (no errors) + if !hasErrors { + l.populateCachesAfterFetch(fetchItem, res) + } return nil } @@ -624,34 +1754,109 @@ func (l *Loader) mergeResult(fetchItem *FetchItem, res *result, items []*astjson } for i := range items { - items[i], _, err = astjson.MergeValuesWithPath(l.jsonArena, items[i], batch[i], res.postProcessing.MergePath...) - if err != nil { - return errors.WithStack(ErrMergeResult{ - Subgraph: res.ds.Name, - Reason: err, - Path: fetchItem.ResponsePath, - }) + if batch[i] != nil && batch[i].Type() != astjson.TypeNull { + items[i], err = astjson.MergeValuesWithPath(l.jsonArena, items[i], batch[i], res.postProcessing.MergePath...) + if err != nil { + return errors.WithStack(ErrMergeResult{ + Subgraph: res.ds.Name, + Reason: err, + Path: fetchItem.ResponsePath, + }) + } } if slices.Contains(taintedIndices, i) { l.taintedObjs.add(items[i]) } + // Update cache key items to point to merged data for L1 and L2 caches + if i < len(res.l1CacheKeys) && res.l1CacheKeys[i] != nil { + res.l1CacheKeys[i].Item = items[i] + } + if i < len(res.l2CacheKeys) && res.l2CacheKeys[i] != nil { + res.l2CacheKeys[i].Item = items[i] + // Detect explicit null entity responses so regular cache writes are suppressed. + // Actual negative-sentinel persistence is still gated by NegativeCacheTTL in updateL2Cache. + if batch[i] != nil && batch[i].Type() == astjson.TypeNull { + res.l2CacheKeys[i].NegativeCacheHit = true + } + } + } + + // Record subgraph-fetched entity details for cache trace BEFORE cache population. + tracingCache := l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats + if tracingCache { + if !res.cacheSkipFetch && len(res.l1CacheKeys) > 0 { + for i, ck := range res.l1CacheKeys { + if res.partialCacheEnabled && slices.Contains(res.cachedItemIndices, i) { + continue + } + if len(ck.Keys) > 0 && !l.ctx.TracingOptions.ExcludeRawInputData { + res.cacheTraceEntityDetails = append(res.cacheTraceEntityDetails, CacheTraceEntity{ + Key: ck.Keys[0], + Source: "subgraph", + }) + } + } + } + } + + // Always run invalidation, even on partial-error responses. + l.runCacheInvalidation(fetchItem, res, responseData, cacheInvalidation) + // Only populate caches on success (no errors) + if !hasErrors { + l.populateCachesAfterFetch(fetchItem, res) } return nil } -// isEmptyEntityFetch returns true if fetchItem resembles an sucessful entity fetch -// where no entity has been returned, else false. -func isEmptyEntityFetch(fetchItem *FetchItem, response *astjson.Value) bool { - kind := fetchItem.Fetch.FetchKind() +// runCacheInvalidation runs mutation entity impact detection and extensions-based +// cache invalidation. It is intentionally separated from populateCachesAfterFetch +// so it can be called unconditionally, even when the subgraph response contains errors. +func (l *Loader) runCacheInvalidation(fetchItem *FetchItem, res *result, responseData *astjson.Value, cacheInvalidation *astjson.Value) { + info := getFetchInfo(fetchItem.Fetch) + deletedKeys := l.detectMutationEntityImpact(res, info, responseData) + l.processExtensionsCacheInvalidation(res, cacheInvalidation, deletedKeys) +} - if kind == FetchKindEntity || kind == FetchKindEntityBatch { - entitiesData := response.Get("data", "_entities") - if astjson.ValueIsNonNull(entitiesData) && entitiesData.Type() == astjson.TypeArray { - return true +// populateCachesAfterFetch runs shadow comparison and L1/L2 cache population. +// Called after a successful (error-free) fetch merge. +// +// Invalidation (detectMutationEntityImpact + processExtensionsCacheInvalidation) is +// called via runCacheInvalidation at each call site unconditionally before this function. +func (l *Loader) populateCachesAfterFetch(fetchItem *FetchItem, res *result) { + info := getFetchInfo(fetchItem.Fetch) + l.compareShadowValues(res, info) + l.populateL1Cache(fetchItem, res) + l.updateL2Cache(res) +} + +func (l *Loader) evaluateRejected(fetchItem *FetchItem, res *result, items []*astjson.Value) (bool, error) { + if res.authorizationRejected { + err := l.renderAuthorizationRejectedErrors(fetchItem, res) + if err != nil { + return false, err + } + l.setSkipErrors(res, items) + return true, nil + } + if res.rateLimitRejected { + err := l.renderRateLimitRejectedErrors(fetchItem, res) + if err != nil { + return false, err } + l.setSkipErrors(res, items) + return true, nil } + return false, nil +} - return false +func (l *Loader) setSkipErrors(res *result, items []*astjson.Value) { + trueValue := astjson.TrueValue(l.jsonArena) + skipErrorsPath := make([]string, len(res.postProcessing.MergePath)+1) + copy(skipErrorsPath, res.postProcessing.MergePath) + skipErrorsPath[len(skipErrorsPath)-1] = "__skipErrors" + for _, item := range items { + astjson.SetValue(l.jsonArena, item, trueValue, skipErrorsPath...) + } } var ( @@ -704,6 +1909,10 @@ func (l *Loader) appendSubgraphError(res *result, fetchItem *FetchItem, value *a func (l *Loader) mergeErrors(res *result, fetchItem *FetchItem, value *astjson.Value) error { values := value.GetArray() + // Record subgraph error analytics before processing modifies the values + if l.ctx.cacheAnalyticsEnabled() && len(values) > 0 { + l.recordSubgraphErrorAnalytics(res, values) + } l.optionallyOmitErrorLocations(values) if l.rewriteSubgraphErrorPaths { rewriteErrorPaths(l.jsonArena, fetchItem, values) @@ -784,6 +1993,27 @@ func (l *Loader) mergeErrors(res *result, fetchItem *FetchItem, value *astjson.V return nil } +func selectsSingleEntityResult(path []string) bool { + if len(path) < 3 { + return false + } + + if path[len(path)-2] != "_entities" { + return false + } + + _, err := strconv.Atoi(path[len(path)-1]) + return err == nil +} + +func selectsBatchEntityArrayResult(path []string) bool { + if len(path) == 0 { + return false + } + + return path[len(path)-1] == "_entities" +} + // optionallyAllowCustomExtensionProperties removes all properties from the "extensions" object // that are not in the allowedProperties map. // If no properties are left, the "extensions" object is removed. @@ -900,6 +2130,7 @@ func (l *Loader) optionallyOmitErrorFields(values []*astjson.Value) { // optionallyOmitErrorLocations removes the "locations" object from all values. func (l *Loader) optionallyOmitErrorLocations(values []*astjson.Value) { + for _, value := range values { // If the flag is set, delete all locations if !value.Exists(locationsField) || l.omitSubgraphErrorLocations { @@ -1077,6 +2308,17 @@ func (l *Loader) renderErrorsFailedDeps(fetchItem *FetchItem, res *result) error func (l *Loader) renderErrorsFailedToFetch(fetchItem *FetchItem, res *result, reason string) error { l.ctx.appendSubgraphErrors(res.ds, res.err, NewSubgraphError(res.ds, fetchItem.ResponsePath, reason, res.statusCode)) + if l.ctx.cacheAnalyticsEnabled() { + msg := reason + if res.err != nil { + msg = res.err.Error() + } + l.ctx.cacheAnalytics.RecordError(SubgraphErrorEvent{ + DataSource: res.ds.Name, + EntityType: res.analyticsEntityType, + Message: truncateErrorMessage(msg, 256), + }) + } errorObject, err := astjson.ParseWithArena(l.jsonArena, l.renderSubgraphBaseError(res.ds, fetchItem.ResponsePath, reason)) if err != nil { return err @@ -1090,6 +2332,30 @@ func (l *Loader) renderErrorsFailedToFetch(fetchItem *FetchItem, res *result, re return nil } +// recordSubgraphErrorAnalytics extracts analytics-relevant data from subgraph GraphQL errors. +// Extracts errors[0].extensions.code and errors[0].message for the SubgraphErrorEvent. +func (l *Loader) recordSubgraphErrorAnalytics(res *result, values []*astjson.Value) { + if len(values) == 0 { + return + } + first := values[0] + var msg, code string + if msgVal := first.Get("message"); msgVal != nil { + msg = string(msgVal.GetStringBytes()) + } + if extVal := first.Get("extensions"); extVal != nil { + if codeVal := extVal.Get("code"); codeVal != nil { + code = string(codeVal.GetStringBytes()) + } + } + l.ctx.cacheAnalytics.RecordError(SubgraphErrorEvent{ + DataSource: res.ds.Name, + EntityType: res.analyticsEntityType, + Message: truncateErrorMessage(msg, 256), + Code: code, + }) +} + func (l *Loader) renderErrorsStatusFallback(fetchItem *FetchItem, res *result, statusCode int) error { reason := fmt.Sprintf("%d", statusCode) if statusText := http.StatusText(statusCode); statusText != "" { @@ -1209,7 +2475,7 @@ func (l *Loader) renderRateLimitRejectedErrors(fetchItem *FetchItem, res *result if err != nil { return err } - errorObject, _, err = astjson.MergeValuesWithPath(l.jsonArena, errorObject, extension, "extensions") + errorObject, err = astjson.MergeValuesWithPath(l.jsonArena, errorObject, extension, "extensions") if err != nil { return err } @@ -1289,7 +2555,6 @@ func (l *Loader) validatePreFetch(input []byte, info *FetchInfo, res *result) (a func (l *Loader) loadSingleFetch(ctx context.Context, fetch *SingleFetch, fetchItem *FetchItem, items []*astjson.Value, res *result) error { res.init(fetch.PostProcessing, fetch.Info) buf := bytes.NewBuffer(nil) - inputData := l.itemsData(items) if l.ctx.TracingOptions.Enable { fetch.Trace = &DataSourceLoadTrace{} @@ -1310,7 +2575,18 @@ func (l *Loader) loadSingleFetch(ctx context.Context, fetch *SingleFetch, fetchI return nil } - err := fetch.InputTemplate.Render(l.ctx, inputData, buf) + renderCtx := l.ctx + if res.batchPartialFetchEnabled && len(res.batchMissedIndices) > 0 && len(res.batchCachedIndices) > 0 { + filteredCtx, err := l.filterBatchVariablesForPartialFetch(res, fetch) + if err != nil { + return errors.WithStack(err) + } + if filteredCtx != nil { + renderCtx = filteredCtx + } + } + + err := fetch.InputTemplate.Render(renderCtx, inputData, buf) if err != nil { res.out = l.renderErrorsInvalidInput(fetchItem) return nil @@ -1450,7 +2726,6 @@ var ( func (l *Loader) loadBatchEntityFetch(ctx context.Context, fetchItem *FetchItem, fetch *BatchEntityFetch, items []*astjson.Value, res *result) error { res.init(fetch.PostProcessing, fetch.Info) - if l.ctx.TracingOptions.Enable { fetch.Trace = &DataSourceLoadTrace{} if !l.ctx.TracingOptions.ExcludeRawInputData && len(items) != 0 { @@ -1486,8 +2761,25 @@ func (l *Loader) loadBatchEntityFetch(ctx context.Context, fetchItem *FetchItem, batchItemIndex := 0 addSeparator := false + // Build a set of indices that need fetching for partial cache loading + // Only allocate the map when partial loading is enabled and there are items to fetch + var fetchIndexSet map[int]struct{} + if res.partialCacheEnabled && len(res.fetchItemIndices) > 0 { + fetchIndexSet = make(map[int]struct{}, len(res.fetchItemIndices)) + for _, idx := range res.fetchItemIndices { + fetchIndexSet[idx] = struct{}{} + } + } + WithNextItem: for i, item := range items { + // Skip items that are already cached when partial loading is enabled + if fetchIndexSet != nil { + if _, needsFetch := fetchIndexSet[i]; !needsFetch { + continue + } + } + for j := range fetch.Input.Items { itemInput.Reset() err = fetch.Input.Items[j].Render(l.ctx, item, itemInput) @@ -1578,7 +2870,7 @@ WithNextItem: } func redactHeaders(rawJSON json.RawMessage) (json.RawMessage, error) { - var obj map[string]interface{} + var obj map[string]any sensitiveHeaders := []string{ "authorization", @@ -1595,7 +2887,7 @@ func redactHeaders(rawJSON json.RawMessage) (json.RawMessage, error) { } if headers, ok := obj["header"]; ok { - if headerMap, isMap := headers.(map[string]interface{}); isMap { + if headerMap, isMap := headers.(map[string]any); isMap { for key, values := range headerMap { if slices.Contains(sensitiveHeaders, strings.ToLower(key)) { headerMap[key] = []string{"****"} @@ -1903,6 +3195,11 @@ func (l *Loader) executeSourceLoad(ctx context.Context, fetchItem *FetchItem, so var responseContext *httpclient.ResponseContext ctx, responseContext = httpclient.InjectResponseContext(ctx) + var fetchStart time.Time + if l.ctx.cacheAnalyticsEnabled() { + fetchStart = time.Now() + } + if l.ctx.LoaderHooks != nil { res.loaderHookContext = l.ctx.LoaderHooks.OnLoad(ctx, res.ds) @@ -1921,6 +3218,29 @@ func (l *Loader) executeSourceLoad(ctx context.Context, fetchItem *FetchItem, so res.statusCode = responseContext.StatusCode res.httpResponseContext = responseContext + // Record subgraph fetch timing for analytics (uses per-result slice for goroutine safety) + if l.ctx.cacheAnalyticsEnabled() { + info := fetchItem.Fetch.FetchInfo() + var entityType string + isEntityFetch := false + if info != nil { + if len(info.RootFields) > 0 { + entityType = info.RootFields[0].TypeName + } + isEntityFetch = info.OperationType == ast.OperationTypeQuery && (entityType != "Query" && entityType != "Mutation" && entityType != "Subscription") + } + res.l2FetchTimings = append(res.l2FetchTimings, FetchTimingEvent{ + DataSource: res.ds.Name, + EntityType: entityType, + DurationMs: time.Since(fetchStart).Milliseconds(), + Source: FieldSourceSubgraph, + ItemCount: 1, + IsEntityFetch: isEntityFetch, + HTTPStatusCode: res.statusCode, + ResponseBytes: len(res.out), + }) + } + if l.ctx.TracingOptions.Enable { if res.singleFlightStats != nil { trace.SingleFlightUsed = res.singleFlightStats.used @@ -1969,3 +3289,18 @@ func (l *Loader) compactJSON(data []byte) ([]byte, error) { astjson.DeduplicateObjectKeysRecursively(v) return v.MarshalTo(nil), nil } + +// canSkipFetch returns true if the cache provided exactly the information required to satisfy the query plan +// the query planner generates info.ProvidesData which tells precisely which fields the fetch must load +// if a single value is missing, we will execute the fetch +func (l *Loader) canSkipFetch(info *FetchInfo, res *result) bool { + if info == nil || info.OperationType != ast.OperationTypeQuery || info.ProvidesData == nil { + return false + } + for i := range res.l1CacheKeys { + if !l.validateItemHasRequiredData(res.l1CacheKeys[i].FromCache, info.ProvidesData) { + return false + } + } + return true +} diff --git a/v2/pkg/engine/resolve/loader_arena_gc_test.go b/v2/pkg/engine/resolve/loader_arena_gc_test.go index 9e16e8e8a2..2b2ba3c563 100644 --- a/v2/pkg/engine/resolve/loader_arena_gc_test.go +++ b/v2/pkg/engine/resolve/loader_arena_gc_test.go @@ -8,6 +8,14 @@ import ( "net/http" "runtime" "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/httpclient" @@ -299,12 +307,114 @@ func Benchmark_ArenaGCSafety(b *testing.B) { return resp }, }, + { + // Codepath: L1 cache population — entity fetch with UseL1Cache stores + // arena-allocated *astjson.Value pointers in Loader.l1Cache (sync.Map). + // After ArenaResolveGraphQLResponse releases the arena, those pointers + // become dangling. runtime.GC() should detect them. + name: "l1CacheDanglingPointers", + resolverOpts: func() ResolverOptions { + return ResolverOptions{ + MaxConcurrency: 1024, + } + }, + setupCtx: func() *Context { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + return ctx + }, + setupResp: func() *GraphQLResponse { + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}}}, + }, + } + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + // Root fetch + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: FakeDataSource(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + // Entity fetch — populates L1 cache with arena-allocated pointers + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: FakeDataSource(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + }, + }, } for _, tc := range cases { b.Run(tc.name, func(b *testing.B) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := b.Context() resolver := New(rCtx, tc.resolverOpts()) buf := &bytes.Buffer{} @@ -330,3 +440,420 @@ func Benchmark_ArenaGCSafety(b *testing.B) { }) } } + +// TestL1CacheStalePointersAfterArenaReset deterministically proves that L1 cache +// entries become stale when the arena is reset and reused. This is the root cause +// of the CI crash "found pointer to free object": the Loader's l1Cache (sync.Map) +// holds *astjson.Value pointers into arena memory that becomes invalid after +// resolveArenaPool.Release() resets the arena. +func TestL1CacheStalePointersAfterArenaReset(t *testing.T) { + // Shared entity fetch setup — same as l1_cache_test.go + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}}}, + }, + } + + // buildResponse creates a GraphQLResponse with a root fetch + entity fetch that populates L1 cache. + buildResponse := func(rootDS, entityDS DataSource) *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), SegmentType: StaticSegmentType}, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"...","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }, + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }, + }, + }, + } + } + + t.Run("detached values survive arena reset", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil).Times(1) + + response := buildResponse(rootDS, entityDS) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + loader := &Loader{jsonArena: ar} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Verify L1 cache was populated with correct data + var cacheCount int + var originalBytes []byte + for _, value := range loader.l1Cache { + cacheCount++ + originalBytes = append(originalBytes[:0], value.MarshalTo(nil)...) + } + require.Equal(t, 1, cacheCount) + assert.Equal(t, `{"__typename":"Product","id":"prod-1","name":"Product One"}`, string(originalBytes)) + + // L1 cache entries always own a DeepCopy on l.jsonArena. The GC safety + // property is that the stored value is reachable from a GC root (the + // l1Cache sync.Map) and arena-allocated memory is pinned until the + // arena is released — which is what Loader.Free() does. + loader.Free() + assert.Nil(t, loader.l1Cache) + }) + + t.Run("Free prevents stale pointer access", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()). + Return([]byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Product One"}]}}`), nil).Times(1) + + response := buildResponse(rootDS, entityDS) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + loader := &Loader{jsonArena: ar} + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Verify L1 cache was populated + cacheCount := len(loader.l1Cache) + require.Equal(t, 1, cacheCount) + + // The fix: Free() nils l1Cache before arena release + loader.Free() + // Free() nils l1Cache to sever references to arena-allocated values + assert.Nil(t, loader.l1Cache) + }) +} + +func TestL1Cache_EntityFetchStoresDetachedValuesWithoutAliases(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + loader := &Loader{ + jsonArena: ar, + l1Cache: map[string]*astjson.Value{}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + loader.ctx = ctx + + const cacheKey = `{"__typename":"Article","key":{"id":"a1"}}` + const originalJSON = `{"__typename":"Article","id":"a1","title":"Original"}` + + entity := mustParseArena(t, ar, originalJSON) + + fetchItem := &FetchItem{ + Fetch: &SingleFetch{ + FetchConfiguration: FetchConfiguration{ + Caching: FetchCacheConfiguration{ + Enabled: true, + UseL1Cache: true, + }, + }, + Info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &Scalar{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("title"), Value: &Scalar{Path: []string{"title"}}}, + }, + }, + }, + }, + } + + res := &result{ + l1CacheKeys: []*CacheKey{ + { + Item: entity, + Keys: []string{cacheKey}, + }, + }, + } + + loader.populateL1Cache(fetchItem, res) + + cached, ok := loader.l1Cache[cacheKey] + require.True(t, ok) + + require.NotPanics(t, func() { + assert.Equal(t, originalJSON, string(cached.MarshalTo(nil))) + }) + + // Mutate source entity to verify structural independence. + entity.Set(ar, "title", astjson.StringValue(ar, "Mutated")) + + require.NotPanics(t, func() { + assert.Equal(t, originalJSON, string(cached.MarshalTo(nil))) + }) +} + +func TestL1Cache_RootFieldEntityPromotionStoresDetachedValues(t *testing.T) { + t.Parallel() + + // Single arena — mirrors the real runtime where resolvable.data and l1Cache + // values all live on l.jsonArena. StructuralCopy gives structural isolation + // (container nodes are distinct) while aliasing leaf values on the same arena. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + loader := &Loader{ + jsonArena: ar, + l1Cache: map[string]*astjson.Value{}, + ctx: ctx, + resolvable: &Resolvable{ + data: mustParseArena(t, ar, `{"articles":[{"__typename":"Article","id":"a1","title":"Original"}]}`), + }, + } + + entityTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Path: []string{"articles"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + // Root-field L1 promotion now requires singleFetch.Info.ProvidesData so the + // loader can derive an entity-shaped normalize Transform. + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("articles"), Value: &Array{Item: &Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &Scalar{}}, + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("title"), Value: &Scalar{}}, + }, + }}}, + }, + } + + fetchItem := &FetchItem{ + Fetch: &SingleFetch{ + FetchConfiguration: FetchConfiguration{ + Caching: FetchCacheConfiguration{ + Enabled: true, + UseL1Cache: true, + RootFieldL1EntityCacheKeyTemplates: map[string]CacheKeyTemplate{ + "articles:Article": entityTemplate, + }, + }, + }, + Info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + }, + } + + loader.populateL1CacheForRootFieldEntities(fetchItem) + + const cacheKey = `{"__typename":"Article","key":{"id":"a1"}}` + cached, ok := loader.l1Cache[cacheKey] + require.True(t, ok) + + require.NotPanics(t, func() { + assert.Equal(t, `{"__typename":"Article","id":"a1","title":"Original"}`, string(cached.MarshalTo(nil))) + }) + + // Mutate the source to verify structural independence. + loader.resolvable.data.Get("articles").GetArray()[0].Set(ar, "title", astjson.StringValue(ar, "Mutated")) + + // Cached value must still produce original data because structuralCopy + // creates distinct container nodes. Leaf values are aliased but since + // we changed via Set (which replaces the value pointer, not the string + // content), the cached value's alias still points to the original. + require.NotPanics(t, func() { + assert.Equal(t, `{"__typename":"Article","id":"a1","title":"Original"}`, string(cached.MarshalTo(nil))) + }) +} + +func TestL1Cache_RootFieldEntityPromotionDoesNotPanicOnL1HitAfterArenaReuse(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.TracingOptions.Enable = true + + loader := &Loader{ + jsonArena: ar, + l1Cache: map[string]*astjson.Value{}, + ctx: ctx, + resolvable: &Resolvable{ + data: mustParseArena(t, ar, `{"articles":[{"__typename":"Article","id":"a1","title":"Original"}]}`), + }, + } + + entityTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Path: []string{"articles"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("articles"), Value: &Array{Item: &Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &Scalar{}}, + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("title"), Value: &Scalar{}}, + }, + }}}, + }, + } + + fetchItem := &FetchItem{ + Fetch: &SingleFetch{ + FetchConfiguration: FetchConfiguration{ + Caching: FetchCacheConfiguration{ + Enabled: true, + UseL1Cache: true, + RootFieldL1EntityCacheKeyTemplates: map[string]CacheKeyTemplate{ + "articles:Article": entityTemplate, + }, + }, + }, + Info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + }, + } + + loader.populateL1CacheForRootFieldEntities(fetchItem) + + // Mutate source to verify L1 structural independence + loader.resolvable.data.Get("articles").GetArray()[0].Set(ar, "title", astjson.StringValue(ar, "Mutated")) + + const cacheKey = `{"__typename":"Article","key":{"id":"a1"}}` + cacheKeys := []*CacheKey{ + { + Keys: []string{cacheKey}, + }, + } + + info := &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &Scalar{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("title"), Value: &Scalar{Path: []string{"title"}}}, + }, + }, + } + + res := &result{} + + require.NotPanics(t, func() { + hit := loader.tryL1CacheLoad(info, cacheKeys, res) + assert.True(t, hit) + }) +} diff --git a/v2/pkg/engine/resolve/loader_cache.go b/v2/pkg/engine/resolve/loader_cache.go new file mode 100644 index 0000000000..76dc07d22c --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache.go @@ -0,0 +1,3392 @@ +package resolve + +import ( + "bytes" + "cmp" + "context" + "encoding/json" + "slices" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/pool" +) + +// CacheWriteReason identifies why a cache entry was written. +type CacheWriteReason string + +const ( + // CacheWriteReasonRefresh means an existing cached key was rewritten with fresh or merged data. + CacheWriteReasonRefresh CacheWriteReason = "refresh" + // CacheWriteReasonBackfill means a requested key that missed on read was proven by final entity data. + CacheWriteReasonBackfill CacheWriteReason = "backfill" + // CacheWriteReasonDerived means a new key was derived from final entity data that was not in the original request. + CacheWriteReasonDerived CacheWriteReason = "derived" +) + +type CacheEntry struct { + Key string + Value []byte + // TTL controls this entry's expiration on write. Zero uses the cache backend default; + // negative means no TTL / indefinite. + TTL time.Duration + RemainingTTL time.Duration // remaining TTL from cache (0 = unknown/not supported) + WriteReason CacheWriteReason // why this entry was written (empty for reads) +} + +// EntityCacheInvalidationConfig holds the minimal cache settings needed to build +// invalidation keys for a specific entity type on a specific subgraph. +// Separate from plan.EntityCacheConfiguration to avoid a resolve → plan dependency; +// only CacheName and IncludeSubgraphHeaderPrefix are needed at invalidation time. +type EntityCacheInvalidationConfig struct { + CacheName string + IncludeSubgraphHeaderPrefix bool +} + +type LoaderCache interface { + Get(ctx context.Context, keys []string) ([]*CacheEntry, error) + Set(ctx context.Context, entries []*CacheEntry) error + Delete(ctx context.Context, keys []string) error +} + +type l2CacheSetContributor struct { + res *result + entries []*CacheEntry + regularEntries []*CacheEntry + negativeEntries []*CacheEntry +} + +type l2CacheSetGroup struct { + cache LoaderCache + contributors []*l2CacheSetContributor + entries []*CacheEntry +} + +// l1AnalyticsSize returns the byte size of an L1 entry for analytics purposes. +// Returns 0 (avoiding the marshal cost) when analytics are disabled. +func l1AnalyticsSize(enabled bool, v *astjson.Value) int { + if !enabled || v == nil { + return 0 + } + return len(v.MarshalTo(nil)) +} + +// hasNonEmptyKey reports whether any entry in keys is a non-empty string. +// Used as a defensive guard before issuing an L2 Get — a batch of entirely +// empty strings is never a legitimate lookup, so skip it cleanly. +func hasNonEmptyKey(keys []string) bool { + for _, k := range keys { + if k != "" { + return true + } + } + return false +} + +// extractCacheKeysStrings extracts all unique cache key strings from CacheKeys +func (l *Loader) extractCacheKeysStrings(a arena.Arena, cacheKeys []*CacheKey) []string { + if len(cacheKeys) == 0 { + return nil + } + out := make([]string, 0, len(cacheKeys)) + seen := make(map[string]struct{}, len(cacheKeys)) + for i := range cacheKeys { + for j := range cacheKeys[i].Keys { + keyStr := cacheKeys[i].Keys[j] + if _, ok := seen[keyStr]; ok { + continue + } + seen[keyStr] = struct{}{} + out = append(out, keyStr) + } + } + return out +} + +// countUniqueCacheKeyStrings counts unique cache key strings across CacheKeys +// without allocating the strings slice. Used by analytics/tracing call sites +// that only need the count. +func countUniqueCacheKeyStrings(cacheKeys []*CacheKey) int { + if len(cacheKeys) == 0 { + return 0 + } + seen := make(map[string]struct{}, len(cacheKeys)) + for i := range cacheKeys { + for j := range cacheKeys[i].Keys { + seen[cacheKeys[i].Keys[j]] = struct{}{} + } + } + return len(seen) +} + +// populateFromCache populates CacheKey.FromCache fields from cache entries. +// Parses each candidate VERBATIM via l.parser onto the given arena. +// Denormalization (alias re-application) happens LATER at the materialization +// site via structuralCopyDenormalized. +func (l *Loader) populateFromCache(a arena.Arena, cacheKeys []*CacheKey, entries []*CacheEntry) error { + return l.populateCacheKeysFromIndex(a, cacheKeys, indexCacheEntriesByKey(entries)) +} + +// indexCacheEntriesByKey builds a map[key]*CacheEntry from a raw cache-Get response. +// Nil entries are filtered. Later entries with duplicate keys overwrite earlier ones +// (matches existing behavior at the bulk-L2 call site). +func indexCacheEntriesByKey(entries []*CacheEntry) map[string]*CacheEntry { + if len(entries) == 0 { + return nil + } + byKey := make(map[string]*CacheEntry, len(entries)) + for _, e := range entries { + if e != nil { + byKey[e.Key] = e + } + } + return byKey +} + +// populateCacheKeysFromIndex is the shared per-CacheKey match+parse loop used by +// both populateFromCache (sequential path) and populateFromCacheBulk (parallel +// path). It resets the cache-read state on each CacheKey, collects candidates +// from byKey, records missingKeys, sorts by freshness, and parses the freshest +// candidate verbatim onto the arena. +func (l *Loader) populateCacheKeysFromIndex(a arena.Arena, cacheKeys []*CacheKey, byKey map[string]*CacheEntry) error { + for j := range cacheKeys { + ck := cacheKeys[j] + ck.FromCache = nil + ck.missingKeys = nil + ck.cachedData = cachedData{} + + var candidates []fromCacheCandidate + matchedKeys := make(map[string]struct{}, len(ck.Keys)) + for _, key := range ck.Keys { + entry, ok := byKey[key] + if !ok || entry == nil || entry.Value == nil { + continue + } + matchedKeys[key] = struct{}{} + candidates = append(candidates, fromCacheCandidate{ + value: entry.Value, + remainingTTL: entry.RemainingTTL, + }) + } + for _, key := range ck.Keys { + if _, ok := matchedKeys[key]; !ok { + ck.missingKeys = append(ck.missingKeys, key) + } + } + if len(candidates) == 0 { + continue + } + slices.SortStableFunc(candidates, func(a, b fromCacheCandidate) int { + return compareCacheCandidateFreshness(a.remainingTTL, b.remainingTTL) + }) + ck.fromCacheCandidates = candidates + // Safe: guarded by len(candidates) == 0 continue above, so candidates[0] exists. + ck.fromCacheRemainingTTL = candidates[0].remainingTTL + parsed, err := l.parseL2Bytes(a, candidates[0].value) + if err != nil { + return errors.WithStack(err) + } + ck.FromCache = parsed + } + return nil +} + +// parseL2Bytes parses an L2 cache entry's bytes into a *astjson.Value on the +// given arena, VERBATIM (no Transform). Uses l.parser — main thread only. +// Denormalization is applied separately at the materialization site via +// structuralCopyDenormalized. +func (l *Loader) parseL2Bytes(a arena.Arena, bytes []byte) (*astjson.Value, error) { + return l.parser.ParseBytesWithArena(a, bytes) +} + +func compareCacheCandidateFreshness(a, b time.Duration) int { + aKnown := a > 0 + bKnown := b > 0 + switch { + case aKnown && bKnown: + return cmp.Compare(b, a) + case aKnown: + return -1 + case bKnown: + return 1 + default: + return 0 + } +} + +func wrapCacheValueAtMergePath(a arena.Arena, value *astjson.Value, mergePath []string) *astjson.Value { + if value == nil || len(mergePath) == 0 { + return value + } + wrapped := value + for i := len(mergePath) - 1; i >= 0; i-- { + obj := astjson.ObjectValue(a) + obj.Set(a, mergePath[i], wrapped) + wrapped = obj + } + return wrapped +} + +func (l *Loader) reorderCacheValueToSelectionOrder(a arena.Arena, value *astjson.Value, node Node) *astjson.Value { + if value == nil || node == nil { + return value + } + + switch n := node.(type) { + case *Object: + if value.Type() != astjson.TypeObject { + return value + } + reordered := astjson.ObjectValue(a) + seen := make(map[string]struct{}, len(n.Fields)) + for _, field := range n.Fields { + fieldName := l.cacheFieldName(field) + fieldValue := value.Get(fieldName) + if fieldValue == nil { + continue + } + reordered.Set(a, fieldName, l.reorderCacheValueToSelectionOrder(a, fieldValue, field.Value)) + seen[fieldName] = struct{}{} + } + + obj, err := value.Object() + if err != nil { + return value + } + obj.Visit(func(key []byte, fieldValue *astjson.Value) { + fieldName := string(key) + if _, ok := seen[fieldName]; ok { + return + } + reordered.Set(a, fieldName, fieldValue) + }) + return reordered + case *Array: + if value.Type() != astjson.TypeArray { + return value + } + items, err := value.Array() + if err != nil { + return value + } + reordered := astjson.ArrayValue(a) + for i, item := range items { + reordered.SetArrayItem(a, i, l.reorderCacheValueToSelectionOrder(a, item, n.Item)) + } + return reordered + default: + return value + } +} + +func (l *Loader) resolveMultiCandidateCacheValue(a arena.Arena, ck *CacheKey, providesData *Object) bool { + if ck.FromCache == nil { + return false + } + if providesData == nil || l.validateItemHasRequiredData(ck.FromCache, providesData) { + return true + } + if len(ck.fromCacheCandidates) <= 1 { + return false + } + + var merged *astjson.Value + for i := len(ck.fromCacheCandidates) - 1; i >= 0; i-- { + parsed, err := l.parseL2Bytes(a, ck.fromCacheCandidates[i].value) + if err != nil { + continue + } + parsed = wrapCacheValueAtMergePath(a, parsed, ck.EntityMergePath) + if merged == nil { + merged = parsed + continue + } + if _, err = astjson.MergeValues(a, merged, parsed); err != nil { + merged = nil + break + } + } + if merged != nil && l.validateItemHasRequiredData(merged, providesData) { + ck.FromCache = l.reorderCacheValueToSelectionOrder(a, merged, providesData) + ck.fromCacheNeedsWriteback = true + return true + } + + for i := 1; i < len(ck.fromCacheCandidates); i++ { + parsed, err := l.parseL2Bytes(a, ck.fromCacheCandidates[i].value) + if err != nil { + continue + } + parsed = wrapCacheValueAtMergePath(a, parsed, ck.EntityMergePath) + if l.validateItemHasRequiredData(parsed, providesData) { + ck.FromCache = l.reorderCacheValueToSelectionOrder(a, parsed, providesData) + ck.fromCacheRemainingTTL = ck.fromCacheCandidates[i].remainingTTL + ck.fromCacheNeedsWriteback = true + return true + } + } + + return false +} + +func batchEntityValidationObject(providesData *Object, entityMergePath []string) *Object { + if providesData == nil { + return nil + } + if len(entityMergePath) == 0 { + return providesData + } + + current := providesData + for i, segment := range entityMergePath { + var next Node + for _, field := range current.Fields { + if string(field.Name) == segment || string(field.OriginalName) == segment { + next = field.Value + break + } + } + if next == nil { + return nil + } + if i == len(entityMergePath)-1 { + switch value := next.(type) { + case *Object: + return value + case *Array: + obj, _ := value.Item.(*Object) + return obj + default: + return nil + } + } + switch value := next.(type) { + case *Object: + current = value + case *Array: + obj, ok := value.Item.(*Object) + if !ok { + return nil + } + current = obj + default: + return nil + } + } + + return current +} + +func (l *Loader) resolveBatchEntityCacheValue(a arena.Arena, ck *CacheKey, providesData *Object) bool { + if ck.FromCache == nil { + return false + } + if providesData == nil || l.validateItemHasRequiredData(ck.FromCache, providesData) { + return true + } + if len(ck.fromCacheCandidates) <= 1 { + return false + } + + var merged *astjson.Value + for i := len(ck.fromCacheCandidates) - 1; i >= 0; i-- { + parsed, err := l.parseL2Bytes(a, ck.fromCacheCandidates[i].value) + if err != nil { + continue + } + if merged == nil { + merged = parsed + continue + } + if _, err = astjson.MergeValues(a, merged, parsed); err != nil { + merged = nil + break + } + } + if merged != nil && l.validateItemHasRequiredData(merged, providesData) { + ck.FromCache = l.reorderCacheValueToSelectionOrder(a, merged, providesData) + ck.fromCacheNeedsWriteback = true + return true + } + + for i := 1; i < len(ck.fromCacheCandidates); i++ { + parsed, err := l.parseL2Bytes(a, ck.fromCacheCandidates[i].value) + if err != nil { + continue + } + if l.validateItemHasRequiredData(parsed, providesData) { + ck.FromCache = l.reorderCacheValueToSelectionOrder(a, parsed, providesData) + ck.fromCacheRemainingTTL = ck.fromCacheCandidates[i].remainingTTL + ck.fromCacheNeedsWriteback = true + return true + } + } + + return false +} + +func hasMissingRequestedKeys(cacheKeys []*CacheKey) bool { + for _, ck := range cacheKeys { + if len(ck.missingKeys) > 0 { + return true + } + } + return false +} + +func needsResolvedCacheWriteback(cacheKeys []*CacheKey) bool { + for _, ck := range cacheKeys { + if ck.fromCacheNeedsWriteback { + return true + } + } + return false +} + +// cacheKeysToEntries converts CacheKeys to CacheEntries for storage +// For each CacheKey, creates entries for all its KeyEntries with the same value +func (l *Loader) cacheKeysToEntries(a arena.Arena, cacheKeys []*CacheKey) ([]*CacheEntry, error) { + // Use heap slice for []*CacheEntry — arena memory is noscan, so GC cannot + // trace *CacheEntry pointers stored there, risking premature collection. + out := make([]*CacheEntry, 0, len(cacheKeys)) + buf := arena.AllocateSlice[byte](a, 64, 64) + seen := make(map[string]struct{}, len(cacheKeys)) + for i := range cacheKeys { + for j := range cacheKeys[i].Keys { + if cacheKeys[i].Item == nil || cacheKeys[i].NegativeCacheHit { + continue + } + keyStr := cacheKeys[i].Keys[j] + if _, ok := seen[keyStr]; ok { + continue + } + seen[keyStr] = struct{}{} + // When EntityMergePath is set, store entity-level data (extracted at merge path) + // instead of response-level data, so entity fetches can read it directly. + itemToStore := cacheKeys[i].Item + if len(cacheKeys[i].EntityMergePath) > 0 { + if entityData := cacheKeys[i].Item.Get(cacheKeys[i].EntityMergePath...); entityData != nil { + itemToStore = entityData + } + } + // Preserve fields from the previously cached object when this writeback only + // contains a narrower entity projection. Without this merge, a follow-up fetch + // can overwrite shared entity/root cache state with partial data and turn the + // next request into an incorrect cache hit. + // + // The pointer check avoids re-merging when itemToStore already points at the + // cached AST value. + if cacheKeys[i].FromCache != nil && itemToStore != cacheKeys[i].FromCache { + if merged := mergeCachedValueForWrite(a, cacheKeys[i].FromCache, itemToStore); merged != nil { + itemToStore = merged + } + } + buf = itemToStore.MarshalTo(buf[:0]) + // Value must be heap-allocated: it is handed to the L2 cache (e.g. ristretto) + // which retains the slice across requests. The arena `a` (jsonArena) is reset + // at the end of the request, so an arena-backed slice would be overwritten and + // subsequent cache reads would return corrupted bytes. + entryValue := make([]byte, len(buf)) + copy(entryValue, buf) + out = append(out, &CacheEntry{ + Key: cacheKeys[i].Keys[j], + Value: entryValue, + }) + } + } + return out, nil +} + +// mergeCachedValueForWrite preserves fields from the older cached object when a +// follow-up writeback only contains a narrower entity projection for the same key. +// The fresh payload still wins on overlapping fields. +func mergeCachedValueForWrite(a arena.Arena, cachedValue, freshValue *astjson.Value) *astjson.Value { + if cachedValue == nil || freshValue == nil { + return freshValue + } + if cachedValue.Type() != astjson.TypeObject || freshValue.Type() != astjson.TypeObject { + return freshValue + } + merged, err := astjson.MergeValues(a, cachedValue, freshValue) + if err != nil { + return freshValue + } + return merged +} + +// cacheKeysToNegativeEntries collects L2 cache entries for null entity responses (negative caching). +// Only entries flagged with NegativeCacheHit are included. +// Most negative-cache entries store the literal null sentinel. When the same cache key already has +// positive entity data beyond its key fields, keep that object shape and materialize the requested +// nullable fields as explicit nulls. That lets later shared-key reads preserve the parent/root shape +// without turning key-only scaffolding into a false positive cache hit. +func (l *Loader) cacheKeysToNegativeEntries(a arena.Arena, res *result, cacheKeys []*CacheKey) []*CacheEntry { + var out []*CacheEntry + seen := make(map[string]struct{}) + for i := range cacheKeys { + if !cacheKeys[i].NegativeCacheHit { + continue + } + value := l.negativeCachePositiveValue(a, res, cacheKeys[i]) + if len(value) == 0 { + value = []byte("null") + } + for _, keyStr := range cacheKeys[i].Keys { + if _, ok := seen[keyStr]; ok { + continue + } + seen[keyStr] = struct{}{} + // Clone per entry: multiple keys in the same iteration would otherwise + // alias one slice, letting external cache implementations that retain + // Value leak mutations across keys. + out = append(out, &CacheEntry{ + Key: keyStr, + Value: bytes.Clone(value), + }) + } + } + return out +} + +// negativeCachePositiveValue reuses an existing object-shaped payload for negative-cache writes +// only when it carries data beyond the entity key fields. Key-only payloads still collapse to the +// literal null sentinel so later reads do not treat bare identity scaffolding as a full entity hit. +func (l *Loader) negativeCachePositiveValue(a arena.Arena, res *result, ck *CacheKey) []byte { + if !cacheKeyHasPositiveEntityData(ck) { + return nil + } + entity := ck.Item + if entity == nil { + entity = ck.FromCache + } + if entity == nil { + return nil + } + if len(ck.EntityMergePath) > 0 { + entity = entity.Get(ck.EntityMergePath...) + } + if entity == nil || entity.Type() != astjson.TypeObject { + return nil + } + cloned, err := astjson.ParseBytesWithArena(a, entity.MarshalTo(nil)) + if err != nil { + return nil + } + l.materializeNullableFieldsAsNull(a, cloned, res.providesData) + return cloned.MarshalTo(nil) +} + +// materializeNullableFieldsAsNull fills in missing nullable fields before storing an object-shaped +// negative-cache value. Later validation can then satisfy the same selection set from cache, while +// still leaving non-null or otherwise unproven fields absent so they continue to force a refetch. +func (l *Loader) materializeNullableFieldsAsNull(a arena.Arena, entity *astjson.Value, obj *Object) { + if entity == nil || obj == nil || entity.Type() != astjson.TypeObject { + return + } + for _, field := range obj.Fields { + fieldName := l.cacheFieldName(field) + fieldValue := entity.Get(fieldName) + if fieldValue != nil { + if nested, ok := field.Value.(*Object); ok { + l.materializeNullableFieldsAsNull(a, fieldValue, nested) + } + continue + } + if field.Value.NodeNullable() { + entity.Set(a, fieldName, astjson.NullValue) + } + } +} + +// cacheKeyHasPositiveEntityData reports whether either cached or fresh payload already contains +// fields beyond the entity key itself, making it safe to preserve an object shape for negative caching. +func cacheKeyHasPositiveEntityData(ck *CacheKey) bool { + if ck == nil { + return false + } + return entityValueHasNonKeyFields(ck.FromCache, ck) || entityValueHasNonKeyFields(ck.Item, ck) +} + +func entityValueHasNonKeyFields(value *astjson.Value, ck *CacheKey) bool { + if value == nil { + return false + } + entity := value + if len(ck.EntityMergePath) > 0 { + entity = value.Get(ck.EntityMergePath...) + } + if entity == nil || entity.Type() != astjson.TypeObject { + return false + } + allowed := allowedEntityKeyFields(ck.Keys) + entityObject := map[string]json.RawMessage{} + if err := json.Unmarshal(entity.MarshalTo(nil), &entityObject); err != nil { + return false + } + for fieldName := range entityObject { + if _, ok := allowed[fieldName]; !ok { + return true + } + } + return false +} + +func allowedEntityKeyFields(keys []string) map[string]struct{} { + allowed := map[string]struct{}{ + "__typename": {}, + } + if len(keys) == 0 { + return allowed + } + entityKey := keys[0] + start := strings.IndexByte(entityKey, '{') + if start == -1 { + return allowed + } + var decoded struct { + Key map[string]json.RawMessage `json:"key"` + } + if err := json.Unmarshal([]byte(entityKey[start:]), &decoded); err != nil { + return allowed + } + for fieldName := range decoded.Key { + allowed[fieldName] = struct{}{} + } + return allowed +} + +// prepareCacheKeys generates cache keys for L1 and/or L2 based on configuration. +// Called on main thread before any cache lookups. +// Sets res.l1CacheKeys for L1 lookup (no prefix) and res.l2CacheKeys for L2 lookup (with prefix). +// Returns isEntityFetch to indicate if this fetch supports L1 caching. +func (l *Loader) prepareCacheKeys(info *FetchInfo, cfg FetchCacheConfiguration, inputItems []*astjson.Value, res *result) (isEntityFetch bool, err error) { + if cfg.CacheKeyTemplate == nil { + return false, nil + } + + // Skip all cache operations if both L1 and L2 are disabled + if !l.ctx.ExecutionOptions.Caching.EnableL1Cache && !l.ctx.ExecutionOptions.Caching.EnableL2Cache { + return false, nil + } + + res.cacheConfig = cfg + if info != nil { + res.providesData = info.ProvidesData + } + + // Check if this is an entity fetch (L1 only applies to entity fetches) + isEntity := cfg.isEntityFetch() + + // Set analytics entity type for cache event recording + if l.ctx.cacheAnalyticsEnabled() && info != nil && len(info.RootFields) > 0 { + res.analyticsEntityType = info.RootFields[0].TypeName + } + + // Always generate cache keys (needed for merging cached data into response) + // For entity fetches and root fetches: uses keys without prefix for L1 + res.l1CacheKeys, err = cfg.CacheKeyTemplate.RenderCacheKeys(l.jsonArena, l.ctx, inputItems, "") + if err != nil { + return false, err + } + + // Generate L2 keys (with prefix for cache isolation) + if l.ctx.ExecutionOptions.Caching.EnableL2Cache { + // Get cache first to ensure it exists + if l.caches != nil { + res.cache = l.caches[cfg.CacheName] + } + if res.cache != nil { + // Calculate prefix for L2 (global prefix + subgraph header isolation) + var prefix string + globalPrefix := l.ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix + if cfg.IncludeSubgraphHeaderPrefix && l.ctx.SubgraphHeadersBuilder != nil { + _, headersHash := l.ctx.SubgraphHeadersBuilder.HeadersForSubgraph(info.DataSourceName) + var buf [20]byte + b := strconv.AppendUint(buf[:0], headersHash, 10) + if globalPrefix != "" { + prefix = globalPrefix + ":" + string(b) + } else { + prefix = string(b) + } + res.headerHash = headersHash + // Record that header partitioning is active so the WRITE path + // (rootFieldL2CachePrefix) can build the same prefix even when + // headersHash == 0 (no headers forwarded but partitioning is on). + res.includeHeaderPrefix = true + } else if globalPrefix != "" { + prefix = globalPrefix + } + + // Render L2 cache keys with prefix + res.l2CacheKeys, err = cfg.CacheKeyTemplate.RenderCacheKeys(l.jsonArena, l.ctx, inputItems, prefix) + if err != nil { + return false, err + } + + // Apply user-provided L2 cache key interceptor + if interceptor := l.ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor; interceptor != nil { + interceptorInfo := L2CacheKeyInterceptorInfo{ + SubgraphName: info.DataSourceName, + CacheName: cfg.CacheName, + } + for _, ck := range res.l2CacheKeys { + for i, key := range ck.Keys { + ck.Keys[i] = interceptor(l.ctx.ctx, key, interceptorInfo) + } + } + } + } + } + + if cfg.hasBatchEntityKey() { + cacheKeys := res.l1CacheKeys + if len(cacheKeys) == 0 { + cacheKeys = res.l2CacheKeys + } + if len(cacheKeys) == 0 || (len(cacheKeys) > 0 && cacheKeys[0] != nil && cacheKeys[0].Item == nil) { + res.batchEntityKeyMode = true + res.batchMergePath = res.postProcessing.MergePath + if cfg.PartialBatchLoad && !cfg.ShadowMode { + res.batchPartialFetchEnabled = true + } + } + } + + // When root field uses entity key mapping, set EntityMergePath so that + // store/load can extract/wrap entity-level data at the merge path. + if entityPath := cfg.entityMergePath(res.postProcessing); len(entityPath) > 0 { + // Determine the path to extract entity data from the merged response. + // If MergePath is set (e.g. ["user"]), use it directly. + // Otherwise, the entity data is nested under the root field name in the response + // (e.g. for field "user", response is {"user":{...}} and entity data is at ["user"]). + for _, ck := range res.l1CacheKeys { + ck.EntityMergePath = entityPath + } + for _, ck := range res.l2CacheKeys { + ck.EntityMergePath = entityPath + } + } + + // Transform construction is now ephemeral — built and consumed + // inline at each cache operation site via structuralCopyNormalized / + // structuralCopyDenormalized. No need to pre-build and store on res. + + return isEntity, nil +} + +// tryCacheLoad orchestrates cache lookups for sequential execution paths. +// Uses the 3-function approach: prepareCacheKeys -> tryL1CacheLoad -> tryL2CacheLoad +// Returns skipFetch=true if cache provides complete data. +// +// IMPORTANT: This function is for SEQUENTIAL execution only (main thread). +// For PARALLEL execution, use prepareCacheKeys + tryL1CacheLoad on main thread, +// then tryL2CacheLoad in goroutines. +// +// Lookup Order (entity fetches): L1 -> L2 -> Subgraph Fetch +// Lookup Order (root fetches): L2 -> Subgraph Fetch (no L1) +func (l *Loader) tryCacheLoad(ctx context.Context, info *FetchInfo, cfg FetchCacheConfiguration, inputItems []*astjson.Value, res *result) (skipFetch bool, err error) { + tracingCache := l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats + if tracingCache { + res.cacheTraceDurationSinceStartNano = GetDurationNanoSinceTraceStart(l.ctx.ctx) + defer func() { + res.cacheTraceDurationNano = GetDurationNanoSinceTraceStart(l.ctx.ctx) - res.cacheTraceDurationSinceStartNano + }() + } + + // Step 1: Prepare cache keys for L1 and L2 + isEntityFetch, err := l.prepareCacheKeys(info, cfg, inputItems, res) + if err != nil { + return false, err + } + + // Set entity count from cache keys + if len(res.l2CacheKeys) > 0 { + for _, ck := range res.l2CacheKeys { + res.cacheTraceEntityCount += len(ck.Keys) + } + } else if len(res.l1CacheKeys) > 0 { + for _, ck := range res.l1CacheKeys { + res.cacheTraceEntityCount += len(ck.Keys) + } + } + + // No cache keys generated - nothing to do + if len(res.l1CacheKeys) == 0 && len(res.l2CacheKeys) == 0 { + if res.batchEntityKeyMode { + res.cacheSkipFetch = true + return true, nil + } + return false, nil + } + + // Set partial loading flag BEFORE cache lookup so tracking arrays are populated + // Shadow mode forces partial loading off - all items always fetched + if cfg.ShadowMode { + res.partialCacheEnabled = false + } else { + res.partialCacheEnabled = cfg.EnablePartialCacheLoad + } + + // Step 2: L1 Check (per-request, in-memory) - entity fetches only + // Safe to call: this is sequential execution on main thread + // UseL1Cache flag is set by postprocessor to optimize L1 usage + if isEntityFetch && l.ctx.ExecutionOptions.Caching.EnableL1Cache && cfg.UseL1Cache && len(res.l1CacheKeys) > 0 { + allComplete := l.tryL1CacheLoad(info, res.l1CacheKeys, res) + if allComplete { + // All entities found in L1 with complete data - skip fetch + res.cacheSkipFetch = true + return true, nil + } + + if res.partialCacheEnabled && len(res.cachedItemIndices) > 0 { + // Partial hit with partial loading enabled + // cachedItemIndices and fetchItemIndices already populated by tryL1CacheLoad + // Keep FromCache values for cached items, proceed to fetch only missing items + res.cacheMustBeUpdated = true + return false, nil + } + + // All-or-nothing mode OR no hits - clear FromCache and try L2 + for _, ck := range res.l1CacheKeys { + ck.FromCache = nil + } + res.cachedItemIndices = nil + res.fetchItemIndices = nil + } + + // Step 3: L2 Check (external cache) - if L1 missed + // Safe to call: this is sequential execution on main thread + if l.ctx.ExecutionOptions.Caching.EnableL2Cache && len(res.l2CacheKeys) > 0 { + skipFetch, err = l.tryL2CacheLoad(ctx, info, res) + // Merge L2 analytics events and entity sources (sequential path, always on main thread) + if l.ctx.cacheAnalyticsEnabled() { + if len(res.l2AnalyticsEvents) > 0 { + l.ctx.cacheAnalytics.MergeL2Events(res.l2AnalyticsEvents) + res.l2AnalyticsEvents = nil + } + if len(res.l2EntitySources) > 0 { + l.ctx.cacheAnalytics.MergeEntitySources(res.l2EntitySources) + res.l2EntitySources = nil + } + if len(res.l2FetchTimings) > 0 { + l.ctx.cacheAnalytics.MergeL2FetchTimings(res.l2FetchTimings) + res.l2FetchTimings = nil + } + if len(res.l2ErrorEvents) > 0 { + l.ctx.cacheAnalytics.MergeL2Errors(res.l2ErrorEvents) + res.l2ErrorEvents = nil + } + } + if err != nil || skipFetch { + return skipFetch, err + } + + if res.partialCacheEnabled && len(res.cachedItemIndices) > 0 { + // Partial hit from L2 with partial loading enabled + // Keep FromCache values, return false to proceed with fetch for missing items + return false, nil + } + + if res.batchPartialFetchEnabled && len(res.batchCachedIndices) > 0 { + // Batch partial hit: some entities cached, some need fetching + // Keep FromCache values, return false to proceed with fetch for missing IDs + return false, nil + } + } + + // Both missed - fetch required + res.cacheMustBeUpdated = true + return false, nil +} + +// tryL1CacheLoad attempts to load all items from the L1 (per-request) cache. +// MUST be called from main thread only (L1 stats are not atomic). +// Tracks per-entity hits/misses: HIT if entity found with complete data, MISS otherwise. +// Returns true only if ALL items are found in cache with complete data for the fetch. +// L1 uses cache keys WITHOUT subgraph header prefix (same request context). +// NOTE: Only called for entity fetches, not root fetches. +// When res.partialCacheEnabled is true, populates res.cachedItemIndices and res.fetchItemIndices +// to track which items were cached vs need fetching. +func (l *Loader) tryL1CacheLoad(info *FetchInfo, cacheKeys []*CacheKey, res *result) bool { + if info == nil || info.OperationType != ast.OperationTypeQuery { + return false + } + + tracingCache := l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats + + // Extract entity type and data source for analytics + var entityType, dataSource string + if l.ctx.cacheAnalyticsEnabled() { + if len(info.RootFields) > 0 { + entityType = info.RootFields[0].TypeName + } + dataSource = info.DataSourceName + } + + allComplete := true + for i, ck := range cacheKeys { + var foundComplete bool + for _, keyStr := range ck.Keys { + if cachedValue, ok := l.l1Cache[keyStr]; ok { + if cachedValue == nil { + continue + } + // Widening check operates on the stored cache pointer directly (read-only). + if info.ProvidesData != nil && !l.validateItemHasRequiredData(cachedValue, info.ProvidesData) { + continue + } + // L1 READ: structural copy with denormalize passthrough (schema→alias). + // L1 stores schema-shape names with all fields (passthrough write). + // Denormalize renames known fields back to aliases while keeping + // unlisted fields intact — they may be needed by later fetches. + ck.FromCache = l.structuralCopyDenormalizedPassthrough(cachedValue, res.providesData) + + analyticsEnabled := l.ctx.cacheAnalyticsEnabled() + var byteSize int + if analyticsEnabled || tracingCache { + byteSize = len(cachedValue.MarshalTo(nil)) + } + if analyticsEnabled { + l.ctx.cacheAnalytics.RecordL1KeyEvent(CacheKeyHit, entityType, keyStr, dataSource, byteSize) + if len(res.cacheConfig.KeyFields) > 0 { + keyJSON := buildEntityKeyJSON(cachedValue, res.cacheConfig.KeyFields) + if len(keyJSON) > 0 { + l.ctx.cacheAnalytics.RecordEntitySource(entityType, string(keyJSON), FieldSourceL1) + } + } + } + if tracingCache { + res.cacheTraceL1Hits++ + if !l.ctx.TracingOptions.ExcludeRawInputData && len(ck.Keys) > 0 { + res.cacheTraceEntityDetails = append(res.cacheTraceEntityDetails, CacheTraceEntity{ + Key: ck.Keys[0], + Source: "l1", + ByteSize: byteSize, + }) + } + } + foundComplete = true + break + } + } + + if foundComplete { + // Track cached item index when partial loading enabled + if res.partialCacheEnabled { + res.cachedItemIndices = append(res.cachedItemIndices, i) + } + } else { + allComplete = false + if l.ctx.cacheAnalyticsEnabled() && len(ck.Keys) > 0 { + l.ctx.cacheAnalytics.RecordL1KeyEvent(CacheKeyMiss, entityType, ck.Keys[0], dataSource, 0) + } + if tracingCache { + res.cacheTraceL1Misses++ + } + // Track fetch item index when partial loading enabled + if res.partialCacheEnabled { + res.fetchItemIndices = append(res.fetchItemIndices, i) + } + } + } + return allComplete +} + +type l2CacheLookupState struct { + analyticsEnabled bool + tracingCache bool + shadowMode bool + hasAliases bool + entityType string + dataSource string + remainingTTLs map[string]time.Duration + batchEntityProvidesData *Object +} + +// tryL2CacheLoad checks the external (L2) cache for entity data. +// Thread-safe: can be called from parallel goroutines (uses atomic L2 stats). +// Expects res.l2CacheKeys to be pre-populated by prepareCacheKeys(). +// Uses subgraph header prefix for cache key isolation across different configurations. +func (l *Loader) tryL2CacheLoad(ctx context.Context, info *FetchInfo, res *result) (skipFetch bool, err error) { + // Skip L2 cache reads for mutations - always fetch fresh data from subgraph. + // We check l.info (root operation type), not info (per-fetch type), because + // nested entity fetches within mutations have OperationType=Query. + // NOTE: L2 cache WRITES are NOT skipped for mutations (see updateL2Cache). + // This is intentional: mutations produce fresh data that should populate L2 + // so subsequent queries benefit from the updated cache. + // Subscriptions are allowed to read from L2 cache because their child entity + // fetches are read operations, just like queries. + if l.info != nil && l.info.OperationType == ast.OperationTypeMutation { + res.cacheMustBeUpdated = true + return false, nil + } + + // L2 keys should be pre-populated by prepareCacheKeys + if len(res.l2CacheKeys) == 0 || res.cache == nil { + res.cacheMustBeUpdated = true + return false, nil + } + + tracingCache := l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats + + cacheKeyStrings := l.extractCacheKeysStrings(l.jsonArena, res.l2CacheKeys) + // Skip the L2 round-trip when there's nothing to look up. + // The empty-slice case is the "no keys wired up" path; the all-empty-string case + // guards against CacheKey entries that never got rendered (e.g., a template missed + // a required variable). Either way, sending empty keys to the backend is at best + // a wasted round-trip and at worst interpreted by a backend as a request for an + // entry keyed by "" — skip cleanly instead. + if len(cacheKeyStrings) == 0 || !hasNonEmptyKey(cacheKeyStrings) { + res.cacheMustBeUpdated = true + return false, nil + } + + // Extract entity type and data source for analytics (read-only, goroutine-safe) + analyticsEnabled := l.ctx.cacheAnalyticsEnabled() + var entityType, dataSource string + if analyticsEnabled && info != nil { + if len(info.RootFields) > 0 { + entityType = info.RootFields[0].TypeName + } + dataSource = info.DataSourceName + } + + // Get cache entries from L2 + var l2GetStart time.Time + if analyticsEnabled || tracingCache { + l2GetStart = time.Now() + } + if tracingCache { + res.cacheTraceL2GetAttempted = true + } + cacheEntries, err := res.cache.Get(ctx, cacheKeyStrings) + if analyticsEnabled { + res.l2FetchTimings = append(res.l2FetchTimings, FetchTimingEvent{ + DataSource: dataSource, + EntityType: entityType, + DurationMs: time.Since(l2GetStart).Milliseconds(), + Source: FieldSourceL2, + ItemCount: len(cacheKeyStrings), + IsEntityFetch: len(res.l1CacheKeys) > 0, + }) + } + if tracingCache { + res.cacheTraceL2GetDuration = time.Since(l2GetStart) + } + if err != nil { + // L2 cache errors are non-fatal, continue to fetch. + // Circuit-breaker-open is not a backend error — skip analytics/trace error recording. + if !errors.Is(err, ErrCircuitBreakerOpen) { + if analyticsEnabled { + res.l2CacheOpErrors = append(res.l2CacheOpErrors, CacheOperationError{ + Operation: "get", + CacheName: res.cacheConfig.CacheName, + EntityType: entityType, + DataSource: dataSource, + Message: truncateErrorMessage(err.Error(), 256), + ItemCount: len(cacheKeyStrings), + }) + } + if tracingCache { + res.cacheTraceL2GetError = err.Error() + } + } + res.cacheMustBeUpdated = true + return false, nil + } + + // Populate FromCache fields in L2 CacheKeys (which have prefixed keys) + err = l.populateFromCache(l.jsonArena, res.l2CacheKeys, cacheEntries) + if err != nil { + res.cacheMustBeUpdated = true + return false, nil + } + + state := l.prepareL2LookupState(info, res, cacheEntries, analyticsEnabled, tracingCache, entityType, dataSource) + + // Copy FromCache values from L2 keys to L1 keys (if L1 keys exist) and track per-entity hits/misses + // The keys have the same structure, just different key strings. + var allComplete bool + if len(res.l1CacheKeys) > 0 && !res.batchEntityKeyMode { + allComplete = l.applyEntityFetchL2Results(info, res, state) + } else { + allComplete = l.applyRootFetchL2Results(info, res, state) + } + + // Shadow mode: even if all items were found in cache, we still need to fetch + // fresh data for comparison. Clear FromCache and force fetch. + if state.shadowMode { + for _, ck := range res.l1CacheKeys { + ck.FromCache = nil + } + res.cachedItemIndices = nil + res.fetchItemIndices = nil + res.cacheSkipFetch = false + res.cacheMustBeUpdated = true + return false, nil + } + + if allComplete { + res.cacheSkipFetch = true + if hasMissingRequestedKeys(res.l2CacheKeys) || needsResolvedCacheWriteback(res.l2CacheKeys) { + res.cacheMustBeUpdated = true + } + return true, nil + } + + res.cacheMustBeUpdated = true + return false, nil +} + +// bulkL2Lookup performs the L2 cache read for a parallel batch of fetches in +// a single bulk cache.Get per cache instance, on the main thread, using +// l.parser and l.jsonArena. After this call, every result in `results` has +// res.cacheSkipFetch set correctly and the L2 analytics events accumulated. +// +// Skipped per result: +// - res.cache == nil (no L2 enabled for this fetch) +// - res.fetchSkipped (Phase 1.5 already satisfied via @requestScoped) +// - res.cacheSkipFetch (L1 was a complete hit in Phase 1) +// - mutation root operation (l.info.OperationType == ast.OperationTypeMutation) +// +// Behavior on bulk Get failure: every fetch that requested the failing cache +// instance gets res.cacheMustBeUpdated = true and proceeds to subgraph fetch. +func (l *Loader) bulkL2Lookup(ctx context.Context, nodes []*FetchTreeNode, results []*result) error { + if len(results) == 0 { + return nil + } + if l.info != nil && l.info.OperationType == ast.OperationTypeMutation { + // Mutations skip L2 reads (existing behavior, see tryL2CacheLoad). + for _, res := range results { + if res != nil { + res.cacheMustBeUpdated = true + } + } + return nil + } + + // Phase A: build per-cache-instance plans. + type planEntry struct { + cache LoaderCache + keys []string // deduplicated, deterministic order + owners map[string][]int // key -> list of fetch indices that requested it + } + plans := make(map[LoaderCache]*planEntry) + + for i, res := range results { + if res == nil || res.cache == nil { + continue + } + if res.fetchSkipped || res.cacheSkipFetch { + continue + } + if len(res.l2CacheKeys) == 0 { + res.cacheMustBeUpdated = true + continue + } + plan, ok := plans[res.cache] + if !ok { + plan = &planEntry{cache: res.cache, owners: make(map[string][]int)} + plans[res.cache] = plan + } + for _, ck := range res.l2CacheKeys { + for _, key := range ck.Keys { + if _, seen := plan.owners[key]; !seen { + plan.keys = append(plan.keys, key) + } + plan.owners[key] = append(plan.owners[key], i) + } + } + } + if len(plans) == 0 { + return nil + } + + type indexedEntries struct { + byKey map[string]*CacheEntry + } + indexes := make(map[LoaderCache]indexedEntries, len(plans)) + tracingCache := l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats + analyticsEnabled := l.ctx.cacheAnalyticsEnabled() + + for _, plan := range plans { + // Pre-compute unique fetch indices for this plan. + seenFetchIdx := make(map[int]struct{}, 8) + for _, fetchIndices := range plan.owners { + for _, i := range fetchIndices { + seenFetchIdx[i] = struct{}{} + } + } + + if tracingCache { + for i := range seenFetchIdx { + if i >= 0 && i < len(results) && results[i] != nil { + results[i].cacheTraceL2GetAttempted = true + } + } + } + var bulkGetStart time.Time + if analyticsEnabled || tracingCache { + bulkGetStart = time.Now() + } + + entries, err := plan.cache.Get(ctx, plan.keys) + + var bulkGetDuration time.Duration + if analyticsEnabled || tracingCache { + bulkGetDuration = time.Since(bulkGetStart) + } + + // Attribute timing per-fetch. + if tracingCache { + for i := range seenFetchIdx { + if i >= 0 && i < len(results) && results[i] != nil { + results[i].cacheTraceL2GetDuration = bulkGetDuration + } + } + } + if analyticsEnabled { + for i := range seenFetchIdx { + if i < 0 || i >= len(results) || results[i] == nil { + continue + } + res := results[i] + perFetchKeyCount := countUniqueCacheKeyStrings(res.l2CacheKeys) + res.l2FetchTimings = append(res.l2FetchTimings, FetchTimingEvent{ + DataSource: res.ds.Name, + EntityType: res.analyticsEntityType, + DurationMs: bulkGetDuration.Milliseconds(), + Source: FieldSourceL2, + ItemCount: perFetchKeyCount, + IsEntityFetch: len(res.l1CacheKeys) > 0, + }) + } + } + + if err != nil { + // Circuit-breaker-open is not a backend error — treat as a clean miss. + breakerOpen := errors.Is(err, ErrCircuitBreakerOpen) + for i := range seenFetchIdx { + if i < 0 || i >= len(results) || results[i] == nil { + continue + } + res := results[i] + res.cacheMustBeUpdated = true + if breakerOpen { + continue + } + if tracingCache { + res.cacheTraceL2GetError = err.Error() + } + if analyticsEnabled { + perFetchKeyCount := countUniqueCacheKeyStrings(res.l2CacheKeys) + res.l2CacheOpErrors = append(res.l2CacheOpErrors, CacheOperationError{ + Operation: "get", + CacheName: res.cacheConfig.CacheName, + EntityType: res.analyticsEntityType, + DataSource: res.ds.Name, + Message: truncateErrorMessage(err.Error(), 256), + ItemCount: perFetchKeyCount, + }) + } + } + continue + } + idx := indexedEntries{byKey: make(map[string]*CacheEntry, len(entries))} + for _, e := range entries { + if e != nil { + idx.byKey[e.Key] = e + } + } + indexes[plan.cache] = idx + } + + // Phase C: per-fetch — populate FromCache, parse VERBATIM on l.parser/l.jsonArena. + for i, res := range results { + if res == nil || res.cache == nil { + continue + } + if res.fetchSkipped || res.cacheSkipFetch { + continue + } + if len(res.l2CacheKeys) == 0 { + continue + } + idx, ok := indexes[res.cache] + if !ok { + // Get failed earlier — already marked cacheMustBeUpdated above. + continue + } + + info := res.fetchInfo + + if err := l.populateFromCacheBulk(l.jsonArena, res, idx.byKey); err != nil { + res.cacheMustBeUpdated = true + continue + } + + state := l.prepareL2LookupState(info, res, nil, analyticsEnabled, tracingCache, res.analyticsEntityType, res.ds.Name) + + var allComplete bool + if len(res.l1CacheKeys) > 0 && !res.batchEntityKeyMode { + allComplete = l.applyEntityFetchL2Results(info, res, state) + } else { + allComplete = l.applyRootFetchL2Results(info, res, state) + } + + if state.shadowMode { + for _, ck := range res.l1CacheKeys { + ck.FromCache = nil + } + res.cachedItemIndices = nil + res.fetchItemIndices = nil + res.cacheSkipFetch = false + res.cacheMustBeUpdated = true + continue + } + + if allComplete { + res.cacheSkipFetch = true + // Attach cached output to trace — previously done in loadFetchL2Only. + if i >= 0 && i < len(nodes) && nodes[i] != nil && nodes[i].Item != nil { + l.attachCachedOutputToTrace(nodes[i].Item.Fetch, res) + } + if hasMissingRequestedKeys(res.l2CacheKeys) || needsResolvedCacheWriteback(res.l2CacheKeys) { + res.cacheMustBeUpdated = true + } + continue + } + res.cacheMustBeUpdated = true + } + + return nil +} + +// populateFromCacheBulk fills cacheKeys[].FromCache / fromCacheCandidates / +// missingKeys from a pre-indexed map of cache entries. Parses each candidate +// VERBATIM (no Transform) onto the given arena via l.parser. +func (l *Loader) populateFromCacheBulk(a arena.Arena, res *result, byKey map[string]*CacheEntry) error { + return l.populateCacheKeysFromIndex(a, res.l2CacheKeys, byKey) +} + +func (l *Loader) prepareL2LookupState(info *FetchInfo, res *result, cacheEntries []*CacheEntry, analyticsEnabled, tracingCache bool, entityType, dataSource string) l2CacheLookupState { + state := l2CacheLookupState{ + analyticsEnabled: analyticsEnabled, + tracingCache: tracingCache, + shadowMode: res.cacheConfig.ShadowMode, + hasAliases: info != nil && info.ProvidesData != nil && info.ProvidesData.HasAliases, + entityType: entityType, + dataSource: dataSource, + } + + if res.batchEntityKeyMode && len(res.l2CacheKeys) > 0 { + state.batchEntityProvidesData = batchEntityValidationObject(info.ProvidesData, res.l2CacheKeys[0].EntityMergePath) + } + + // When EntityMergePath is set, the cache stores entity-level data (e.g. {"id":"1234","username":"Me"}). + // Root field fetches need response-level data (e.g. {"user":{"id":"1234","username":"Me"}}), + // so wrap the cached entity data back at the merge path before validation. + // Batch entity key lookups keep entity-level values because each cache entry represents + // one array element rather than a complete root field response. + if !res.batchEntityKeyMode { + for _, ck := range res.l2CacheKeys { + if len(ck.EntityMergePath) > 0 && ck.FromCache != nil { + ck.FromCache = wrapCacheValueAtMergePath(l.jsonArena, ck.FromCache, ck.EntityMergePath) + } + } + } + + if analyticsEnabled { + if cacheEntries != nil { + // Sequential path: build from the raw entries returned by tryL2CacheLoad. + state.remainingTTLs = make(map[string]time.Duration, len(cacheEntries)) + for _, entry := range cacheEntries { + if entry != nil && entry.RemainingTTL > 0 { + state.remainingTTLs[entry.Key] = entry.RemainingTTL + } + } + } else { + // Bulk path: derive from the freshest candidate already attached to + // each CacheKey by populateFromCacheBulk. + state.remainingTTLs = make(map[string]time.Duration, len(res.l2CacheKeys)) + for _, ck := range res.l2CacheKeys { + if ck == nil || ck.fromCacheRemainingTTL <= 0 || len(ck.Keys) == 0 { + continue + } + state.remainingTTLs[ck.Keys[0]] = ck.fromCacheRemainingTTL + } + } + } + + return state +} + +// selectBestCacheCandidate decides whether the freshest candidate already +// attached to ck.FromCache is usable as a full hit (true) or must be treated +// as a partial hit (false). When ProvidesData is absent the check is a no-op +// and the value is accepted as-is. When ProvidesData is present the multi- +// candidate walk runs, which may swap ck.FromCache to an older candidate that +// covers the required fields. +func (l *Loader) selectBestCacheCandidate(info *FetchInfo, ck *CacheKey) bool { + if info == nil || info.ProvidesData == nil { + return true + } + return l.resolveMultiCandidateCacheValue(l.jsonArena, ck, info.ProvidesData) +} + +func (l *Loader) applyEntityFetchL2Results(info *FetchInfo, res *result, state l2CacheLookupState) bool { + allComplete := true + + for i := range res.l1CacheKeys { + if i >= len(res.l2CacheKeys) { + continue + } + + res.l1CacheKeys[i].FromCache = res.l2CacheKeys[i].FromCache + res.l1CacheKeys[i].missingKeys = res.l2CacheKeys[i].missingKeys + res.l1CacheKeys[i].cachedData = res.l2CacheKeys[i].cachedData + + if res.l1CacheKeys[i].FromCache == nil { + if state.analyticsEnabled && len(res.l1CacheKeys[i].Keys) > 0 { + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: res.l1CacheKeys[i].Keys[0], EntityType: state.entityType, + Kind: CacheKeyMiss, DataSource: state.dataSource, ByteSize: 0, + Shadow: state.shadowMode, + }) + } + if state.tracingCache { + res.cacheTraceL2Misses++ + } + allComplete = false + if res.partialCacheEnabled { + res.fetchItemIndices = append(res.fetchItemIndices, i) + } + continue + } + + if res.l1CacheKeys[i].FromCache.Type() == astjson.TypeNull && res.cacheConfig.NegativeCacheTTL > 0 { + if state.analyticsEnabled && len(res.l1CacheKeys[i].Keys) > 0 { + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: res.l1CacheKeys[i].Keys[0], EntityType: state.entityType, + Kind: CacheKeyHit, DataSource: state.dataSource, ByteSize: 4, + Shadow: state.shadowMode, + }) + } + if state.tracingCache { + res.cacheTraceNegativeHits++ + if !l.ctx.TracingOptions.ExcludeRawInputData && len(res.l1CacheKeys[i].Keys) > 0 { + res.cacheTraceEntityDetails = append(res.cacheTraceEntityDetails, CacheTraceEntity{ + Key: res.l1CacheKeys[i].Keys[0], + Source: "negative_cache", + }) + } + } + if res.partialCacheEnabled { + res.cachedItemIndices = append(res.cachedItemIndices, i) + } + continue + } + + if !l.selectBestCacheCandidate(info, res.l1CacheKeys[i]) { + res.l2CacheKeys[i].FromCache = res.l1CacheKeys[i].FromCache + res.l2CacheKeys[i].cachedData = res.l1CacheKeys[i].cachedData + if state.analyticsEnabled && len(res.l1CacheKeys[i].Keys) > 0 { + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: res.l1CacheKeys[i].Keys[0], EntityType: state.entityType, + Kind: CacheKeyPartialHit, DataSource: state.dataSource, ByteSize: 0, + Shadow: state.shadowMode, + }) + } + allComplete = false + if res.partialCacheEnabled { + res.fetchItemIndices = append(res.fetchItemIndices, i) + } + continue + } + + res.l2CacheKeys[i].FromCache = res.l1CacheKeys[i].FromCache + res.l2CacheKeys[i].fromCacheRemainingTTL = res.l1CacheKeys[i].fromCacheRemainingTTL + res.l2CacheKeys[i].fromCacheNeedsWriteback = res.l1CacheKeys[i].fromCacheNeedsWriteback + + if state.hasAliases { + res.l1CacheKeys[i].FromCache = l.structuralCopyDenormalizedPassthrough(res.l1CacheKeys[i].FromCache, res.providesData) + } + + var byteSize int + if (state.analyticsEnabled || state.tracingCache) && len(res.l1CacheKeys[i].Keys) > 0 { + byteSize = len(res.l1CacheKeys[i].FromCache.MarshalTo(nil)) + } + + if state.analyticsEnabled && len(res.l1CacheKeys[i].Keys) > 0 { + var cacheAgeMs int64 + if len(res.l2CacheKeys[i].Keys) > 0 { + cacheAgeMs = computeCacheAgeMs(state.remainingTTLs[res.l2CacheKeys[i].Keys[0]], res.cacheConfig.TTL) + } + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: res.l1CacheKeys[i].Keys[0], EntityType: state.entityType, + Kind: CacheKeyHit, DataSource: state.dataSource, ByteSize: byteSize, + CacheAgeMs: cacheAgeMs, Shadow: state.shadowMode, + }) + if len(res.cacheConfig.KeyFields) > 0 { + keyJSON := buildEntityKeyJSON(res.l1CacheKeys[i].FromCache, res.cacheConfig.KeyFields) + if len(keyJSON) > 0 { + res.l2EntitySources = append(res.l2EntitySources, entitySourceRecord{ + entityType: state.entityType, keyJSON: string(keyJSON), source: FieldSourceL2, + }) + } + } + } + + if state.shadowMode { + var remaining time.Duration + if len(res.l2CacheKeys[i].Keys) > 0 { + remaining = state.remainingTTLs[res.l2CacheKeys[i].Keys[0]] + } + l.saveShadowCachedValue(res, i, res.l1CacheKeys[i].FromCache, res.l1CacheKeys[i].Keys[0], remaining) + if state.tracingCache { + res.cacheTraceShadowHit = true + } + } + + if state.tracingCache { + res.cacheTraceL2Hits++ + if !l.ctx.TracingOptions.ExcludeRawInputData && len(res.l1CacheKeys[i].Keys) > 0 { + entity := CacheTraceEntity{ + Key: res.l1CacheKeys[i].Keys[0], + Source: "l2", + ByteSize: byteSize, + } + if res.l2CacheKeys[i].fromCacheRemainingTTL > 0 { + entity.RemainingTTLSeconds = res.l2CacheKeys[i].fromCacheRemainingTTL.Seconds() + } + res.cacheTraceEntityDetails = append(res.cacheTraceEntityDetails, entity) + } + } + + if res.partialCacheEnabled { + res.cachedItemIndices = append(res.cachedItemIndices, i) + } + } + + return allComplete +} + +func (l *Loader) applyRootFetchL2Results(info *FetchInfo, res *result, state l2CacheLookupState) bool { + allComplete := true + + for i, ck := range res.l2CacheKeys { + if ck.FromCache == nil { + if state.analyticsEnabled && len(ck.Keys) > 0 { + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: ck.Keys[0], EntityType: state.entityType, + Kind: CacheKeyMiss, DataSource: state.dataSource, ByteSize: 0, + Shadow: state.shadowMode, + }) + } + if state.tracingCache { + res.cacheTraceL2Misses++ + } + allComplete = false + if res.partialCacheEnabled { + res.fetchItemIndices = append(res.fetchItemIndices, i) + } + if res.batchPartialFetchEnabled { + res.batchMissedIndices = append(res.batchMissedIndices, ck.BatchIndex) + } + continue + } + + if ck.FromCache.Type() == astjson.TypeNull && res.cacheConfig.NegativeCacheTTL > 0 { + if state.analyticsEnabled && len(ck.Keys) > 0 { + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: ck.Keys[0], EntityType: state.entityType, + Kind: CacheKeyHit, DataSource: state.dataSource, ByteSize: 4, + Shadow: state.shadowMode, + }) + } + if state.tracingCache { + res.cacheTraceNegativeHits++ + if !l.ctx.TracingOptions.ExcludeRawInputData && len(ck.Keys) > 0 { + res.cacheTraceEntityDetails = append(res.cacheTraceEntityDetails, CacheTraceEntity{ + Key: ck.Keys[0], + Source: "negative_cache", + }) + } + } + if res.partialCacheEnabled { + res.cachedItemIndices = append(res.cachedItemIndices, i) + } + if res.batchPartialFetchEnabled { + res.batchCachedIndices = append(res.batchCachedIndices, ck.BatchIndex) + } + continue + } + + providesDataForValidation := info != nil && info.ProvidesData != nil + cacheHit := !providesDataForValidation || l.resolveMultiCandidateCacheValue(l.jsonArena, ck, info.ProvidesData) + if res.batchEntityKeyMode { + cacheHit = state.batchEntityProvidesData == nil || l.resolveBatchEntityCacheValue(l.jsonArena, ck, state.batchEntityProvidesData) + } + if !cacheHit { + if state.analyticsEnabled && len(ck.Keys) > 0 { + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: ck.Keys[0], EntityType: state.entityType, + Kind: CacheKeyPartialHit, DataSource: state.dataSource, ByteSize: 0, + Shadow: state.shadowMode, + }) + } + allComplete = false + if res.partialCacheEnabled { + res.fetchItemIndices = append(res.fetchItemIndices, i) + } + if res.batchPartialFetchEnabled { + res.batchMissedIndices = append(res.batchMissedIndices, ck.BatchIndex) + } + continue + } + + if state.hasAliases { + if res.batchEntityKeyMode && state.batchEntityProvidesData != nil { + res.l2CacheKeys[i].FromCache = l.structuralCopyDenormalized(ck.FromCache, state.batchEntityProvidesData) + } else { + res.l2CacheKeys[i].FromCache = l.structuralCopyDenormalized(ck.FromCache, res.providesData) + } + } + + var byteSize int + if (state.analyticsEnabled || state.tracingCache) && len(ck.Keys) > 0 { + byteSize = len(res.l2CacheKeys[i].FromCache.MarshalTo(nil)) + } + + if state.analyticsEnabled && len(ck.Keys) > 0 { + cacheAgeMs := computeCacheAgeMs(state.remainingTTLs[ck.Keys[0]], res.cacheConfig.TTL) + res.l2AnalyticsEvents = append(res.l2AnalyticsEvents, CacheKeyEvent{ + CacheKey: ck.Keys[0], EntityType: state.entityType, + Kind: CacheKeyHit, DataSource: state.dataSource, ByteSize: byteSize, + CacheAgeMs: cacheAgeMs, Shadow: state.shadowMode, + }) + if len(res.cacheConfig.KeyFields) > 0 { + walkCachedResponseForSources(res.l2CacheKeys[i].FromCache, res.cacheConfig.KeyFields, state.entityType, FieldSourceL2, &res.l2EntitySources) + } + } + + if state.tracingCache { + res.cacheTraceL2Hits++ + if !l.ctx.TracingOptions.ExcludeRawInputData && len(ck.Keys) > 0 { + entity := CacheTraceEntity{ + Key: ck.Keys[0], + Source: "l2", + ByteSize: byteSize, + } + if ck.fromCacheRemainingTTL > 0 { + entity.RemainingTTLSeconds = ck.fromCacheRemainingTTL.Seconds() + } + res.cacheTraceEntityDetails = append(res.cacheTraceEntityDetails, entity) + } + } + + if res.partialCacheEnabled { + res.cachedItemIndices = append(res.cachedItemIndices, i) + } + if res.batchPartialFetchEnabled { + res.batchCachedIndices = append(res.batchCachedIndices, ck.BatchIndex) + } + } + + return allComplete +} + +// populateL1Cache stores entity data in the L1 (per-request) cache for later reuse. +// Always DeepCopies onto l.jsonArena so the stored value is independent of the +// source tree. When there are aliases / arg-suffix fields, uses the per-fetch +// normalize Transform to produce a cache-shape (schema-named) value upfront. +func (l *Loader) populateL1Cache(fetchItem *FetchItem, res *result) { + if !l.ctx.ExecutionOptions.Caching.EnableL1Cache { + return + } + cfg := getFetchCaching(fetchItem.Fetch) + if !cfg.UseL1Cache { + l.populateL1CacheForRootFieldEntities(fetchItem) + return + } + + info := getFetchInfo(fetchItem.Fetch) + var entityType, dataSource string + if l.ctx.cacheAnalyticsEnabled() && info != nil { + if len(info.RootFields) > 0 { + entityType = info.RootFields[0].TypeName + } + dataSource = info.DataSourceName + } + + analyticsEnabled := l.ctx.cacheAnalyticsEnabled() + + for _, ck := range res.l1CacheKeys { + if ck.Item == nil { + continue + } + // L1 WRITE: structural copy with rename but no projection. + // L1 stores the complete entity (all fields, schema-shape names) + // so subsequent fetches can merge additional fields into it. + // Passthrough mode renames aliased fields to schema names while + // keeping unlisted fields (e.g. @key fields) intact. + stored := l.structuralCopyNormalizedPassthrough(ck.Item, res.providesData) + if stored == nil { + continue + } + + for _, keyStr := range ck.Keys { + byteSize := l1AnalyticsSize(analyticsEnabled, stored) + if existingVal, loaded := l.l1Cache[keyStr]; loaded && existingVal != nil { + // SAFETY: merge into a working copy, never the live cache + // entry. astjson.MergeValues mutates its first argument in + // place and failures are NOT atomic (verified at + // astjson/mergevalues.go:30–74). Merging in place could + // corrupt every sibling L1 key pointing at the same entry. + working := l.parser.StructuralCopy(l.jsonArena, existingVal) + _, err := astjson.MergeValues(l.jsonArena, working, stored) + if err != nil { + l.l1Cache[keyStr] = stored + } else { + l.l1Cache[keyStr] = working + byteSize = l1AnalyticsSize(analyticsEnabled, working) + } + } else { + l.l1Cache[keyStr] = stored + } + if analyticsEnabled { + l.ctx.cacheAnalytics.RecordWrite(CacheWriteEvent{ + CacheKey: keyStr, EntityType: entityType, ByteSize: byteSize, + DataSource: dataSource, CacheLevel: CacheLevelL1, Source: l.cacheOperationSource(), + }) + } + } + } + l.populateL1CacheForRootFieldEntities(fetchItem) +} + +// populateL1CacheForRootFieldEntities populates the L1 cache with entities returned by root fields. +// This allows subsequent entity fetches to benefit from L1 cache hits when the same entities +// were already fetched as part of a root field query. +// +// Root-field L1 promotion requires planner ProvidesData in order to derive the +// entity-shaped Object and build a normalize Transform. When ProvidesData is +// unavailable, promotion is silently skipped rather than storing response-shape +// (aliased) values, which would corrupt subsequent entity-fetch L1 reads. +// rootFieldL1PathGroup collects all entity-type templates that share a response +// field path, so the Transform and entity-Object can be derived once per group. +type rootFieldL1PathGroup struct { + fieldPath []string + // entityType → template + templates map[string]*EntityQueryCacheKeyTemplate +} + +func (l *Loader) populateL1CacheForRootFieldEntities(fetchItem *FetchItem) { + // Only applies to SingleFetch (root field fetches) + singleFetch, ok := fetchItem.Fetch.(*SingleFetch) + if !ok { + return + } + + templates := singleFetch.Caching.RootFieldL1EntityCacheKeyTemplates + if len(templates) == 0 { + return + } + + // Fetch-level guard: ProvidesData is required for normalize-on-write. + if singleFetch.Info == nil || singleFetch.Info.ProvidesData == nil { + return + } + + // Get response data + data := l.resolvable.data + if data == nil { + return + } + + groups := groupRootFieldL1Templates(templates) + + l.processNestedL1Items(singleFetch, data, groups) +} + +// groupRootFieldL1Templates buckets the per-composite-key templates by the +// response field path their entity Object is rooted at, so the Transform and +// entity-shape Object can be derived once per path instead of once per key. +func groupRootFieldL1Templates(templates map[string]CacheKeyTemplate) map[string]*rootFieldL1PathGroup { + groups := map[string]*rootFieldL1PathGroup{} // keyed by joined fieldPath + + for compositeKey, template := range templates { + entityTemplate, ok := template.(*EntityQueryCacheKeyTemplate) + if !ok || entityTemplate.Keys == nil || entityTemplate.Keys.Renderer == nil { + continue + } + obj, ok := entityTemplate.Keys.Renderer.Node.(*Object) + if !ok || len(obj.Path) == 0 { + continue + } + + // Extract entity type from composite key "fieldName:entityType" + _, entityType, ok := strings.Cut(compositeKey, ":") + if !ok { + entityType = compositeKey + } + + pathKey := strings.Join(obj.Path, "/") + g, exists := groups[pathKey] + if !exists { + g = &rootFieldL1PathGroup{ + fieldPath: obj.Path, + templates: map[string]*EntityQueryCacheKeyTemplate{}, + } + groups[pathKey] = g + } + g.templates[entityType] = entityTemplate + } + + return groups +} + +// processNestedL1Items walks each path group, resolves the entity-shape Object +// from the fetch's ProvidesData once per group, then delegates to storeL1Entity +// for each individual entity discovered under that path in the response data. +func (l *Loader) processNestedL1Items(singleFetch *SingleFetch, data *astjson.Value, groups map[string]*rootFieldL1PathGroup) { + for _, g := range groups { + entityObj := batchEntityValidationObject(singleFetch.Info.ProvidesData, g.fieldPath) + if entityObj == nil { + continue + } + entitiesValue := data.Get(g.fieldPath...) + if entitiesValue == nil { + continue + } + + var entities []*astjson.Value + switch entitiesValue.Type() { + case astjson.TypeArray: + entities = entitiesValue.GetArray() + case astjson.TypeObject: + entities = []*astjson.Value{entitiesValue} + default: + continue + } + + for _, entity := range entities { + l.storeL1Entity(entity, entityObj, g.templates) + } + } +} + +// storeL1Entity renders the cache keys for a single response entity and +// performs the first-writer-wins L1 write. Skips entities that are nil, lack a +// __typename, have no matching template, or fail normalization/rendering. +func (l *Loader) storeL1Entity(entity *astjson.Value, entityObj *Object, templatesByType map[string]*EntityQueryCacheKeyTemplate) { + if entity == nil { + return + } + typenameValue := entity.Get("__typename") + if typenameValue == nil { + return + } + entityTemplate, ok := templatesByType[string(typenameValue.GetStringBytes())] + if !ok { + return + } + + // L1 WRITE: structural copy with rename but no projection. + stored := l.structuralCopyNormalizedPassthrough(entity, entityObj) + if stored == nil { + return + } + + cacheKeys, err := entityTemplate.RenderCacheKeys(l.jsonArena, l.ctx, []*astjson.Value{entity}, "") + if err != nil || len(cacheKeys) == 0 { + return + } + + // First-writer-wins semantics: a previous entity-fetch L1 write to the + // same key is not overwritten. + for _, ck := range cacheKeys { + if ck == nil { + continue + } + for _, keyStr := range ck.Keys { + if _, exists := l.l1Cache[keyStr]; !exists { + l.l1Cache[keyStr] = stored + } + } + } +} + +// getFetchInfo extracts FetchInfo from a Fetch interface +func getFetchInfo(fetch Fetch) *FetchInfo { + switch f := fetch.(type) { + case *SingleFetch: + return f.Info + case *EntityFetch: + return f.Info + case *BatchEntityFetch: + return f.Info + } + return nil +} + +// getFetchCaching extracts FetchCacheConfiguration from a Fetch interface +func getFetchCaching(fetch Fetch) FetchCacheConfiguration { + switch f := fetch.(type) { + case *SingleFetch: + return f.Caching + case *EntityFetch: + return f.Caching + case *BatchEntityFetch: + return f.Caching + } + return FetchCacheConfiguration{} +} + +func getFetchPostProcessing(fetch Fetch) PostProcessingConfiguration { + switch f := fetch.(type) { + case *SingleFetch: + return f.PostProcessing + case *EntityFetch: + return f.PostProcessing + case *BatchEntityFetch: + return f.PostProcessing + } + return PostProcessingConfiguration{} +} + +// updateL2Cache writes entity data to the L2 (external) cache. +// This enables cross-request caching via external stores like Redis. +func (l *Loader) updateL2Cache(res *result) { + contributor := l.prepareL2CacheSet(res) + if contributor == nil { + return + } + if l.deferL2CacheWrites { + l.deferredL2CacheSets = append(l.deferredL2CacheSets, contributor) + return + } + l.writeL2CacheSetContributors([]*l2CacheSetContributor{contributor}) +} + +func (l *Loader) prepareL2CacheSet(res *result) *l2CacheSetContributor { + if !l.ctx.ExecutionOptions.Caching.EnableL2Cache { + return nil + } + // Skip L2 cache writes for mutations unless explicitly opted in per-mutation-field. + // The flag is set in resolveSingle when processing the mutation root fetch. + if l.info != nil && l.info.OperationType == ast.OperationTypeMutation && + !l.enableMutationL2CachePopulation { + return nil + } + if res.cache == nil || !res.cacheMustBeUpdated { + return nil + } + + keysToStore := l.prepareL2WriteKeys(res) + if len(keysToStore) == 0 { + return nil + } + + // Convert CacheKeys to CacheEntries + cacheEntries, err := l.cacheKeysToEntriesForUpdate(l.jsonArena, res, keysToStore) + if err != nil { + // Cache update errors are non-fatal - silently ignore + return nil + } + + // Determine effective TTL: use mutation override if set, otherwise entity default + ttl := res.cacheConfig.TTL + if l.enableMutationL2CachePopulation && l.mutationCacheTTLOverride > 0 { + ttl = l.mutationCacheTTLOverride + } + for _, entry := range cacheEntries { + if entry != nil { + entry.TTL = ttl + } + } + + var negEntries []*CacheEntry + if res.cacheConfig.NegativeCacheTTL > 0 { + negEntries = l.cacheKeysToNegativeEntries(l.jsonArena, res, keysToStore) + for _, entry := range negEntries { + if entry != nil { + entry.TTL = res.cacheConfig.NegativeCacheTTL + } + } + } + + if len(cacheEntries) == 0 && len(negEntries) == 0 { + return nil + } + + entries := make([]*CacheEntry, 0, len(cacheEntries)+len(negEntries)) + entries = append(entries, cacheEntries...) + entries = append(entries, negEntries...) + return &l2CacheSetContributor{ + res: res, + entries: entries, + regularEntries: cacheEntries, + negativeEntries: negEntries, + } +} + +// prepareL2WriteKeys chooses the write-set of CacheKeys for updateL2Cache, +// syncs entity-fetch L1/L2 keys, normalizes aliased fields on ck.Item, and +// merges any existing cached value into ck.FromCache (for writeback). +// Returns nil when there is nothing to store. +func (l *Loader) prepareL2WriteKeys(res *result) []*CacheKey { + // Use l2CacheKeys (with prefix) if available, otherwise fall back to cacheKeys + // prepareCacheKeys renders both cache-key slices from the same input item pointers, + // so skip-fetch mergeResult updates are visible through res.l2CacheKeys as well. + // Fetch paths additionally rebind both slices to merged objects inside mergeResult. + keysToStore := res.l2CacheKeys + if len(keysToStore) == 0 { + keysToStore = res.l1CacheKeys + } + if len(keysToStore) == 0 { + return nil + } + + // For entity fetches, l1CacheKeys carry the authoritative cached context used during + // resolution while l2CacheKeys carry the external-cache key strings (with prefix/header + // isolation). Build the write set from the L1 context and graft on the L2 keys. + if res.cacheConfig.CacheKeyTemplate != nil && + res.cacheConfig.CacheKeyTemplate.IsEntityFetch() && + len(res.l1CacheKeys) == len(res.l2CacheKeys) && + len(res.l2CacheKeys) > 0 { + syncedKeys := make([]*CacheKey, 0, len(res.l2CacheKeys)) + for i := range res.l2CacheKeys { + if res.l2CacheKeys[i] == nil { + continue + } + if res.l1CacheKeys[i] == nil { + syncedKeys = append(syncedKeys, res.l2CacheKeys[i]) + continue + } + cloned := *res.l1CacheKeys[i] + cloned.Keys = res.l2CacheKeys[i].Keys + cloned.BatchIndex = res.l2CacheKeys[i].BatchIndex + cloned.EntityMergePath = res.l2CacheKeys[i].EntityMergePath + cloned.NegativeCacheHit = res.l2CacheKeys[i].NegativeCacheHit + syncedKeys = append(syncedKeys, &cloned) + } + keysToStore = syncedKeys + } + + // Normalize aliased fields to original schema names before storing. Only + // runs when HasAliases is true: StructuralCopyWithTransform produces a + // cache-shape working tree owned by l.jsonArena (renamed + independent of + // the response tree). When there are no aliases, ck.Item is left as-is — + // the downstream MergeValues writeback operates on ck.FromCache (not + // ck.Item), and cacheKeysToEntriesForUpdate materializes via MarshalTo + // which produces independent bytes, so no extra StructuralCopy is needed + // for isolation in the no-alias path. + if res.providesData != nil && res.providesData.HasAliases { + for _, ck := range keysToStore { + if ck.Item != nil { + ck.Item = l.structuralCopyNormalized(ck.Item, res.providesData) + } + } + } + + // Merge existing cached fields to preserve other arg variants. + // ck.FromCache holds the old L2 entity (set by tryL2CacheLoad when validation failed), + // ck.Item holds the newly fetched and normalized entity. + // MergeValues merges ck.Item fields into ck.FromCache (mutates first arg); + // existing old fields are preserved, new fields win on conflicts. + // On error, skip merge and store only the fresh item (pre-merge behavior). + for _, ck := range keysToStore { + if ck.Item != nil && ck.FromCache != nil { + _, err := astjson.MergeValues(l.jsonArena, ck.FromCache, ck.Item) + if err == nil { + ck.Item = ck.FromCache + } + } + } + + return keysToStore +} + +func (l *Loader) writeL2CacheSetContributors(contributors []*l2CacheSetContributor) { + if len(contributors) == 0 { + return + } + groupsByCache := make(map[LoaderCache]*l2CacheSetGroup, len(contributors)) + groups := make([]*l2CacheSetGroup, 0, len(contributors)) + for _, contributor := range contributors { + if contributor == nil || contributor.res == nil || contributor.res.cache == nil || len(contributor.entries) == 0 { + continue + } + group := groupsByCache[contributor.res.cache] + if group == nil { + group = &l2CacheSetGroup{cache: contributor.res.cache} + groupsByCache[contributor.res.cache] = group + groups = append(groups, group) + } + group.contributors = append(group.contributors, contributor) + group.entries = append(group.entries, contributor.entries...) + } + for _, group := range groups { + l.writeL2CacheSetGroup(group) + } +} + +func (l *Loader) writeL2CacheSetGroup(group *l2CacheSetGroup) { + if group == nil || group.cache == nil || len(group.entries) == 0 { + return + } + tracingCache := l.ctx.TracingOptions.Enable && !l.ctx.TracingOptions.ExcludeCacheStats + ctx := l.ctx.ctx + + var l2SetStart time.Time + if tracingCache { + l2SetStart = time.Now() + for _, contributor := range group.contributors { + res := contributor.res + res.cacheTraceL2SetAttempted = true + if len(contributor.negativeEntries) > 0 { + res.cacheTraceL2SetNegAttempted = true + } + } + } + setErr := group.cache.Set(ctx, group.entries) + if setErr != nil { + for _, contributor := range group.contributors { + res := contributor.res + if tracingCache { + res.cacheTraceL2SetDuration = time.Since(l2SetStart) + if len(contributor.negativeEntries) > 0 { + res.cacheTraceL2SetNegDuration = res.cacheTraceL2SetDuration + } + if !errors.Is(setErr, ErrCircuitBreakerOpen) { + res.cacheTraceL2SetError = setErr.Error() + if len(contributor.negativeEntries) > 0 { + res.cacheTraceL2SetNegError = setErr.Error() + } + } + } + if l.ctx.cacheAnalyticsEnabled() && !errors.Is(setErr, ErrCircuitBreakerOpen) { + l.ctx.cacheAnalytics.RecordCacheOperationError(CacheOperationError{ + Operation: "set", + CacheName: res.cacheConfig.CacheName, + EntityType: res.analyticsEntityType, + DataSource: res.ds.Name, + Message: truncateErrorMessage(setErr.Error(), 256), + ItemCount: len(contributor.entries), + }) + } + } + return + } + + for _, contributor := range group.contributors { + res := contributor.res + if tracingCache { + res.cacheTraceL2SetDuration = time.Since(l2SetStart) + if len(contributor.negativeEntries) > 0 { + res.cacheTraceL2SetNegDuration = res.cacheTraceL2SetDuration + } + } + l.recordL2WriteAnalytics(res, contributor.entries, contributor.regularEntries) + } +} + +// recordL2WriteAnalytics emits the CacheWriteEvent per written entry and, when +// subgraph-header isolation is active, the header-impact hashes that feed +// cross-request analytics. Only the regular cacheEntries are hashed for header +// impact — negative-cache sentinels are not meaningful there. +func (l *Loader) recordL2WriteAnalytics(res *result, writtenEntries []*CacheEntry, cacheEntries []*CacheEntry) { + // Record L2 write events for analytics + if l.ctx.cacheAnalyticsEnabled() { + for _, entry := range writtenEntries { + if entry == nil { + continue + } + l.ctx.cacheAnalytics.RecordWrite(CacheWriteEvent{ + CacheKey: entry.Key, EntityType: res.analyticsEntityType, ByteSize: len(entry.Value), + DataSource: res.ds.Name, CacheLevel: CacheLevelL2, TTL: entry.TTL, + Source: l.cacheOperationSource(), WriteReason: entry.WriteReason, + }) + } + } + + // Record header impact events for cross-request analysis. + // Only when IncludeSubgraphHeaderPrefix is active (headerHash != 0). + if l.ctx.cacheAnalyticsEnabled() && res.headerHash != 0 && len(res.l1CacheKeys) > 0 { + // Build L2-to-L1 key mapping. L1 and L2 cache keys are generated from the same + // inputItems in prepareCacheKeys, so they have matching indices. + l2ToBaseKey := make(map[string]string, len(res.l2CacheKeys)) + for i, l2ck := range res.l2CacheKeys { + if i < len(res.l1CacheKeys) { + for j, l2key := range l2ck.Keys { + if j < len(res.l1CacheKeys[i].Keys) { + l2ToBaseKey[l2key] = res.l1CacheKeys[i].Keys[j] + } + } + } + } + + xxh := l.ctx.cacheAnalytics.xxh + for _, entry := range cacheEntries { + if entry == nil { + continue + } + baseKey, ok := l2ToBaseKey[entry.Key] + if !ok { + continue + } + xxh.Reset() + _, _ = xxh.Write(entry.Value) + l.ctx.cacheAnalytics.RecordHeaderImpactEvent(HeaderImpactEvent{ + BaseKey: baseKey, + HeaderHash: res.headerHash, + ResponseHash: xxh.Sum64(), + EntityType: res.analyticsEntityType, + DataSource: res.ds.Name, + }) + } + } +} + +func (l *Loader) cacheKeysToEntriesForUpdate(a arena.Arena, res *result, cacheKeys []*CacheKey) ([]*CacheEntry, error) { + rootTemplate, ok := res.cacheConfig.CacheKeyTemplate.(*RootQueryCacheKeyTemplate) + if ok && len(rootTemplate.EntityKeyMappings) > 0 { + return l.cacheKeysToExactRootFieldEntityEntries(a, res, cacheKeys, rootTemplate), nil + } + return l.cacheKeysToEntries(a, cacheKeys) +} + +func (l *Loader) cacheKeysToExactRootFieldEntityEntries(a arena.Arena, res *result, cacheKeys []*CacheKey, rootTemplate *RootQueryCacheKeyTemplate) []*CacheEntry { + // Batch entity key mode: each CacheKey already has the correct L2 key in ck.Keys[0] + // and ck.Item points to the individual entity. Use simplified write path. + if res.batchEntityKeyMode { + return l.cacheKeysToEntriesBatch(a, res, cacheKeys) + } + + // Key-format parity assumption: rendering a key from final entity data must produce + // the same string as rendering the requested key from input args when the values match. + prefix := l.rootFieldL2CachePrefix(res) + seen := make(map[string]struct{}, len(cacheKeys)) + out := make([]*CacheEntry, 0, len(cacheKeys)) + + for _, ck := range cacheKeys { + if ck == nil || ck.Item == nil || ck.NegativeCacheHit { + continue + } + + entity := ck.Item + if len(ck.EntityMergePath) > 0 { + entity = ck.Item.Get(ck.EntityMergePath...) + } + if entity == nil || entity.Type() != astjson.TypeObject { + continue + } + + missingKeys := make(map[string]struct{}, len(ck.missingKeys)) + for _, key := range ck.missingKeys { + missingKeys[key] = struct{}{} + } + + valueBytes := entity.MarshalTo(nil) + requestKeyBuf := arena.AllocateSlice[byte](a, 0, 64) + renderedKeyBuf := arena.AllocateSlice[byte](a, 0, 64) + for _, mapping := range rootTemplate.EntityKeyMappings { + requestedKey, requestKeyBufOut := rootTemplate.renderDerivedEntityKey(a, l.ctx, requestKeyBuf, mapping, prefix) + requestKeyBuf = requestKeyBufOut + if requestedKey != "" { + requestedKey = l.applyL2CacheKeyInterceptor(requestedKey, res) + } + + renderedKey, renderedKeyBufOut := rootTemplate.renderDerivedEntityKeyFromValue(a, entity, renderedKeyBuf, mapping, prefix) + renderedKeyBuf = renderedKeyBufOut + if renderedKey != "" { + renderedKey = l.applyL2CacheKeyInterceptor(renderedKey, res) + } + + // Requested key: write with appropriate reason (refresh or backfill). + if requestedKey != "" && shouldWriteRequestedKey(res.cacheSkipFetch, ck.fromCacheNeedsWriteback, requestedKey, renderedKey, missingKeys) { + if _, ok := seen[requestedKey]; !ok { + seen[requestedKey] = struct{}{} + reason := requestedKeyWriteReason(requestedKey, missingKeys) + out = append(out, cacheEntryFromValueBytesWithReason(a, requestedKey, valueBytes, reason)) + } + } + // Rendered key: write when the entity data proves it. + // On the fetch path: always write — the subgraph is the source of truth. + // On the skip-fetch path: only write if the key is genuinely new + // (not an existing cached key that we'd redundantly rewrite). + if renderedKey != "" && shouldWriteRenderedKey(res.cacheSkipFetch, ck.fromCacheNeedsWriteback, renderedKey, missingKeys) { + if _, ok := seen[renderedKey]; !ok { + seen[renderedKey] = struct{}{} + reason := renderedKeyWriteReason(renderedKey, missingKeys) + out = append(out, cacheEntryFromValueBytesWithReason(a, renderedKey, valueBytes, reason)) + } + } + } + } + + return out +} + +// cacheKeysToEntriesBatch converts batch CacheKeys to CacheEntries. +// For batch mode, each CacheKey already has the correct L2 key and Item pointing to entity data. +func (l *Loader) cacheKeysToEntriesBatch(a arena.Arena, res *result, cacheKeys []*CacheKey) []*CacheEntry { + out := make([]*CacheEntry, 0, len(cacheKeys)) + seen := make(map[string]struct{}, len(cacheKeys)) + for _, ck := range cacheKeys { + if ck == nil || ck.Item == nil || ck.NegativeCacheHit { + continue + } + if ck.Item.Type() != astjson.TypeObject { + continue + } + for _, key := range ck.Keys { + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + valueBytes := ck.Item.MarshalTo(nil) + entryBuf := make([]byte, len(valueBytes)) + copy(entryBuf, valueBytes) + out = append(out, &CacheEntry{ + Key: key, + Value: entryBuf, + }) + } + } + return out +} + +func shouldWriteRequestedKey(cacheSkipFetch bool, fromCacheNeedsWriteback bool, requestedKey string, renderedKey string, missingKeys map[string]struct{}) bool { + if _, wasMissing := missingKeys[requestedKey]; wasMissing { + if cacheSkipFetch { + // Skip-fetch path: the entity data came from cache, not from a subgraph, so + // there is no fresh proof that this entity matches `requestedKey`. Only write + // when the rendered-from-data key matches — meaning the cached entity itself + // confirms the mapping. + return requestedKey == renderedKey + } + // Fetch path: the subgraph returned this entity for a request whose arguments + // produced `requestedKey`. The subgraph contract — "return the entity that matches + // the supplied args" — is sufficient to write under `requestedKey` even when the + // response payload doesn't carry the @key field (the client selected only non-key + // fields). Suppressing the write here was the cause of the nested-key cache-miss + // bug: every cached read would miss because every write was suppressed. + // Still suppress when both keys are non-empty and disagree (true key skew — + // subgraph returned an entity whose key value differs from the requested one). + return renderedKey == "" || requestedKey == renderedKey + } + if cacheSkipFetch { + return fromCacheNeedsWriteback + } + return true +} + +// shouldWriteRenderedKey decides whether a rendered key (derived from final entity data) +// should be written to L2. On the fetch path, always write — the subgraph returned this data. +// On the skip-fetch path, only write if the key is new (missing or not previously requested), +// not an existing cached key that would be redundantly rewritten. +func shouldWriteRenderedKey(cacheSkipFetch bool, fromCacheNeedsWriteback bool, renderedKey string, missingKeys map[string]struct{}) bool { + if !cacheSkipFetch { + return true + } + // Skip-fetch path: the entity data came from cache, not from a subgraph. + // Write if the key was missing on read (backfill) or if writeback is needed. + if _, wasMissing := missingKeys[renderedKey]; wasMissing { + return true + } + return fromCacheNeedsWriteback +} + +func cacheEntryFromValueBytesWithReason(_ arena.Arena, key string, valueBytes []byte, reason CacheWriteReason) *CacheEntry { + // Value must be heap-allocated: it is handed to the L2 cache (e.g. ristretto) + // which retains the slice across requests. An arena-backed slice would be overwritten + // once the request's arena is reset, producing corrupted cache reads on later requests. + entryValue := make([]byte, len(valueBytes)) + copy(entryValue, valueBytes) + return &CacheEntry{ + Key: key, + Value: entryValue, + WriteReason: reason, + } +} + +// requestedKeyWriteReason returns the write reason for a requested key. +// If the key was missing on read, it's a backfill; otherwise it's a refresh. +func requestedKeyWriteReason(key string, missingKeys map[string]struct{}) CacheWriteReason { + if _, wasMissing := missingKeys[key]; wasMissing { + return CacheWriteReasonBackfill + } + return CacheWriteReasonRefresh +} + +// renderedKeyWriteReason returns the write reason for a rendered (entity-data-derived) key. +// If the key was missing on read, it's a backfill; otherwise it's a derived expansion. +func renderedKeyWriteReason(key string, missingKeys map[string]struct{}) CacheWriteReason { + if _, wasMissing := missingKeys[key]; wasMissing { + return CacheWriteReasonBackfill + } + return CacheWriteReasonDerived +} + +func (l *Loader) rootFieldL2CachePrefix(res *result) string { + globalPrefix := l.ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix + // includeHeaderPrefix is the source of truth: it tells us "header partitioning + // is on for this fetch" regardless of whether the actual hash happens to be 0. + // Using `headerHash != 0` here was the bug — requests with `IncludeSubgraphHeaderPrefix=true` + // but no headers forwarded computed hash=0 and silently dropped the prefix on writes, + // producing write keys that never matched the read keys (which always built "0:..."). + if res.includeHeaderPrefix { + headerPrefix := strconv.FormatUint(res.headerHash, 10) + if globalPrefix != "" { + return globalPrefix + ":" + headerPrefix + } + return headerPrefix + } + return globalPrefix +} + +func (l *Loader) applyL2CacheKeyInterceptor(key string, res *result) string { + interceptor := l.ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor + if interceptor == nil { + return key + } + info := L2CacheKeyInterceptorInfo{ + SubgraphName: res.ds.Name, + CacheName: res.cacheConfig.CacheName, + } + if res.fetchInfo != nil && res.fetchInfo.DataSourceName != "" { + info.SubgraphName = res.fetchInfo.DataSourceName + } + return interceptor(l.ctx.ctx, key, info) +} + +// saveShadowCachedValue saves a cached L2 value for later staleness comparison in shadow mode. +func (l *Loader) saveShadowCachedValue(res *result, index int, cachedValue *astjson.Value, cacheKey string, remainingTTL time.Duration) { + if res.shadowCachedValues == nil { + res.shadowCachedValues = make(map[int]shadowCacheEntry, len(res.l1CacheKeys)) + } + res.shadowCachedValues[index] = shadowCacheEntry{ + cachedValue: cachedValue, + cacheKey: cacheKey, + remainingTTL: remainingTTL, + } +} + +// compareShadowValues compares cached L2 values with fresh data after a fetch completes. +// Uses structuralCopyProjected to extract only ProvidesData fields, then hashes +// both values with xxhash. Records ShadowComparisonEvent for each comparison. +// Also records per-field hashes of the cached value (FieldSourceShadowCached) so consumers +// can diff individual fields against the fresh-data hashes recorded during resolution. +// Called from mergeResult on the main thread. +func (l *Loader) compareShadowValues(res *result, info *FetchInfo) { + if len(res.shadowCachedValues) == 0 || !l.ctx.cacheAnalyticsEnabled() || info == nil || info.ProvidesData == nil { + return + } + + dataSource := info.DataSourceName + var entityType string + if len(info.RootFields) > 0 { + entityType = info.RootFields[0].TypeName + } + + xxh := l.ctx.cacheAnalytics.xxh + + for i, entry := range res.shadowCachedValues { + if i >= len(res.l1CacheKeys) || res.l1CacheKeys[i].Item == nil { + continue + } + + freshValue := res.l1CacheKeys[i].Item + + // Extract only ProvidesData fields from both cached and fresh values + cachedProvides := l.structuralCopyProjected(entry.cachedValue, info.ProvidesData) + freshProvides := l.structuralCopyProjected(freshValue, info.ProvidesData) + + // Marshal and hash + cachedBytes := cachedProvides.MarshalTo(nil) + freshBytes := freshProvides.MarshalTo(nil) + + xxh.Reset() + _, _ = xxh.Write(cachedBytes) + cachedHash := xxh.Sum64() + + xxh.Reset() + _, _ = xxh.Write(freshBytes) + freshHash := xxh.Sum64() + + // Compute cache age from stored remainingTTL + cacheAgeMs := computeCacheAgeMs(entry.remainingTTL, res.cacheConfig.TTL) + + l.ctx.cacheAnalytics.RecordShadowComparison(ShadowComparisonEvent{ + CacheKey: entry.cacheKey, + EntityType: entityType, + IsFresh: cachedHash == freshHash, + CachedHash: cachedHash, + FreshHash: freshHash, + CachedBytes: len(cachedBytes), + FreshBytes: len(freshBytes), + DataSource: dataSource, + CacheAgeMs: cacheAgeMs, + ConfiguredTTL: res.cacheConfig.TTL, + }) + + // Per-field hashing of cached value for field-level change detection. + // Fresh field hashes are already recorded during resolution (FieldSourceSubgraph). + // Here we record cached field hashes so the consumer can diff per-field. + if info.ProvidesData != nil { + // Build entity key for correlation with resolution-time hashes + var keyRaw string + if len(res.cacheConfig.KeyFields) > 0 { + if keyJSON := buildEntityKeyJSON(entry.cachedValue, res.cacheConfig.KeyFields); len(keyJSON) > 0 { + keyRaw = string(keyJSON) + } + } + for _, field := range info.ProvidesData.Fields { + fieldName := string(field.Name) + fieldVal := cachedProvides.Get(fieldName) + if fieldVal != nil { + fieldBytes := fieldVal.MarshalTo(nil) + l.ctx.cacheAnalytics.HashFieldValue( + entityType, fieldName, fieldBytes, + keyRaw, 0, FieldSourceShadowCached, + ) + } + } + } + } +} + +// detectMutationEntityImpact checks if a mutation response contains a cached entity +// and either invalidates (deletes) the L2 cache entry or compares it for staleness analytics. +// Called from mergeResult on the main thread after the mutation fetch completes. +// Handles both single-entity (object) and list (array) mutation responses. +func (l *Loader) detectMutationEntityImpact(res *result, info *FetchInfo, responseData *astjson.Value) map[string]struct{} { + if info == nil || info.OperationType != ast.OperationTypeMutation { + return nil + } + cfg := res.cacheConfig.MutationEntityImpactConfig + if cfg == nil { + return nil + } + // Proceed if invalidation, populate, or analytics is configured + if !cfg.InvalidateCache && !cfg.PopulateCache && !l.ctx.cacheAnalyticsEnabled() { + return nil + } + if info.ProvidesData == nil || len(info.RootFields) == 0 { + return nil + } + + // Get the LoaderCache for this entity's cache name + if l.caches == nil { + return nil + } + cache := l.caches[cfg.CacheName] + if cache == nil { + return nil + } + + mutationFieldName := info.RootFields[0].FieldName + + // Extract entity data from mutation response + // For root mutation: responseData = {"updateUsername": {"id":"1234","username":"UpdatedMe"}} + // or for list mutations: responseData = {"deleteUsers": [{"id":"1"},{"id":"2"}]} + entityData := responseData.Get(mutationFieldName) + if entityData == nil { + return nil + } + + // Navigate ProvidesData to the entity level. + // ProvidesData describes the mutation response structure: {updateUsername: {id, username}}. + // We need the inner Object that describes the entity's fields. + entityProvidesData := navigateProvidesDataToField(info.ProvidesData, mutationFieldName) + if entityProvidesData == nil { + return nil + } + + switch entityData.Type() { + case astjson.TypeObject: + return l.detectSingleMutationEntityImpact(cache, cfg, info, entityData, entityProvidesData, mutationFieldName) + case astjson.TypeArray: + items, _ := entityData.Array() + var deletedKeys map[string]struct{} + for _, item := range items { + if item == nil || item.Type() != astjson.TypeObject { + continue + } + itemDeleted := l.detectSingleMutationEntityImpact(cache, cfg, info, item, entityProvidesData, mutationFieldName) + for k, v := range itemDeleted { + if deletedKeys == nil { + deletedKeys = make(map[string]struct{}) + } + deletedKeys[k] = v + } + } + return deletedKeys + default: + return nil + } +} + +// detectSingleMutationEntityImpact handles invalidation and analytics for a single entity +// returned by a mutation. Called by detectMutationEntityImpact for each entity. +func (l *Loader) detectSingleMutationEntityImpact( + cache LoaderCache, + cfg *MutationEntityImpactConfig, + info *FetchInfo, + entityData *astjson.Value, + entityProvidesData *Object, + mutationFieldName string, +) map[string]struct{} { + // Build L2 cache key for lookup + cacheKey := l.buildMutationEntityCacheKey(cfg, entityData, info) + if cacheKey == "" { + return nil + } + + // Invalidate L2 cache entry if configured + var deletedKeys map[string]struct{} + if cfg.InvalidateCache { + if delErr := cache.Delete(l.ctx.ctx, []string{cacheKey}); delErr != nil { + if l.ctx.cacheAnalyticsEnabled() { + l.ctx.cacheAnalytics.RecordCacheOperationError(CacheOperationError{ + Operation: "delete", + CacheName: cfg.CacheName, + EntityType: cfg.EntityTypeName, + Message: truncateErrorMessage(delErr.Error(), 256), + ItemCount: 1, + }) + } + } else { + deletedKeys = map[string]struct{}{cacheKey: {}} + } + } + + // Populate L2 cache entry from the mutation response if configured. + // `@cachePopulate` on a single-subgraph mutation has no follow-up entity fetch + // to inherit EnableMutationL2CachePopulation, so the standard updateL2Cache write + // path never fires. Write the entity payload here using the same cache key the + // read path will construct. + if cfg.PopulateCache && l.ctx.ExecutionOptions.Caching.EnableL2Cache { + // Project the entity through the entity-level ProvidesData (already navigated + // by the caller) so the cached payload exactly matches what an entity fetch + // would have returned — no extra mutation-side fields like __typename wrappers + // that the read path doesn't expect. + entityToCache := entityData + if entityProvidesData != nil { + entityToCache = l.structuralCopyProjected(entityData, entityProvidesData) + } + // Heap-allocate: the L2 cache may retain the byte slice across requests. + raw := entityToCache.MarshalTo(nil) + valueBytes := make([]byte, len(raw)) + copy(valueBytes, raw) + if setErr := cache.Set(l.ctx.ctx, []*CacheEntry{{ + Key: cacheKey, + Value: valueBytes, + TTL: cfg.PopulateTTL, + }}); setErr != nil { + if l.ctx.cacheAnalyticsEnabled() { + l.ctx.cacheAnalytics.RecordCacheOperationError(CacheOperationError{ + Operation: "set", + CacheName: cfg.CacheName, + EntityType: cfg.EntityTypeName, + Message: truncateErrorMessage(setErr.Error(), 256), + ItemCount: 1, + }) + } + } + } + + // Analytics comparison requires cacheAnalytics to be enabled + if !l.ctx.cacheAnalyticsEnabled() { + return deletedKeys + } + + // Build display key (without prefix) for analytics + displayKey := l.buildEntityBaseKeyJSON(cfg.EntityTypeName, entityData, cfg.KeyFields) + + // Hash the fresh (mutation response) value + freshProvides := l.structuralCopyProjected(entityData, entityProvidesData) + freshBytes := freshProvides.MarshalTo(nil) + xxh := l.ctx.cacheAnalytics.xxh + xxh.Reset() + _, _ = xxh.Write(freshBytes) + freshHash := xxh.Sum64() + + l.ctx.cacheAnalytics.RecordMutationEvent(MutationEvent{ + MutationRootField: mutationFieldName, + EntityType: cfg.EntityTypeName, + EntityCacheKey: displayKey, + HadCachedValue: false, + IsStale: false, + FreshHash: freshHash, + FreshBytes: len(freshBytes), + }) + return deletedKeys +} + +// buildEntityBaseKeyJSON builds the base JSON key for an entity: {"__typename":"...","key":{...}}. +func (l *Loader) buildEntityBaseKeyJSON(entityTypeName string, entityData *astjson.Value, keyFields []KeyField) string { + keyObj := l.newEntityKeyStruct(entityTypeName, l.buildEntityKeyValue(entityData, keyFields)) + return string(keyObj.MarshalTo(nil)) +} + +// newEntityKeyStruct builds {"__typename":"","key":} on l.jsonArena. +// Used by buildEntityBaseKeyJSON (keyValue derived from KeyFields) and by the +// extension-based invalidation path (keyValue already carried by the extension). +func (l *Loader) newEntityKeyStruct(typeName string, keyValue *astjson.Value) *astjson.Value { + keyObj := astjson.ObjectValue(l.jsonArena) + keyObj.Set(l.jsonArena, "__typename", astjson.StringValue(l.jsonArena, typeName)) + keyObj.Set(l.jsonArena, "key", keyValue) + return keyObj +} + +// buildMutationEntityCacheKey builds the L2 cache key for a mutation-returned entity. +// Format: [prefix:]{"__typename":"User","key":{"id":"1234"}} +func (l *Loader) buildMutationEntityCacheKey(cfg *MutationEntityImpactConfig, entityData *astjson.Value, info *FetchInfo) string { + keyJSON := l.buildEntityBaseKeyJSON(cfg.EntityTypeName, entityData, cfg.KeyFields) + + // Apply global prefix and subgraph header prefix to mirror prepareCacheKeys(). + var cacheKey string + globalPrefix := l.ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix + if cfg.IncludeSubgraphHeaderPrefix && l.ctx.SubgraphHeadersBuilder != nil { + _, headersHash := l.ctx.SubgraphHeadersBuilder.HeadersForSubgraph(info.DataSourceName) + prefix := strconv.FormatUint(headersHash, 10) + if globalPrefix != "" { + cacheKey = globalPrefix + ":" + prefix + ":" + keyJSON + } else { + cacheKey = prefix + ":" + keyJSON + } + } else if globalPrefix != "" { + cacheKey = globalPrefix + ":" + keyJSON + } else { + cacheKey = keyJSON + } + + // Apply user-provided L2 cache key interceptor + if interceptor := l.ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor; interceptor != nil { + cacheKey = interceptor(l.ctx.ctx, cacheKey, L2CacheKeyInterceptorInfo{ + SubgraphName: info.DataSourceName, + CacheName: cfg.CacheName, + }) + } + return cacheKey +} + +// buildEntityKeyValue recursively builds a JSON object from entity data using only key fields. +func (l *Loader) buildEntityKeyValue(data *astjson.Value, keyFields []KeyField) *astjson.Value { + obj := astjson.ObjectValue(l.jsonArena) + for _, kf := range keyFields { + if len(kf.Children) > 0 { + childData := data.Get(kf.Name) + obj.Set(l.jsonArena, kf.Name, l.buildEntityKeyValue(childData, kf.Children)) + } else { + val := data.Get(kf.Name) + if val != nil { + obj.Set(l.jsonArena, kf.Name, val) + } + } + } + return obj +} + +// processExtensionsCacheInvalidation handles cache invalidation signals from subgraph response extensions. +// +// Subgraphs can signal cache invalidation by including an extensions field in their response: +// +// {"extensions": {"cacheInvalidation": {"keys": [{"typename": "User", "key": {"id": "1"}}]}}} +// +// This function parses the keys array and deletes the corresponding L2 cache entries. +// Works for both query and mutation responses — not restricted to mutations. +// +// The cache key construction pipeline mirrors the storage pipeline: +// +// typename + key fields → build JSON → apply header prefix → apply interceptor → cache.Delete() +func (l *Loader) processExtensionsCacheInvalidation(res *result, cacheInvalidation *astjson.Value, deletedKeys map[string]struct{}) { + // No invalidation data in the response extensions. + if cacheInvalidation == nil { + return + } + // Extensions-based invalidation only applies when L2 caching is enabled, + // since L2 is the cross-request cache that benefits from explicit invalidation. + if !l.ctx.ExecutionOptions.Caching.EnableL2Cache { + return + } + // entityCacheConfigs maps subgraph name → entity type → config (CacheName, IncludeSubgraphHeaderPrefix). + // Without this mapping, we don't know which cache to delete from or how to build the key. + if l.entityCacheConfigs == nil || l.caches == nil { + return + } + + // Extract the "keys" array from the cacheInvalidation object. + // Each entry has {"typename": "User", "key": {"id": "1"}}. + keysArray := cacheInvalidation.GetArray("keys") + if len(keysArray) == 0 { + return + } + + // Look up the entity cache config for the responding subgraph. + // The subgraph that sent the invalidation signal is the same one whose entity configs we use, + // because in federation, the subgraph that caches an entity is the one that resolves it. + subgraphName := res.ds.Name + subgraphConfigs := l.entityCacheConfigs[subgraphName] + if subgraphConfigs == nil { + return + } + + // Build set of L2 keys that updateL2Cache will set after this function returns. + // Deleting a key that's about to be re-set with fresh data is redundant. + keysAboutToBeSet := l.l2KeysAboutToBeSet(res) + + // Group invalidation keys by cache name so we can batch-delete per cache instance. + type cacheDeleteBatch struct { + cache LoaderCache + keys []string + } + batches := map[string]*cacheDeleteBatch{} + + for _, entry := range keysArray { + // Skip malformed entries (must be JSON objects). + if entry == nil || entry.Type() != astjson.TypeObject { + continue + } + + // Extract "typename" (string) and "key" (JSON object) from each invalidation entry. + typenameVal := entry.Get("typename") + keyVal := entry.Get("key") + if typenameVal == nil || keyVal == nil || keyVal.Type() != astjson.TypeObject { + continue + } + typename := string(typenameVal.GetStringBytes()) + if typename == "" { + continue + } + + // Look up the entity cache config for this typename from the responding subgraph. + // This tells us which cache instance to use and whether to apply header prefix. + // Unknown typenames are silently skipped — the subgraph may send invalidation + // for types that aren't configured for caching on this router. + entityConfig := subgraphConfigs[typename] + if entityConfig == nil { + continue + } + + // Resolve the cache instance by name. + cache := l.caches[entityConfig.CacheName] + if cache == nil { + continue + } + + // Build the base cache key JSON matching the format used during cache population: + // {"__typename":"User","key":{"id":"1"}} + // The "key" value is taken directly from the extensions — it's already a JSON object + // with the entity's @key field values. + baseKey := string(l.newEntityKeyStruct(typename, keyVal).MarshalTo(nil)) + cacheKey := baseKey + + // Apply global prefix and subgraph header prefix to mirror prepareCacheKeys(). + // Order: global prefix → header hash prefix → interceptor. + globalPrefix := l.ctx.ExecutionOptions.Caching.GlobalCacheKeyPrefix + if entityConfig.IncludeSubgraphHeaderPrefix && l.ctx.SubgraphHeadersBuilder != nil { + _, headersHash := l.ctx.SubgraphHeadersBuilder.HeadersForSubgraph(subgraphName) + var buf [20]byte + b := strconv.AppendUint(buf[:0], headersHash, 10) + if globalPrefix != "" { + cacheKey = globalPrefix + ":" + string(b) + ":" + cacheKey + } else { + cacheKey = string(b) + ":" + cacheKey + } + } else if globalPrefix != "" { + cacheKey = globalPrefix + ":" + cacheKey + } + + // Apply user-provided L2 cache key interceptor if set. + // This allows user-defined key transformations (e.g., tenant isolation prefixes) + // and mirrors the same interceptor applied during cache population. + if interceptor := l.ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor; interceptor != nil { + cacheKey = interceptor(l.ctx.ctx, cacheKey, L2CacheKeyInterceptorInfo{ + SubgraphName: subgraphName, + CacheName: entityConfig.CacheName, + }) + } + + // Skip L2 delete if: + // - already deleted by detectMutationEntityImpact (deduplication) + // - about to be re-set by updateL2Cache (redundant delete before set) + if _, alreadyDone := deletedKeys[cacheKey]; alreadyDone { + continue + } + if _, aboutToBeSet := keysAboutToBeSet[cacheKey]; aboutToBeSet { + continue + } + + if l.ctx.cacheAnalyticsEnabled() { + source := CacheSourceQuery + mutationRootField := "" + operationType := ast.OperationTypeQuery + if res.fetchInfo != nil { + operationType = res.fetchInfo.OperationType + } else if l.info != nil { + operationType = l.info.OperationType + } + switch operationType { + case ast.OperationTypeMutation: + source = CacheSourceMutation + if res.fetchInfo != nil && len(res.fetchInfo.RootFields) > 0 { + mutationRootField = res.fetchInfo.RootFields[0].FieldName + } + case ast.OperationTypeSubscription: + source = CacheSourceSubscription + } + l.ctx.cacheAnalytics.RecordMutationEvent(MutationEvent{ + MutationRootField: mutationRootField, + EntityType: typename, + EntityCacheKey: baseKey, + HadCachedValue: false, + IsStale: false, + Source: source, + }) + } + + // Accumulate the key into the batch for this cache name. + batch, ok := batches[entityConfig.CacheName] + if !ok { + batch = &cacheDeleteBatch{cache: cache} + batches[entityConfig.CacheName] = batch + } + batch.keys = append(batch.keys, cacheKey) + } + + // Execute batched L2 cache deletes — one Delete call per cache instance. + for cacheName, batch := range batches { + if delErr := batch.cache.Delete(l.ctx.ctx, batch.keys); delErr != nil && + !errors.Is(delErr, ErrCircuitBreakerOpen) && + l.ctx.cacheAnalyticsEnabled() { + l.ctx.cacheAnalytics.RecordCacheOperationError(CacheOperationError{ + Operation: "delete", + CacheName: cacheName, + Message: truncateErrorMessage(delErr.Error(), 256), + ItemCount: len(batch.keys), + }) + } + } +} + +// l2KeysAboutToBeSet returns the set of L2 cache keys that updateL2Cache will store +// after the current fetch. Returns nil if updateL2Cache won't run (e.g., mutations +// without explicit L2 population, or no cache misses to populate). +func (l *Loader) l2KeysAboutToBeSet(res *result) map[string]struct{} { + // updateL2Cache skips for mutations unless L2 population is explicitly enabled. + if l.info != nil && l.info.OperationType == ast.OperationTypeMutation && + !l.enableMutationL2CachePopulation { + return nil + } + if res.cache == nil || !res.cacheMustBeUpdated { + return nil + } + keys := res.l2CacheKeys + if len(keys) == 0 { + keys = res.l1CacheKeys + } + if len(keys) == 0 { + return nil + } + set := make(map[string]struct{}, len(keys)) + for _, ck := range keys { + // Skip keys whose Item is nil — updateL2Cache won't store them + // (can happen if an entity failed to merge during batch processing). + if ck == nil || ck.Item == nil { + continue + } + for _, k := range ck.Keys { + set[k] = struct{}{} + } + } + return set +} + +// navigateProvidesDataToField finds the Object within ProvidesData that corresponds +// to a specific field name. For root mutations, ProvidesData describes the full response +// (e.g., {updateUsername: {id, username}}) and we need the inner Object for comparison. +func navigateProvidesDataToField(providesData *Object, fieldName string) *Object { + if providesData == nil { + return nil + } + for _, field := range providesData.Fields { + if string(field.Name) == fieldName { + if obj, ok := field.Value.(*Object); ok { + return obj + } + } + } + return nil +} + +// validateItemHasRequiredData checks if the given item contains all required data +// as specified by the provided Object schema +func (l *Loader) validateItemHasRequiredData(item *astjson.Value, obj *Object) bool { + if item == nil { + return false + } + // Validate each field in the object + for _, field := range obj.Fields { + if !l.validateFieldData(item, field) { + return false + } + } + + return true +} + +// validateFieldData validates a single field against the item data. +// Uses cacheFieldName() to look up by original name + arg suffix since cached data is normalized. +func (l *Loader) validateFieldData(item *astjson.Value, field *Field) bool { + fieldValue := item.Get(l.cacheFieldName(field)) + + // Check if field exists + if fieldValue == nil { + // Field is missing - this fails validation regardless of nullability + // Even nullable fields must be present (can be null, but not missing) + return false + } + + // Validate the field value against its specification + return l.validateNodeValue(fieldValue, field.Value) +} + +// validateScalarData validates scalar field data +func (l *Loader) validateScalarData(value *astjson.Value, scalar *Scalar) bool { + if value.Type() == astjson.TypeNull { + // Null is only allowed if the scalar is nullable + return scalar.Nullable + } + + // Any non-null value is acceptable for a scalar + return true +} + +// validateObjectData validates object field data +func (l *Loader) validateObjectData(value *astjson.Value, obj *Object) bool { + if value.Type() == astjson.TypeNull { + // Null is only allowed if the object is nullable + return obj.Nullable + } + + if value.Type() != astjson.TypeObject { + // Must be an object (or null if nullable) + return false + } + + // Recursively validate the object's fields + return l.validateItemHasRequiredData(value, obj) +} + +// validateArrayData validates array field data +func (l *Loader) validateArrayData(value *astjson.Value, arr *Array) bool { + if value.Type() == astjson.TypeNull { + // Null is only allowed if the array is nullable + return arr.Nullable + } + + if value.Type() != astjson.TypeArray { + // Must be an array (or null if nullable) + return false + } + + // If there's no item specification, we just validate the array exists + if arr.Item == nil { + return true + } + + // Validate each item in the array + arrayItems, err := value.Array() + if err != nil { + return false + } + + for _, item := range arrayItems { + if !l.validateNodeValue(item, arr.Item) { + return false + } + } + + return true +} + +// validateNodeValue validates a value against a Node specification +func (l *Loader) validateNodeValue(value *astjson.Value, nodeSpec Node) bool { + switch v := nodeSpec.(type) { + case *Scalar: + return l.validateScalarData(value, v) + case *Object: + return l.validateObjectData(value, v) + case *Array: + return l.validateArrayData(value, v) + default: + // Unknown type - assume invalid + return false + } +} + +// cacheFieldName returns the field name to use in cached entity data. +// For fields without arguments, returns SchemaFieldName() (zero overhead). +// For fields with arguments, appends an xxhash suffix based on resolved arg values, +// ensuring that e.g. friends(first:5) and friends(first:20) use different cache field names. +func (l *Loader) cacheFieldName(field *Field) string { + if len(field.CacheArgs) == 0 { + return field.SchemaFieldName() + } + return field.SchemaFieldName() + l.computeArgSuffix(field.CacheArgs) +} + +// computeArgSuffix computes "_<16-hex-chars>" from resolved argument values. +// Args are sorted by ArgName for deterministic output (guaranteed at plan time). +// Each arg value is resolved from ctx.Variables (with RemapVariables support) +// and serialized as JSON for hashing. +func (l *Loader) computeArgSuffix(args []CacheFieldArg) string { + // Ensure sorted by arg name (should already be sorted at plan time) + sorted := args + if !slices.IsSortedFunc(sorted, func(a, b CacheFieldArg) int { + return cmp.Compare(a.ArgName, b.ArgName) + }) { + sorted = slices.Clone(args) + slices.SortFunc(sorted, func(a, b CacheFieldArg) int { + return cmp.Compare(a.ArgName, b.ArgName) + }) + } + + h := pool.Hash64.Get() + for i, arg := range sorted { + if i > 0 { + _, _ = h.WriteString(",") + } + _, _ = h.WriteString(arg.ArgName) + _, _ = h.WriteString(":") + + // Resolve variable from ctx.Variables, applying RemapVariables + varName := arg.VariableName + if l.ctx.RemapVariables != nil { + if nameToUse, hasMapping := l.ctx.RemapVariables[varName]; hasMapping { + varName = nameToUse + } + } + + argValue := l.ctx.Variables.Get(varName) + if argValue == nil { + _, _ = h.WriteString("null") + } else { + writeCanonicalJSON(h, argValue) + } + } + + sum := h.Sum64() + pool.Hash64.Put(h) + + // Format as "_" + 16 zero-padded hex digits without fmt.Sprintf + var buf [17]byte + buf[0] = '_' + const hexDigits = "0123456789abcdef" + for i := 15; i >= 0; i-- { + buf[1+i] = hexDigits[sum&0xf] + sum >>= 4 + } + return string(buf[:]) +} + +// writeCanonicalJSON writes a deterministic JSON representation of v to w. +// For objects, keys are sorted alphabetically to ensure the same logical value +// always produces the same hash regardless of JSON key ordering from the client. +// For arrays, elements are written in order. Scalars are written as-is. +func writeCanonicalJSON(w interface{ WriteString(string) (int, error) }, v *astjson.Value) { + switch v.Type() { + case astjson.TypeObject: + obj, err := v.Object() + if err != nil { + _, _ = w.WriteString("null") + return + } + // Collect keys and sort them + type kv struct { + key string + val *astjson.Value + } + var pairs []kv + obj.Visit(func(key []byte, val *astjson.Value) { + pairs = append(pairs, kv{key: string(key), val: val}) + }) + slices.SortFunc(pairs, func(a, b kv) int { + return cmp.Compare(a.key, b.key) + }) + _, _ = w.WriteString("{") + for i, p := range pairs { + if i > 0 { + _, _ = w.WriteString(",") + } + _, _ = w.WriteString(strconv.Quote(p.key)) + _, _ = w.WriteString(":") + writeCanonicalJSON(w, p.val) + } + _, _ = w.WriteString("}") + case astjson.TypeArray: + arr := v.GetArray() + _, _ = w.WriteString("[") + for i, elem := range arr { + if i > 0 { + _, _ = w.WriteString(",") + } + writeCanonicalJSON(w, elem) + } + _, _ = w.WriteString("]") + default: + // Scalars (string, number, bool, null): MarshalTo produces canonical output + var buf [64]byte + _, _ = w.WriteString(string(v.MarshalTo(buf[:0]))) + } +} + +// mergeEntityFields copies all fields from src into dst that aren't already present. +// Used during L1 cache population to accumulate fields with different arg suffixes +// (e.g., friends_AAA and friends_BBBB coexist in the same cached entity). +// First-writer-wins: for suffixed fields each arg variant has a unique suffix so no conflict; +// for key fields (id, __typename) values are identical across fetches for the same entity. +func (l *Loader) mergeEntityFields(dst, src *astjson.Value) { + if dst == nil || src == nil { + return + } + if dst.Type() != astjson.TypeObject || src.Type() != astjson.TypeObject { + return + } + srcObj, _ := src.Object() + srcObj.Visit(func(key []byte, v *astjson.Value) { + if dst.Get(string(key)) == nil { + dst.Set(l.jsonArena, string(key), v) + } + }) +} + +// tryRequestScopedInjection checks the per-request requestScopedL1 cache for +// all hints in the fetch configuration. If every hinted field is found, it +// injects the cached values onto each entity item and returns true to signal +// the fetch can be skipped. +func (l *Loader) tryRequestScopedInjection(res *result, cfg FetchCacheConfiguration, items []*astjson.Value) bool { + if len(cfg.RequestScopedFields) == 0 { + return false + } + // Gate on L1 being enabled when the context is set (production path). + // Tests may construct a Loader without a ctx — treat that as enabled. + if l.ctx != nil && !l.ctx.ExecutionOptions.Caching.EnableL1Cache { + return false + } + + // Phase 1: Collect all cached values, verify all hints are satisfiable. + // Do NOT mutate items until we know all hints can be satisfied. + type pendingInjection struct { + fieldName string + value *astjson.Value + } + pending := make([]pendingInjection, 0, len(cfg.RequestScopedFields)) + for _, hint := range cfg.RequestScopedFields { + cachedValue, ok := l.requestScopedL1[hint.L1Key] + if !ok || cachedValue == nil { + return false + } + // Widening check: does the cached (normalized, schema-named) value have all + // fields the current query needs? Uses the same validator as entity L1. + if hint.ProvidesData != nil { + if !l.validateItemHasRequiredData(cachedValue, hint.ProvidesData) { + return false + } + } + // Denormalized read: structural copy onto l.jsonArena with optional + // denormalize transform. Materialized value is independent of the + // stored cache value, so the response tree can mutate freely. + injectValue := l.structuralCopyDenormalized(cachedValue, hint.ProvidesData) + if injectValue == nil { + return false + } + pending = append(pending, pendingInjection{ + fieldName: hint.FieldName, + value: injectValue, + }) + } + + // Phase 2: All hints satisfied — inject into items. + // For multiple items sharing the same hint, each item gets its own copy + // to avoid pointer aliasing between entity items. + for _, p := range pending { + if len(items) == 1 { + items[0].Set(l.jsonArena, p.fieldName, p.value) + continue + } + for _, item := range items { + copied := l.parser.StructuralCopy(l.jsonArena, p.value) + if copied == nil { + return false + } + item.Set(l.jsonArena, p.fieldName, copied) + } + } + + // All requestScoped fields injected — the planner only adds hints when + // the fetch's only non-key fields are requestScoped, so we can skip. + res.fetchSkipped = true + return true +} + +// exportRequestScopedFields extracts requestScoped field values from the first +// entity in the response and stores them in the per-request requestScopedL1 +// cache. Since @requestScoped fields have the same value across all entities +// in a request, only the first entity is sampled. +func (l *Loader) exportRequestScopedFields(res *result, cfg FetchCacheConfiguration, items []*astjson.Value) { + if len(cfg.RequestScopedFields) == 0 { + return + } + if l.ctx != nil && !l.ctx.ExecutionOptions.Caching.EnableL1Cache { + return + } + + // Build the list of sources to search: items first, then the root data + // Root field fetches have empty items but the data is in l.resolvable.data + sources := items + if len(sources) == 0 && l.resolvable != nil && l.resolvable.data != nil { + sources = []*astjson.Value{l.resolvable.data} + } + + for _, field := range cfg.RequestScopedFields { + for _, item := range sources { + value := item.Get(field.FieldPath...) + if value == nil || value.Type() == astjson.TypeNull { + continue + } + // Normalize for cache: rename aliases to schema names, apply arg-hash + // suffixes for arg-variant fields, walk nested objects/arrays. + normalized := l.structuralCopyNormalized(value, field.ProvidesData) + if normalized == nil { + continue + } + if existingVal, loaded := l.requestScopedL1[field.L1Key]; loaded && existingVal != nil { + // SAFETY: merge into a working copy of existingVal and + // swap on success. astjson.MergeValues mutates in place + // and failures are non-atomic; merging directly into the + // live cache entry could corrupt it. + working := l.parser.StructuralCopy(l.jsonArena, existingVal) + _, err := astjson.MergeValues(l.jsonArena, working, normalized) + if err == nil { + l.requestScopedL1[field.L1Key] = working + } + // On failure, keep the existing entry intact (drop the working copy). + } else { + l.requestScopedL1[field.L1Key] = normalized + } + break + } + } +} diff --git a/v2/pkg/engine/resolve/loader_cache_copy_bench_test.go b/v2/pkg/engine/resolve/loader_cache_copy_bench_test.go new file mode 100644 index 0000000000..25d56c3bbe --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache_copy_bench_test.go @@ -0,0 +1,268 @@ +// Benchmarks for the 4 cache-hit merge sites that currently StructuralCopy +// from the cache before merging into the response tree. Matched with the +// invariant tests in loader_cache_copy_invariant_test.go: +// +// - loader.go:1220 — mergeBatchCacheHit → BenchmarkMergeBatchCacheHit +// - loader.go:1372 — mergeBatchPartialResponse → BenchmarkMergeBatchPartialResponse +// - loader.go:1472 — mergeResult cacheSkipFetch → BenchmarkMergeResultCacheSkipFetch +// - loader.go:1491 — mergeResult partialCacheEnabled → BenchmarkMergeResultPartialCache +// +// Each benchmark runs with entity counts {1, 10, 100} to expose how per-copy +// cost scales with batch size. Uses b.ReportAllocs() so ns/op, allocs/op, B/op +// are captured. +// +// Usage: +// +// go test -run=^$ -bench BenchmarkMerge -benchmem ./v2/pkg/engine/resolve/... +package resolve + +import ( + "context" + "strconv" + "testing" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +// benchCopyEntityJSON is a realistic nested entity shape used across all +// cache-copy benches. Matches the shape used in the invariant tests. +func benchCopyEntityJSON(id string) []byte { + return []byte(`{"__typename":"User","id":"` + id + `","name":"User ` + id + `","profile":{"email":"` + id + `@example.com","age":30,"bio":"Lorem ipsum dolor sit amet"},"tags":["a","b","c"]}`) +} + +var benchCopyEntityCounts = []int{1, 10, 100} + +func newBenchCopyLoader() (*Loader, arena.Arena) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL2Cache = true + resolvable := NewResolvable(ar, ResolvableOptions{}) + if err := resolvable.Init(ctx, nil, ast.OperationTypeQuery); err != nil { + panic(err) + } + return &Loader{ + jsonArena: ar, + resolvable: resolvable, + ctx: ctx, + }, ar +} + +// BenchmarkMergeBatchCacheHit exercises loader.go:1220. +// The loader splices N cached entities into a response array via +// entityArray.SetArrayItem(arena, idx, StructuralCopy(entity)). +func BenchmarkMergeBatchCacheHit(b *testing.B) { + for _, n := range benchCopyEntityCounts { + b.Run("entities="+strconv.Itoa(n), func(b *testing.B) { + // Cache-backing arena: holds cached *astjson.Value across iterations. + // Never Reset so pointers stay valid. + cacheArena := arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024)) + cached := make([]*astjson.Value, n) + for i := range n { + v, err := astjson.ParseBytesWithArena(cacheArena, + []byte(`{"users":`+string(benchCopyEntityJSON(strconv.Itoa(i)))+`}`)) + if err != nil { + b.Fatal(err) + } + cached[i] = v + } + + l, ar := newBenchCopyLoader() + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar.Reset() + // Rebuild cacheKeys each iteration (cheap, not the measurement target). + cacheKeys := make([]*CacheKey, n) + for i := range n { + cacheKeys[i] = &CacheKey{ + BatchIndex: i, + FromCache: cached[i], + Keys: []string{"key" + strconv.Itoa(i)}, + EntityMergePath: []string{"users"}, + } + } + res := &result{l2CacheKeys: cacheKeys} + if err := l.mergeBatchCacheHit(&FetchItem{}, res, nil); err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkMergeBatchPartialResponse exercises loader.go:1372. +// Half the entities are cache hits (spliced via StructuralCopy), half come +// from the fresh subgraph response (no copy). +func BenchmarkMergeBatchPartialResponse(b *testing.B) { + for _, n := range benchCopyEntityCounts { + b.Run("entities="+strconv.Itoa(n), func(b *testing.B) { + cacheArena := arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024)) + cachedCount := n / 2 + if cachedCount == 0 { + cachedCount = 1 + } + cached := make([]*astjson.Value, cachedCount) + for i := range cachedCount { + v, err := astjson.ParseBytesWithArena(cacheArena, benchCopyEntityJSON("c"+strconv.Itoa(i))) + if err != nil { + b.Fatal(err) + } + cached[i] = v + } + + // Pre-build fresh-response JSON: entities at indices [cachedCount, n). + freshJSON := []byte(`{"users":[`) + for i := cachedCount; i < n; i++ { + if i > cachedCount { + freshJSON = append(freshJSON, ',') + } + freshJSON = append(freshJSON, benchCopyEntityJSON("f"+strconv.Itoa(i))...) + } + freshJSON = append(freshJSON, `]}`...) + + cachedIndices := make([]int, cachedCount) + for i := range cachedCount { + cachedIndices[i] = i + } + missedIndices := make([]int, 0, n-cachedCount) + for i := cachedCount; i < n; i++ { + missedIndices = append(missedIndices, i) + } + + l, ar := newBenchCopyLoader() + info := &FetchInfo{RootFields: []GraphCoordinate{{FieldName: "users"}}} + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar.Reset() + freshResp, err := astjson.ParseBytesWithArena(ar, freshJSON) + if err != nil { + b.Fatal(err) + } + cacheKeys := make([]*CacheKey, cachedCount) + for i := range cachedCount { + cacheKeys[i] = &CacheKey{ + BatchIndex: i, + FromCache: cached[i], + Keys: []string{"key" + strconv.Itoa(i)}, + } + } + res := &result{ + l2CacheKeys: cacheKeys, + batchCachedIndices: cachedIndices, + batchMissedIndices: missedIndices, + } + l.mergeBatchPartialResponse(res, []*astjson.Value{freshResp}, info) + } + }) + } +} + +// BenchmarkMergeResultCacheSkipFetch exercises loader.go:1472. +// N L1 hits, each StructuralCopy'd before MergeValues into the response item. +func BenchmarkMergeResultCacheSkipFetch(b *testing.B) { + for _, n := range benchCopyEntityCounts { + b.Run("entities="+strconv.Itoa(n), func(b *testing.B) { + cacheArena := arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024)) + cached := make([]*astjson.Value, n) + for i := range n { + v, err := astjson.ParseBytesWithArena(cacheArena, benchCopyEntityJSON(strconv.Itoa(i))) + if err != nil { + b.Fatal(err) + } + cached[i] = v + } + + l, ar := newBenchCopyLoader() + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar.Reset() + // Fresh items per iteration — arena reset invalidates the previous ones. + items := make([]*astjson.Value, n) + l1Keys := make([]*CacheKey, n) + for i := range n { + item, err := astjson.ParseBytesWithArena(ar, []byte(`{"id":"`+strconv.Itoa(i)+`"}`)) + if err != nil { + b.Fatal(err) + } + items[i] = item + l1Keys[i] = &CacheKey{ + Item: item, + FromCache: cached[i], + Keys: []string{"key" + strconv.Itoa(i)}, + } + } + res := &result{ + cacheSkipFetch: true, + batchEntityKeyMode: false, + l1CacheKeys: l1Keys, + } + if err := l.mergeResult(&FetchItem{}, res, items); err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkMergeResultPartialCache exercises loader.go:1491. +// N L1 hits, merged via the partialCacheEnabled branch (fetchSkipped=true to +// short-circuit the rest of mergeResult). +func BenchmarkMergeResultPartialCache(b *testing.B) { + for _, n := range benchCopyEntityCounts { + b.Run("entities="+strconv.Itoa(n), func(b *testing.B) { + cacheArena := arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024)) + cached := make([]*astjson.Value, n) + for i := range n { + v, err := astjson.ParseBytesWithArena(cacheArena, benchCopyEntityJSON(strconv.Itoa(i))) + if err != nil { + b.Fatal(err) + } + cached[i] = v + } + + cachedIndices := make([]int, n) + for i := range n { + cachedIndices[i] = i + } + + l, ar := newBenchCopyLoader() + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar.Reset() + items := make([]*astjson.Value, n) + l1Keys := make([]*CacheKey, n) + for i := range n { + item, err := astjson.ParseBytesWithArena(ar, []byte(`{"id":"`+strconv.Itoa(i)+`"}`)) + if err != nil { + b.Fatal(err) + } + items[i] = item + l1Keys[i] = &CacheKey{ + Item: item, + FromCache: cached[i], + Keys: []string{"key" + strconv.Itoa(i)}, + } + } + res := &result{ + partialCacheEnabled: true, + cachedItemIndices: cachedIndices, + l1CacheKeys: l1Keys, + fetchSkipped: true, + } + if err := l.mergeResult(&FetchItem{}, res, items); err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/v2/pkg/engine/resolve/loader_cache_copy_invariant_test.go b/v2/pkg/engine/resolve/loader_cache_copy_invariant_test.go new file mode 100644 index 0000000000..67328a9854 --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache_copy_invariant_test.go @@ -0,0 +1,262 @@ +// Package resolve tests. +// +// This file contains "copy invariant" tests that exercise the four +// StructuralCopy call sites in loader.go which sit on the cache-hit merge +// paths: +// +// - loader.go:1220 — mergeBatchCacheHit: entityArray.SetArrayItem(..., StructuralCopy(entity)) +// - loader.go:1372 — mergeBatchPartialResponse: completeArray.SetArrayItem(..., StructuralCopy(entity)) +// - loader.go:1472 — mergeResult cacheSkipFetch: MergeValues(..., Item, StructuralCopy(FromCache)) +// - loader.go:1491 — mergeResult partialCacheEnabled: MergeValues(..., Item, StructuralCopy(FromCache)) +// +// The invariant under test: after the merge runs, mutations to the resulting +// response tree MUST NOT mutate the source `FromCache` values that were read +// from the cache. StructuralCopy is what provides that isolation today. +// +// These tests are designed to: +// 1. Pass on current master (proving the invariant holds today). +// 2. Fail if a candidate StructuralCopy is removed AND it was load-bearing +// (i.e., mutations to the merged tree would corrupt a shared container +// node inside FromCache). +// +// If a test still passes after a removal, the copy is provably redundant at +// that site, given how MergeValues and the response tree interact today. +// +// Mutation strategy: we deliberately mutate a NESTED object under the merged +// tree (not a top-level field), because MergeValues only aliases nested +// containers — top-level fields are always rewritten by the merge itself. +// Mutating a nested container is the real-world corruption risk. +package resolve + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" +) + +// copyInvariantEntityJSON is a nested entity shape. The `profile` object is +// the nested container whose aliasing is the corruption risk. +const copyInvariantEntityJSON = `{"__typename":"User","id":"u1","name":"Alice","profile":{"email":"alice@example.com","age":30}}` + +// assertFromCacheUnchanged reparses the original JSON to produce a fresh +// reference value and compares against FromCache.MarshalTo. Using a fresh +// parse avoids any chance of the reference itself being mutated. +func assertFromCacheUnchanged(t *testing.T, fromCache *astjson.Value, originalJSON string) { + t.Helper() + require.NotNil(t, fromCache) + assert.Equal(t, originalJSON, string(fromCache.MarshalTo(nil)), + "FromCache was mutated by downstream merge / response-tree mutation — StructuralCopy at this site is load-bearing") +} + +// TestCopyInvariant_MergeBatchCacheHit targets loader.go:1220. +// +// Scenario: batch entity fetch with EntityMergePath — cached entities are +// spliced into a response array via entityArray.SetArrayItem(..., StructuralCopy(entity)), +// then MergeValuesWithPath merges that response into items[0]. +// +// Adversarial mutation: after the merge, reach into the merged response tree +// and mutate the nested `profile` object. If StructuralCopy is removed, the +// merged response's profile node may alias back into FromCache. +func TestCopyInvariant_MergeBatchCacheHit(t *testing.T) { + l, ar := newCacheMergeTestLoader(t) + + wrapped0, err := astjson.ParseBytesWithArena(ar, []byte(`{"users":`+copyInvariantEntityJSON+`}`)) + require.NoError(t, err) + wrapped1, err := astjson.ParseBytesWithArena(ar, []byte(`{"users":`+copyInvariantEntityJSON+`}`)) + require.NoError(t, err) + + // FromCache points at the entity inside the wrapper (as bulk L2 lookup + // produces, before mergeBatchCacheHit unwraps via EntityMergePath). + fromCache0 := wrapped0 + fromCache1 := wrapped1 + + cacheKeys := []*CacheKey{ + {BatchIndex: 0, FromCache: fromCache0, Keys: []string{"key0"}, EntityMergePath: []string{"users"}}, + {BatchIndex: 1, FromCache: fromCache1, Keys: []string{"key1"}, EntityMergePath: []string{"users"}}, + } + res := &result{l2CacheKeys: cacheKeys} + + // Root-level merge: resolvable.data will be set. + err = l.mergeBatchCacheHit(&FetchItem{}, res, nil) + require.NoError(t, err) + + // Sanity: merge produced the expected shape. + got := string(l.resolvable.data.MarshalTo(nil)) + assert.Equal(t, + `{"users":[`+copyInvariantEntityJSON+`,`+copyInvariantEntityJSON+`]}`, + got) + + // Adversarial mutation: reach into the merged response array's FIRST + // entity's nested profile and mutate it. If the copy is redundant, + // fromCache0's nested profile is independent and survives. If the copy + // was load-bearing, fromCache0's profile is now corrupted. + mergedArray := l.resolvable.data.Get("users").GetArray() + require.Len(t, mergedArray, 2) + profile0 := mergedArray[0].Get("profile") + require.NotNil(t, profile0) + profile0.Set(ar, "email", astjson.StringValue(ar, "CORRUPTED")) + profile0.Set(ar, "age", astjson.NumberValue(ar, "999")) + + // Also mutate the SECOND entity's profile via Del + Set to exercise + // multiple mutation kinds. + profile1 := mergedArray[1].Get("profile") + require.NotNil(t, profile1) + profile1.Del("age") + profile1.Set(ar, "email", astjson.StringValue(ar, "ALSO_CORRUPTED")) + + // Invariant: both FromCache pointers must still produce the original JSON. + // Note: FromCache here is the wrapper value; the entity is at FromCache.users. + assertFromCacheUnchanged(t, fromCache0, `{"users":`+copyInvariantEntityJSON+`}`) + assertFromCacheUnchanged(t, fromCache1, `{"users":`+copyInvariantEntityJSON+`}`) +} + +// TestCopyInvariant_MergeBatchPartialResponse targets loader.go:1372. +// +// Scenario: partial batch fetch — some entities are cache hits (interleaved +// into the result via StructuralCopy), others come from the fresh subgraph +// response. completeArray.SetArrayItem(..., StructuralCopy(entity)) is the +// site under test. +func TestCopyInvariant_MergeBatchPartialResponse(t *testing.T) { + l, ar := newCacheMergeTestLoader(t) + + // Cached entity for index 0 (indices 1 and 2 will come from fresh response). + cachedEntity, err := astjson.ParseBytesWithArena(ar, []byte(copyInvariantEntityJSON)) + require.NoError(t, err) + fromCache := cachedEntity + + // Fresh subgraph response already merged into items[0]: contains entities + // at indices 1 and 2. mergeBatchPartialResponse reads from + // items[0].Get(arrayPath...) and rebuilds the full array. + freshResponse, err := astjson.ParseBytesWithArena(ar, []byte( + `{"users":[`+ + `{"__typename":"User","id":"u2","name":"Bob","profile":{"email":"bob@example.com","age":25}},`+ + `{"__typename":"User","id":"u3","name":"Cara","profile":{"email":"cara@example.com","age":40}}`+ + `]}`)) + require.NoError(t, err) + + items := []*astjson.Value{freshResponse} + + res := &result{ + l2CacheKeys: []*CacheKey{ + {BatchIndex: 0, FromCache: fromCache, Keys: []string{"key0"}}, + }, + batchCachedIndices: []int{0}, + batchMissedIndices: []int{1, 2}, + } + + info := &FetchInfo{RootFields: []GraphCoordinate{{FieldName: "users"}}} + + l.mergeBatchPartialResponse(res, items, info) + + // Sanity: the interleaved array has three elements, with the cached + // entity at index 0. + got := string(items[0].MarshalTo(nil)) + assert.Equal(t, + `{"users":[{"__typename":"User","id":"u1","name":"Alice","profile":{"email":"alice@example.com","age":30}},{"__typename":"User","id":"u2","name":"Bob","profile":{"email":"bob@example.com","age":25}},{"__typename":"User","id":"u3","name":"Cara","profile":{"email":"cara@example.com","age":40}}]}`, + got) + + // Adversarial mutation: mutate the nested profile of the entity that + // was spliced from the cache (index 0 in the rebuilt array). + mergedArray := items[0].Get("users").GetArray() + require.GreaterOrEqual(t, len(mergedArray), 1) + profile0 := mergedArray[0].Get("profile") + require.NotNil(t, profile0) + profile0.Set(ar, "email", astjson.StringValue(ar, "CORRUPTED")) + profile0.Del("age") + + // Invariant: the cached entity must still produce the original JSON. + assertFromCacheUnchanged(t, fromCache, copyInvariantEntityJSON) +} + +// TestCopyInvariant_MergeResultCacheSkipFetch targets loader.go:1472. +// +// Scenario: all entities are full L1 hits — mergeResult takes the +// cacheSkipFetch branch and does MergeValues(Item, StructuralCopy(FromCache)) +// for each key. The Item is the destination (a slot in the response tree); +// FromCache is the cached entity. +// +// Adversarial mutation: mutate the nested `profile` under Item after merge. +// If the copy is load-bearing, FromCache's profile container was aliased and +// is now corrupted. +func TestCopyInvariant_MergeResultCacheSkipFetch(t *testing.T) { + l, ar := newCacheMergeTestLoader(t) + + fromCache, err := astjson.ParseBytesWithArena(ar, []byte(copyInvariantEntityJSON)) + require.NoError(t, err) + + // Item is the response-tree slot where the cached entity will be merged. + // Empty object simulates the placeholder in the response items array. + item, err := astjson.ParseBytesWithArena(ar, []byte(`{"id":"u1"}`)) + require.NoError(t, err) + + res := &result{ + cacheSkipFetch: true, + batchEntityKeyMode: false, + l1CacheKeys: []*CacheKey{ + {Item: item, FromCache: fromCache, Keys: []string{"key0"}}, + }, + } + + err = l.mergeResult(&FetchItem{}, res, []*astjson.Value{item}) + require.NoError(t, err) + + // Sanity: item now has the cached fields merged in. + assert.Equal(t, + `{"id":"u1","__typename":"User","name":"Alice","profile":{"email":"alice@example.com","age":30}}`, + string(item.MarshalTo(nil))) + + // Adversarial mutation: mutate nested profile. + profile := item.Get("profile") + require.NotNil(t, profile) + profile.Set(ar, "email", astjson.StringValue(ar, "CORRUPTED")) + profile.Del("age") + + // Invariant: fromCache must still produce the original JSON. + assertFromCacheUnchanged(t, fromCache, copyInvariantEntityJSON) +} + +// TestCopyInvariant_MergeResultPartialCache targets loader.go:1491. +// +// Scenario: partial cache loading — some items are L1 hits, others require +// fetch. mergeResult first merges cached entries (the loop at line 1484-1497) +// via MergeValues(Item, StructuralCopy(FromCache)), then returns early +// because fetchSkipped=true (we only want to exercise the partial-cache +// branch in this test). +func TestCopyInvariant_MergeResultPartialCache(t *testing.T) { + l, ar := newCacheMergeTestLoader(t) + + fromCache, err := astjson.ParseBytesWithArena(ar, []byte(copyInvariantEntityJSON)) + require.NoError(t, err) + + item, err := astjson.ParseBytesWithArena(ar, []byte(`{"id":"u1"}`)) + require.NoError(t, err) + + res := &result{ + partialCacheEnabled: true, + cachedItemIndices: []int{0}, + l1CacheKeys: []*CacheKey{ + {Item: item, FromCache: fromCache, Keys: []string{"key0"}}, + }, + fetchSkipped: true, // short-circuit after the partial-cache merge loop + } + + err = l.mergeResult(&FetchItem{}, res, []*astjson.Value{item}) + require.NoError(t, err) + + // Sanity: item now has the cached fields merged in. + assert.Equal(t, + `{"id":"u1","__typename":"User","name":"Alice","profile":{"email":"alice@example.com","age":30}}`, + string(item.MarshalTo(nil))) + + // Adversarial mutation: mutate nested profile. + profile := item.Get("profile") + require.NotNil(t, profile) + profile.Set(ar, "email", astjson.StringValue(ar, "CORRUPTED")) + profile.Del("age") + + // Invariant: fromCache must still produce the original JSON. + assertFromCacheUnchanged(t, fromCache, copyInvariantEntityJSON) +} diff --git a/v2/pkg/engine/resolve/loader_cache_merge_test.go b/v2/pkg/engine/resolve/loader_cache_merge_test.go new file mode 100644 index 0000000000..61c48a0c66 --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache_merge_test.go @@ -0,0 +1,409 @@ +package resolve + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +func newCacheMergeTestLoader(t *testing.T) (*Loader, arena.Arena) { + t.Helper() + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL2Cache = true + resolvable := NewResolvable(ar, ResolvableOptions{}) + require.NoError(t, resolvable.Init(ctx, nil, ast.OperationTypeQuery)) + l := &Loader{ + jsonArena: ar, + resolvable: resolvable, + ctx: ctx, + } + return l, ar +} + +func TestMergeBatchCacheHit(t *testing.T) { + t.Run("empty batch with no items sets response data with empty array at field name", func(t *testing.T) { + // maxIndex < 0 (no cache keys), len(items) == 0 → replaces resolvable.data + l, _ := newCacheMergeTestLoader(t) + + res := &result{ + fetchInfo: &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }, + } + fetchItem := &FetchItem{} + + err := l.mergeBatchCacheHit(fetchItem, res, nil) + require.NoError(t, err) + + // The response data should be {"products":[]} + got := string(l.resolvable.data.MarshalTo(nil)) + assert.Equal(t, `{"products":[]}`, got) + }) + + t.Run("empty batch with one item merges at batchMergePath", func(t *testing.T) { + // maxIndex < 0, len(items) == 1 → merge empty response into items[0] at batchMergePath + l, ar := newCacheMergeTestLoader(t) + + existing, err := astjson.ParseBytesWithArena(ar, []byte(`{"data":"existing"}`)) + require.NoError(t, err) + + items := []*astjson.Value{existing} + res := &result{ + batchMergePath: []string{"nested"}, + fetchInfo: &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }, + } + fetchItem := &FetchItem{} + + err = l.mergeBatchCacheHit(fetchItem, res, items) + require.NoError(t, err) + + // items[0] should now have the empty batch merged at "nested" + got := string(items[0].MarshalTo(nil)) + assert.Equal(t, `{"data":"existing","nested":{"products":[]}}`, got) + }) + + t.Run("normal batch places cached entities at correct positions", func(t *testing.T) { + // Two cache hits at indices 0 and 2, index 1 is a miss → null in result array + l, ar := newCacheMergeTestLoader(t) + + entity0, err := astjson.ParseBytesWithArena(ar, []byte(`{"upc":"top-1","name":"Trilby"}`)) + require.NoError(t, err) + entity2, err := astjson.ParseBytesWithArena(ar, []byte(`{"upc":"top-3","name":"Fedora"}`)) + require.NoError(t, err) + + cacheKeys := []*CacheKey{ + {BatchIndex: 0, FromCache: entity0, Keys: []string{"key0"}}, + {BatchIndex: 1, FromCache: nil, Keys: []string{"key1"}}, + {BatchIndex: 2, FromCache: entity2, Keys: []string{"key2"}}, + } + res := &result{ + l2CacheKeys: cacheKeys, + } + fetchItem := &FetchItem{} + + // No items → sets resolvable.data directly (root-level merge without EntityMergePath) + err = l.mergeBatchCacheHit(fetchItem, res, nil) + require.NoError(t, err) + + // Without EntityMergePath, responseData is an empty object with entities in the array + // but the array is only set under entityMergePath. With no entityMergePath, the object + // is set as resolvable.data directly. Let's verify the data is set. + got := string(l.resolvable.data.MarshalTo(nil)) + assert.Equal(t, `{}`, got) + }) + + t.Run("batch with EntityMergePath extracts entities from wrapper", func(t *testing.T) { + // Entities are wrapped at EntityMergePath (e.g., {"products": {...entity...}}) + // during L2 load. mergeBatchCacheHit extracts them via Get(entityMergePath...). + l, ar := newCacheMergeTestLoader(t) + + wrapped0, err := astjson.ParseBytesWithArena(ar, []byte(`{"products":{"upc":"top-1","name":"Trilby"}}`)) + require.NoError(t, err) + wrapped1, err := astjson.ParseBytesWithArena(ar, []byte(`{"products":{"upc":"top-2","name":"Bowler"}}`)) + require.NoError(t, err) + + cacheKeys := []*CacheKey{ + {BatchIndex: 0, FromCache: wrapped0, Keys: []string{"key0"}, EntityMergePath: []string{"products"}}, + {BatchIndex: 1, FromCache: wrapped1, Keys: []string{"key1"}, EntityMergePath: []string{"products"}}, + } + res := &result{ + l2CacheKeys: cacheKeys, + } + fetchItem := &FetchItem{} + + // Root-level merge: sets resolvable.data + err = l.mergeBatchCacheHit(fetchItem, res, nil) + require.NoError(t, err) + + // With EntityMergePath ["products"], the response is {"products": [entity0, entity1]} + got := string(l.resolvable.data.MarshalTo(nil)) + assert.Equal(t, `{"products":[{"upc":"top-1","name":"Trilby"},{"upc":"top-2","name":"Bowler"}]}`, got) + }) + + t.Run("batch with EntityMergePath merges into items at batchMergePath", func(t *testing.T) { + // Same as above but with items[0] and batchMergePath + l, ar := newCacheMergeTestLoader(t) + + existing, err := astjson.ParseBytesWithArena(ar, []byte(`{"other":"value"}`)) + require.NoError(t, err) + + wrapped0, err := astjson.ParseBytesWithArena(ar, []byte(`{"products":{"upc":"top-1"}}`)) + require.NoError(t, err) + + cacheKeys := []*CacheKey{ + {BatchIndex: 0, FromCache: wrapped0, Keys: []string{"key0"}, EntityMergePath: []string{"products"}}, + } + res := &result{ + l2CacheKeys: cacheKeys, + batchMergePath: []string{"nested"}, + } + fetchItem := &FetchItem{} + items := []*astjson.Value{existing} + + err = l.mergeBatchCacheHit(fetchItem, res, items) + require.NoError(t, err) + + got := string(items[0].MarshalTo(nil)) + assert.Equal(t, `{"other":"value","nested":{"products":[{"upc":"top-1"}]}}`, got) + }) + + t.Run("batch with EntityMergePath matching batchMergePath merges entities into existing root array", func(t *testing.T) { + l, ar := newCacheMergeTestLoader(t) + + existing, err := astjson.ParseBytesWithArena(ar, []byte(`{"catalogs":[{"id":"c1","name":"Electronics","itemCount":342},{"id":"c2","name":"Books","itemCount":1205}]}`)) + require.NoError(t, err) + + wrapped0, err := astjson.ParseBytesWithArena(ar, []byte(`{"catalogs":{"id":"c1","description":"Consumer electronics, gadgets, and accessories.","lastUpdated":"2025-03-15T08:00:00Z"}}`)) + require.NoError(t, err) + wrapped1, err := astjson.ParseBytesWithArena(ar, []byte(`{"catalogs":{"id":"c2","description":"Fiction, non-fiction, technical books, and audiobooks.","lastUpdated":"2025-03-20T12:00:00Z"}}`)) + require.NoError(t, err) + + items := []*astjson.Value{existing} + res := &result{ + l2CacheKeys: []*CacheKey{ + {BatchIndex: 0, FromCache: wrapped0, Keys: []string{"key0"}, EntityMergePath: []string{"catalogs"}}, + {BatchIndex: 1, FromCache: wrapped1, Keys: []string{"key1"}, EntityMergePath: []string{"catalogs"}}, + }, + batchMergePath: []string{"catalogs"}, + postProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + fetchInfo: &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "catalogs"}}, + }, + } + + err = l.mergeBatchCacheHit(&FetchItem{}, res, items) + require.NoError(t, err) + + assert.Equal(t, `{"catalogs":[{"id":"c1","name":"Electronics","itemCount":342,"description":"Consumer electronics, gadgets, and accessories.","lastUpdated":"2025-03-15T08:00:00Z"},{"id":"c2","name":"Books","itemCount":1205,"description":"Fiction, non-fiction, technical books, and audiobooks.","lastUpdated":"2025-03-20T12:00:00Z"}]}`, string(items[0].MarshalTo(nil))) + }) +} + +func TestPopulateBatchCacheKeysFromResponse(t *testing.T) { + t.Run("batchEntityKeyMode false returns immediately", func(t *testing.T) { + // When batchEntityKeyMode is false, the function should not set any Items + l, ar := newCacheMergeTestLoader(t) + + responseObj, err := astjson.ParseBytesWithArena(ar, []byte(`{"products":[{"upc":"top-1"}]}`)) + require.NoError(t, err) + + ck := &CacheKey{BatchIndex: 0, Keys: []string{"key0"}} + res := &result{ + batchEntityKeyMode: false, // disabled + l2CacheKeys: []*CacheKey{ck}, + } + items := []*astjson.Value{responseObj} + + l.populateBatchCacheKeysFromResponse(res, items, &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }) + + // Item should remain nil because batchEntityKeyMode is false + assert.Nil(t, ck.Item) + }) + + t.Run("normal batch assigns array items to cache keys by BatchIndex", func(t *testing.T) { + // Each array element should be assigned to the CacheKey with matching BatchIndex + l, ar := newCacheMergeTestLoader(t) + + responseObj, err := astjson.ParseBytesWithArena(ar, []byte(`{"products":[{"upc":"top-1"},{"upc":"top-2"},{"upc":"top-3"}]}`)) + require.NoError(t, err) + + ck0 := &CacheKey{BatchIndex: 0, Keys: []string{"key0"}} + ck1 := &CacheKey{BatchIndex: 1, Keys: []string{"key1"}} + ck2 := &CacheKey{BatchIndex: 2, Keys: []string{"key2"}} + + res := &result{ + batchEntityKeyMode: true, + l2CacheKeys: []*CacheKey{ck0, ck1, ck2}, + } + items := []*astjson.Value{responseObj} + + l.populateBatchCacheKeysFromResponse(res, items, &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }) + + require.NotNil(t, ck0.Item) + assert.Equal(t, `{"upc":"top-1"}`, string(ck0.Item.MarshalTo(nil))) + require.NotNil(t, ck1.Item) + assert.Equal(t, `{"upc":"top-2"}`, string(ck1.Item.MarshalTo(nil))) + require.NotNil(t, ck2.Item) + assert.Equal(t, `{"upc":"top-3"}`, string(ck2.Item.MarshalTo(nil))) + // EntityMergePath should be cleared after population + assert.Nil(t, ck0.EntityMergePath) + assert.Nil(t, ck1.EntityMergePath) + assert.Nil(t, ck2.EntityMergePath) + }) + + t.Run("items with batchMergePath navigates to nested array", func(t *testing.T) { + // When batchMergePath is set, the function navigates through it first + l, ar := newCacheMergeTestLoader(t) + + responseObj, err := astjson.ParseBytesWithArena(ar, []byte(`{"nested":{"products":[{"id":"1"},{"id":"2"}]}}`)) + require.NoError(t, err) + + ck0 := &CacheKey{BatchIndex: 0, Keys: []string{"key0"}} + ck1 := &CacheKey{BatchIndex: 1, Keys: []string{"key1"}} + + res := &result{ + batchEntityKeyMode: true, + batchMergePath: []string{"nested"}, + l2CacheKeys: []*CacheKey{ck0, ck1}, + } + items := []*astjson.Value{responseObj} + + l.populateBatchCacheKeysFromResponse(res, items, &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }) + + require.NotNil(t, ck0.Item) + assert.Equal(t, `{"id":"1"}`, string(ck0.Item.MarshalTo(nil))) + require.NotNil(t, ck1.Item) + assert.Equal(t, `{"id":"2"}`, string(ck1.Item.MarshalTo(nil))) + }) + + t.Run("empty items slice returns immediately", func(t *testing.T) { + // len(items) == 0 → early return + l, _ := newCacheMergeTestLoader(t) + + ck := &CacheKey{BatchIndex: 0, Keys: []string{"key0"}} + res := &result{ + batchEntityKeyMode: true, + l2CacheKeys: []*CacheKey{ck}, + } + + l.populateBatchCacheKeysFromResponse(res, nil, &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }) + + assert.Nil(t, ck.Item) + }) + + t.Run("l1CacheKeys also populated", func(t *testing.T) { + // The function iterates both l2CacheKeys and l1CacheKeys + l, ar := newCacheMergeTestLoader(t) + + responseObj, err := astjson.ParseBytesWithArena(ar, []byte(`{"products":[{"upc":"a"},{"upc":"b"}]}`)) + require.NoError(t, err) + + l1ck0 := &CacheKey{BatchIndex: 0, Keys: []string{"l1key0"}} + l1ck1 := &CacheKey{BatchIndex: 1, Keys: []string{"l1key1"}} + + res := &result{ + batchEntityKeyMode: true, + l1CacheKeys: []*CacheKey{l1ck0, l1ck1}, + } + items := []*astjson.Value{responseObj} + + l.populateBatchCacheKeysFromResponse(res, items, &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }) + + require.NotNil(t, l1ck0.Item) + assert.Equal(t, `{"upc":"a"}`, string(l1ck0.Item.MarshalTo(nil))) + require.NotNil(t, l1ck1.Item) + assert.Equal(t, `{"upc":"b"}`, string(l1ck1.Item.MarshalTo(nil))) + }) + + t.Run("partial fetch skips cached indices", func(t *testing.T) { + // When batchPartialFetchEnabled=true, cached indices are skipped + l, ar := newCacheMergeTestLoader(t) + + responseObj, err := astjson.ParseBytesWithArena(ar, []byte(`{"products":[{"upc":"a"},{"upc":"b"},{"upc":"c"}]}`)) + require.NoError(t, err) + + ck0 := &CacheKey{BatchIndex: 0, Keys: []string{"key0"}} + ck1 := &CacheKey{BatchIndex: 1, Keys: []string{"key1"}} + ck2 := &CacheKey{BatchIndex: 2, Keys: []string{"key2"}} + + res := &result{ + batchEntityKeyMode: true, + batchPartialFetchEnabled: true, + batchCachedIndices: []int{0, 2}, // indices 0 and 2 are cached + l2CacheKeys: []*CacheKey{ck0, ck1, ck2}, + } + items := []*astjson.Value{responseObj} + + l.populateBatchCacheKeysFromResponse(res, items, &FetchInfo{ + RootFields: []GraphCoordinate{{FieldName: "products"}}, + }) + + // Only index 1 (not cached) should have Item set + assert.Nil(t, ck0.Item) + require.NotNil(t, ck1.Item) + assert.Equal(t, `{"upc":"b"}`, string(ck1.Item.MarshalTo(nil))) + assert.Nil(t, ck2.Item) + }) +} + +func TestFilterBatchVariablesForPartialFetch(t *testing.T) { + t.Run("filters batch variables to only missed indices", func(t *testing.T) { + // Array variable with 5 items, only indices 1 and 3 are missed + l, _ := newCacheMergeTestLoader(t) + + variables, err := astjson.ParseBytes([]byte(`{"upcs":["a","b","c","d","e"]}`)) + require.NoError(t, err) + l.ctx.Variables = variables + + f := &SingleFetch{} + f.Caching = FetchCacheConfiguration{ + CacheKeyTemplate: &RootQueryCacheKeyTemplate{ + EntityKeyMappings: []EntityKeyMappingConfig{ + { + EntityTypeName: "Product", + FieldMappings: []EntityFieldMappingConfig{ + { + EntityKeyField: "upc", + ArgumentPath: []string{"upcs"}, + ArgumentIsEntityKey: true, + }, + }, + }, + }, + }, + } + // Trigger precomputation of batchEntityKeyArgumentPath + f.Caching.CacheKeyTemplate.(*RootQueryCacheKeyTemplate).precomputeDerivedFields() + + res := &result{ + batchMissedIndices: []int{1, 3}, + } + + renderCtx, err := l.filterBatchVariablesForPartialFetch(res, f) + require.NoError(t, err) + require.NotNil(t, renderCtx) + + // The filtered variables should contain only items at indices 1 and 3 + got := string(renderCtx.Variables.MarshalTo(nil)) + assert.Equal(t, `{"upcs":["b","d"]}`, got) + }) + + t.Run("empty argument path returns nil", func(t *testing.T) { + // When batchEntityKeyArgumentPath is empty, returns nil + l, _ := newCacheMergeTestLoader(t) + + f := &SingleFetch{} + f.Caching = FetchCacheConfiguration{ + CacheKeyTemplate: &RootQueryCacheKeyTemplate{}, + } + + res := &result{ + batchMissedIndices: []int{0}, + } + + renderCtx, err := l.filterBatchVariablesForPartialFetch(res, f) + require.NoError(t, err) + assert.Nil(t, renderCtx) + }) +} diff --git a/v2/pkg/engine/resolve/loader_cache_phase2_test.go b/v2/pkg/engine/resolve/loader_cache_phase2_test.go new file mode 100644 index 0000000000..44e3b90be2 --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache_phase2_test.go @@ -0,0 +1,200 @@ +package resolve + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +// TestL1Cache_RootFieldPromotionWithAliases verifies that root-field L1 +// promotion stores entity values using SCHEMA field names, not response +// (aliased) names. Without the normalize-on-write fix, an aliased root query +// would silently corrupt entity L1 reads for subsequent entity fetches. +func TestL1Cache_RootFieldPromotionWithAliases(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + loader := &Loader{ + jsonArena: ar, + l1Cache: map[string]*astjson.Value{}, + ctx: ctx, + resolvable: &Resolvable{ + // Response uses aliased field names ("identifier" for "id", + // "fullName" for "name") — this is what the subgraph returned + // after alias rewriting. + data: mustParseArena(t, ar, `{"users":[{"identifier":"42","fullName":"Alice","__typename":"User"}]}`), + }, + } + + // Entity Object describing the schema-name shape (id, name). + providesData := &Object{ + Fields: []*Field{ + {Name: []byte("users"), Value: &Array{Item: &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &Scalar{}}, + {Name: []byte("identifier"), OriginalName: []byte("id"), Value: &Scalar{}}, + {Name: []byte("fullName"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + }}}, + }, + } + + entityTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Path: []string{"users"}, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + // Template reads the aliased field name from the response. + {Name: []byte("id"), Value: &String{Path: []string{"identifier"}}}, + }, + }), + } + + fetchItem := &FetchItem{ + Fetch: &SingleFetch{ + FetchConfiguration: FetchConfiguration{ + Caching: FetchCacheConfiguration{ + Enabled: true, + UseL1Cache: true, + RootFieldL1EntityCacheKeyTemplates: map[string]CacheKeyTemplate{ + "users:User": entityTemplate, + }, + }, + }, + Info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + }, + } + + loader.populateL1CacheForRootFieldEntities(fetchItem) + + cacheKey := `{"__typename":"User","key":{"id":"42"}}` + cached, ok := loader.l1Cache[cacheKey] + require.True(t, ok, "entity promoted to L1 cache") + + // Stored value must use SCHEMA field names (id, name), not response + // names (identifier, fullName). This is the bug fix: without the + // normalize-on-write step, the cached value would carry alias names + // and later entity fetches using validateItemHasRequiredData against + // schema names would silently miss. + assert.Equal(t, + `{"__typename":"User","id":"42","name":"Alice"}`, + string(cached.MarshalTo(nil))) + + // Verify a subsequent entity fetch for User{id:"42"} can L1-hit. + entityCacheKey := &CacheKey{ + Keys: []string{cacheKey}, + } + entityInfo := &FetchInfo{ + OperationType: ast.OperationTypeQuery, + DataSourceName: "accounts", + RootFields: []GraphCoordinate{ + {TypeName: "User", FieldName: "_entities"}, + }, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &Scalar{}}, + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), Value: &Scalar{}}, + }, + }, + } + res := &result{} + allComplete := loader.tryL1CacheLoad(entityInfo, []*CacheKey{entityCacheKey}, res) + assert.True(t, allComplete, "entity L1 read should succeed with schema-shape cached value") +} + +// TestL2WritePreservesFieldsOutsideSelection verifies that when a fetch +// writes back to L2 cache, fields that were cached from previous queries but +// not in the current query's selection are preserved via the mergeValues +// writeback. +func TestL2WritePreservesFieldsOutsideSelection(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + // Simulate a previous L2 entry with {id, name}. + prior := mustParseArena(t, ar, `{"__typename":"User","id":"1","name":"Alice"}`) + // Fresh fetch writeback only contains {id, email} (current query selection). + fresh := mustParseArena(t, ar, `{"__typename":"User","id":"1","email":"alice@example.com"}`) + + merged := mergeCachedValueForWrite(ar, prior, fresh) + require.NotNil(t, merged) + + // The merged value must contain all three fields — name from prior, + // email from fresh. Fresh wins on overlapping fields (id). + assert.Equal(t, + `{"__typename":"User","id":"1","name":"Alice","email":"alice@example.com"}`, + string(merged.MarshalTo(nil))) +} + +// TestExportRequestScopedFields_MergeWorkingCopyOnFailure verifies that when +// MergeValues fails for a request-scoped L1 merge (e.g., differing array +// lengths), the live cache entry is NOT mutated — the working-copy-and-swap +// pattern isolates the failure. +func TestExportRequestScopedFields_MergeWorkingCopyOnFailure(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + ctx: ctx, + } + + // Store an initial cached entry with an array of length 2. + initialBytes := []byte(`{"tags":["a","b"]}`) + initial := mustParseArena(t, ar, string(initialBytes)) + l.requestScopedL1["myKey"] = initial + + // Try to export a value with a conflicting nested shape — an array of + // length 3 vs the existing length 2. astjson.MergeValues returns an + // ErrMergeDifferingArrayLengths error in that case. + sources := []*astjson.Value{ + mustParseArena(t, ar, `{"viewer":{"tags":["x","y","z"]}}`), + } + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "tags", + FieldPath: []string{"viewer", "tags"}, + L1Key: "myKey", + // No ProvidesData → DeepCopy without Transform → no widening check. + }, + }, + } + + // Drop ProvidesData to use the no-Transform path. The export will + // attempt to merge the fresh value ["x","y","z"] into a working copy + // of the existing {"tags":["a","b"]}. Merging a bare array into an + // object of different shape will fail safely. + // Note: FieldPath navigates to the "tags" array, and the new value is + // a 3-element array vs existing entry being an object with "tags":[2]. + l.exportRequestScopedFields(&result{}, cfg, sources) + + // Verify the live cache entry is unchanged. + cached, ok := l.requestScopedL1["myKey"] + require.True(t, ok) + + // The existing entry must be byte-identical to initialBytes (with no + // partial mutation). Accept either the original untouched state or a + // successful merge that preserves the original shape. + stored := string(cached.MarshalTo(nil)) + // The key invariant: the stored value is byte-identical to the original — + // merging an array into an object fails with a type mismatch, so the + // working-copy-and-swap leaves the live entry untouched (never partially corrupted). + assert.Equal(t, `{"tags":["a","b"]}`, stored) +} diff --git a/v2/pkg/engine/resolve/loader_cache_test.go b/v2/pkg/engine/resolve/loader_cache_test.go new file mode 100644 index 0000000000..b3817bfe30 --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache_test.go @@ -0,0 +1,309 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +// TestLoader_PopulateFromCache verifies that populateFromCache correctly assigns +// cache hits to FromCache, tracks freshness ordering across multi-key entities, +// and records missing keys for partial hits. Without this, stale or wrong candidates +// could be served from L2 cache. +func TestLoader_PopulateFromCache(t *testing.T) { + t.Parallel() + + t.Run("single key single entry sets FromCache", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{} + + cacheKeys := []*CacheKey{ + { + Item: astjson.MustParse(`{}`), + Keys: []string{`{"__typename":"User","key":{"id":"1234"}}`}, + }, + } + entries := []*CacheEntry{ + { + Key: `{"__typename":"User","key":{"id":"1234"}}`, + Value: []byte(`{"id":"1234","username":"Me"}`), + RemainingTTL: 15 * time.Second, + }, + } + + err := l.populateFromCache(ar, cacheKeys, entries) + require.NoError(t, err) + require.NotNil(t, cacheKeys[0].FromCache) + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(cacheKeys[0].FromCache.MarshalTo(nil))) + assert.Equal(t, 15*time.Second, cacheKeys[0].fromCacheRemainingTTL) + assert.Equal(t, []fromCacheCandidate{ + { + value: []byte(`{"id":"1234","username":"Me"}`), + remainingTTL: 15 * time.Second, + }, + }, cacheKeys[0].fromCacheCandidates) + assert.Nil(t, cacheKeys[0].missingKeys) + assert.False(t, cacheKeys[0].fromCacheNeedsWriteback) + }) + + t.Run("two keys both hit uses freshest candidate and retains stale fallback", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{} + + cacheKeys := []*CacheKey{ + { + Item: astjson.MustParse(`{}`), + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"username":"Me"}}`, + }, + }, + } + entries := []*CacheEntry{ + { + Key: `{"__typename":"User","key":{"id":"1234"}}`, + Value: []byte(`{"id":"1234","username":"FreshName"}`), + RemainingTTL: 30 * time.Second, + }, + { + Key: `{"__typename":"User","key":{"username":"Me"}}`, + Value: []byte(`{"id":"1234","username":"StaleName"}`), + RemainingTTL: 10 * time.Second, + }, + } + + err := l.populateFromCache(ar, cacheKeys, entries) + require.NoError(t, err) + require.NotNil(t, cacheKeys[0].FromCache) + assert.Equal(t, `{"id":"1234","username":"FreshName"}`, string(cacheKeys[0].FromCache.MarshalTo(nil))) + assert.Equal(t, 30*time.Second, cacheKeys[0].fromCacheRemainingTTL) + assert.Equal(t, []fromCacheCandidate{ + { + value: []byte(`{"id":"1234","username":"FreshName"}`), + remainingTTL: 30 * time.Second, + }, + { + value: []byte(`{"id":"1234","username":"StaleName"}`), + remainingTTL: 10 * time.Second, + }, + }, cacheKeys[0].fromCacheCandidates) + assert.Nil(t, cacheKeys[0].missingKeys) + }) + + t.Run("known freshness outranks unknown freshness", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{} + + cacheKeys := []*CacheKey{ + { + Item: astjson.MustParse(`{}`), + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"username":"Me"}}`, + }, + }, + } + entries := []*CacheEntry{ + { + Key: `{"__typename":"User","key":{"id":"1234"}}`, + Value: []byte(`{"id":"1234","username":"FreshName"}`), + RemainingTTL: 20 * time.Second, + }, + { + Key: `{"__typename":"User","key":{"username":"Me"}}`, + Value: []byte(`{"id":"1234","username":"UnknownFreshness"}`), + }, + } + + err := l.populateFromCache(ar, cacheKeys, entries) + require.NoError(t, err) + require.NotNil(t, cacheKeys[0].FromCache) + assert.Equal(t, `{"id":"1234","username":"FreshName"}`, string(cacheKeys[0].FromCache.MarshalTo(nil))) + assert.Equal(t, []fromCacheCandidate{ + { + value: []byte(`{"id":"1234","username":"FreshName"}`), + remainingTTL: 20 * time.Second, + }, + { + value: []byte(`{"id":"1234","username":"UnknownFreshness"}`), + remainingTTL: 0, + }, + }, cacheKeys[0].fromCacheCandidates) + assert.Nil(t, cacheKeys[0].missingKeys) + }) + + t.Run("equal freshness preserves cache.Get order", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{} + + cacheKeys := []*CacheKey{ + { + Item: astjson.MustParse(`{}`), + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"username":"Me"}}`, + }, + }, + } + entries := []*CacheEntry{ + { + Key: `{"__typename":"User","key":{"id":"1234"}}`, + Value: []byte(`{"id":"1234","username":"First"}`), + RemainingTTL: 25 * time.Second, + }, + { + Key: `{"__typename":"User","key":{"username":"Me"}}`, + Value: []byte(`{"id":"1234","username":"Second"}`), + RemainingTTL: 25 * time.Second, + }, + } + + err := l.populateFromCache(ar, cacheKeys, entries) + require.NoError(t, err) + require.NotNil(t, cacheKeys[0].FromCache) + assert.Equal(t, `{"id":"1234","username":"First"}`, string(cacheKeys[0].FromCache.MarshalTo(nil))) + assert.Equal(t, []fromCacheCandidate{ + { + value: []byte(`{"id":"1234","username":"First"}`), + remainingTTL: 25 * time.Second, + }, + { + value: []byte(`{"id":"1234","username":"Second"}`), + remainingTTL: 25 * time.Second, + }, + }, cacheKeys[0].fromCacheCandidates) + assert.Nil(t, cacheKeys[0].missingKeys) + }) + + t.Run("partial hit records exactly which requested keys were missing", func(t *testing.T) { + t.Parallel() + + // Scenario: one CacheKey asks for three concrete L2 keys, but the cache only + // returns a value for the id key. populateFromCache should preserve the hit as + // FromCache and record the exact missing requested keys in order. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{} + + cacheKeys := []*CacheKey{ + { + Item: astjson.MustParse(`{}`), + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"email":"me@example.com"}}`, + `{"__typename":"User","key":{"username":"Me"}}`, + }, + }, + } + entries := []*CacheEntry{ + { + Key: `{"__typename":"User","key":{"id":"1234"}}`, + Value: []byte(`{"id":"1234","username":"Me"}`), + RemainingTTL: 20 * time.Second, + }, + } + + err := l.populateFromCache(ar, cacheKeys, entries) + require.NoError(t, err) + // Assert the hit candidate becomes FromCache and missingKeys keeps only the + // two requested keys that did not come back from cache.Get. + require.NotNil(t, cacheKeys[0].FromCache) + assert.Equal(t, `{"id":"1234","username":"Me"}`, string(cacheKeys[0].FromCache.MarshalTo(nil))) + assert.Equal(t, []string{ + `{"__typename":"User","key":{"email":"me@example.com"}}`, + `{"__typename":"User","key":{"username":"Me"}}`, + }, cacheKeys[0].missingKeys) + }) + + t.Run("no keys hit leaves FromCache nil", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{} + + cacheKeys := []*CacheKey{ + { + Item: astjson.MustParse(`{}`), + Keys: []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"username":"Me"}}`, + }, + }, + } + entries := []*CacheEntry{nil, nil} + + err := l.populateFromCache(ar, cacheKeys, entries) + require.NoError(t, err) + assert.Nil(t, cacheKeys[0].FromCache) + assert.Zero(t, cacheKeys[0].fromCacheRemainingTTL) + assert.Nil(t, cacheKeys[0].fromCacheCandidates) + assert.Equal(t, []string{ + `{"__typename":"User","key":{"id":"1234"}}`, + `{"__typename":"User","key":{"username":"Me"}}`, + }, cacheKeys[0].missingKeys) + assert.False(t, cacheKeys[0].fromCacheNeedsWriteback) + }) +} + +// TestLoaderBuildCacheTrace_PredictableDebugTimingsNormalizeZeroDurationOperations +// verifies that predictable debug timings normalize zero-duration L2 operations to 1ns. +// Without this, flaky timing values would make trace output non-deterministic in tests. +func TestLoaderBuildCacheTrace_PredictableDebugTimingsNormalizeZeroDurationOperations(t *testing.T) { + ctx := NewContext(context.Background()) + ctx.TracingOptions = TraceOptions{ + Enable: true, + EnablePredictableDebugTimings: true, + } + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + loader := &Loader{ctx: ctx} + res := &result{ + cache: NewFakeLoaderCache(), + cacheTraceL2GetAttempted: true, + cacheTraceL2SetAttempted: true, + cacheTraceL2Misses: 1, + cacheTraceL2SetError: "write failed", + cacheTraceEntityCount: 1, + l2CacheKeys: []*CacheKey{ + {Keys: []string{"key-1"}}, + }, + } + + trace := loader.buildCacheTrace(res, FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: &EntityQueryCacheKeyTemplate{}, + }) + + assert.Equal(t, &CacheTrace{ + DurationSinceStartNano: 1, // predictable debug timing + DurationSinceStartPretty: "1ns", + DurationNano: 1, + DurationPretty: "1ns", + L2Enabled: true, + CacheName: "default", + TTLSeconds: 30, + EntityCount: 1, // 1 cache key + L2Miss: 1, + L2GetDurationNano: 1, + L2GetDurationPretty: "1ns", + L2SetDurationNano: 1, + L2SetDurationPretty: "1ns", + Keys: []string{"key-1"}, + L2SetError: "write failed", + }, trace) +} diff --git a/v2/pkg/engine/resolve/loader_cache_transform.go b/v2/pkg/engine/resolve/loader_cache_transform.go new file mode 100644 index 0000000000..49c3a0259d --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache_transform.go @@ -0,0 +1,435 @@ +// StructuralCopy helpers for entity caching. +// +// This file hosts the four Loader StructuralCopy variants that isolate cache +// storage from the response tree: +// +// - structuralCopyNormalized — L2 write path: project to +// ProvidesData fields only (rename aliases → schema names, drop unlisted). +// - structuralCopyDenormalized — L2 read path: rename schema names +// back to the current query's aliases, projected to ProvidesData. +// - structuralCopyNormalizedPassthrough — L1 write path: rename aliases but +// KEEP source fields not listed in ProvidesData (@key fields, fields +// contributed by sibling fetches). Driven by Transform.Passthrough = true. +// - structuralCopyDenormalizedPassthrough — L1 read path: restore aliases +// while preserving all accumulated fields from prior fetches. +// +// All four allocate onto l.jsonArena and return an *astjson.Value owned by +// the current request. StructuralCopy clones container nodes (objects, +// arrays) on the arena and ALIASES leaf nodes (strings, numbers, bools, +// nulls) from the source — safe because every live *astjson.Value within a +// request shares the same arena lifetime. +// +// Why the copies are load-bearing: astjson.MergeValues aliases nested +// container nodes from src into dst, so without a StructuralCopy isolating +// cached values, subsequent mutations of the response tree (a later fetch +// merging into the same item, or the L1 merge-into-existing writeback path) +// would reach back into and corrupt the cached entry. The L1 +// merge-into-existing path pushes this further: it must also use +// working-copy-and-swap (StructuralCopy the live entry, MergeValues into +// the copy, Store the copy) because MergeValues is non-atomic on failure +// and a partial mutation of the live entry would corrupt every sibling L1 +// key pointing at the same *Value. +// +// Ephemeral Transforms: the *astjson.Transform trees built here are +// constructed inline on the reusable transformEntries/transforms/ +// transformMetas slabs and consumed by StructuralCopyWithTransform in the +// same call. They depend on per-request state (Context.Variables, +// RemapVariables flow into CacheArgs OutputKey suffixes), so they must NEVER +// be cached on *Object, the plan tree, the Resolver, or anywhere else that +// outlives a single request. +// +// The per-flow minimum-copy budget is tabulated in +// v2/pkg/engine/resolve/CLAUDE.md §"Copy Budget"; see also §"Entity L1 +// Representation" for the full invariant set. Adversarial mutation tests in +// loader_cache_copy_invariant_test.go fail if any of these copies is +// dropped. A few cache-adjacent paths legitimately skip StructuralCopy — +// e.g. extension-based invalidation that consumes the extensions blob once +// and discards it — and document that at the call site. + +package resolve + +import ( + "github.com/wundergraph/astjson" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafebytes" +) + +// structuralCopyNormalized applies a normalize transform (alias→schema name + arg hash) +// to v guided by obj, returning a structural copy on l.jsonArena. +// When obj is nil or has no aliases, falls back to plain StructuralCopy. +func (l *Loader) structuralCopyNormalized(v *astjson.Value, obj *Object) *astjson.Value { + if obj == nil || !obj.HasAliases { + return l.parser.StructuralCopy(l.jsonArena, v) + } + l.resetTransformSlabs(obj) + t := l.buildNormalizeTransform(obj) + return l.parser.StructuralCopyWithTransform(l.jsonArena, v, t) +} + +// structuralCopyNormalizedPassthrough applies a normalize transform (alias→schema name + arg hash) +// with Passthrough=true, so unlisted fields are kept intact. Used for L1 writes +// where we need schema-shape field names but must preserve all entity fields +// (including @key fields not in ProvidesData). +func (l *Loader) structuralCopyNormalizedPassthrough(v *astjson.Value, obj *Object) *astjson.Value { + if obj == nil || !obj.HasAliases { + return l.parser.StructuralCopy(l.jsonArena, v) + } + l.resetTransformSlabs(obj) + t := l.buildNormalizeTransform(obj) + t.Passthrough = true + return l.parser.StructuralCopyWithTransform(l.jsonArena, v, t) +} + +// structuralCopyDenormalizedPassthrough applies a denormalize transform (schema→alias) +// with Passthrough=true, so unlisted fields are kept intact. Used for L1 reads +// where we need response-shape field names but must preserve all entity fields +// (including fields from other fetches not in this fetch's ProvidesData). +func (l *Loader) structuralCopyDenormalizedPassthrough(v *astjson.Value, obj *Object) *astjson.Value { + if obj == nil || !obj.HasAliases { + return l.parser.StructuralCopy(l.jsonArena, v) + } + l.resetTransformSlabs(obj) + t := l.buildDenormalizeTransform(obj) + t.Passthrough = true + return l.parser.StructuralCopyWithTransform(l.jsonArena, v, t) +} + +// structuralCopyDenormalized applies a denormalize transform (schema name→alias) +// to v guided by obj, returning a structural copy on l.jsonArena. +// When obj is nil or has no aliases, falls back to plain StructuralCopy. +func (l *Loader) structuralCopyDenormalized(v *astjson.Value, obj *Object) *astjson.Value { + if obj == nil || !obj.HasAliases { + return l.parser.StructuralCopy(l.jsonArena, v) + } + l.resetTransformSlabs(obj) + t := l.buildDenormalizeTransform(obj) + return l.parser.StructuralCopyWithTransform(l.jsonArena, v, t) +} + +// fieldMeta stages per-field Transform data while children are being built. +// Kept at package level so it can live on the Loader's transformMetas slab +// (avoids a per-call `make([]fieldMeta, ...)` heap allocation). +type fieldMeta struct { + inputKey string + outputKey string + child *astjson.Transform +} + +// resetTransformSlabs resets and pre-grows the transform slabs to avoid +// reallocation during recursive tree building. Without sufficient capacity, +// slice appends during recursion can relocate the backing array, invalidating +// pointers (Transform*) and slice headers (Entries) set earlier. +func (l *Loader) resetTransformSlabs(obj *Object) { + entries, transforms := countTransformAllocations(obj) + + l.transformEntries = l.transformEntries[:0] + if cap(l.transformEntries) < entries { + l.transformEntries = make([]astjson.TransformEntry, 0, entries) + } + + l.transforms = l.transforms[:0] + if cap(l.transforms) < transforms { + l.transforms = make([]astjson.Transform, 0, transforms) + } + + // transformMetas needs at most one slot per field across the tree. + // entries is an upper bound (entries = fields + forced-__typename per object), + // so it's safe and keeps the grow logic simple. + l.transformMetas = l.transformMetas[:0] + if cap(l.transformMetas) < entries { + l.transformMetas = make([]fieldMeta, 0, entries) + } +} + +// countTransformAllocations counts the total TransformEntry and Transform +// allocations needed for an Object tree, so slabs can be pre-grown. +func countTransformAllocations(obj *Object) (entries, transforms int) { + if obj == nil { + return 0, 0 + } + transforms = 1 + // One entry per field + one potential identity entry for __typename + // when the selection set does not include it. + entries = len(obj.Fields) + 1 + for _, field := range obj.Fields { + ce, ct := countChildAllocations(field.Value) + entries += ce + transforms += ct + } + return entries, transforms +} + +func countChildAllocations(node Node) (entries, transforms int) { + switch n := node.(type) { + case *Object: + if n == nil || !n.HasAliases { + return 0, 0 + } + return countTransformAllocations(n) + case *Array: + if n == nil || n.Item == nil { + return 0, 0 + } + ce, ct := countChildAllocations(n.Item) + if ct > 0 { + ct++ + } + return ce, ct + } + return 0, 0 +} + +// allocTransformIndex appends a zero Transform to the slab and returns its index. +func (l *Loader) allocTransformIndex() int { + idx := len(l.transforms) + l.transforms = append(l.transforms, astjson.Transform{}) + return idx +} + +// buildNormalizeTransform builds a normalize transform tree. Children are built +// first (bottom-up) so their appends to transformEntries complete before the +// parent records its Entries slice range. When the selection set does not +// include __typename, an identity entry is appended so polymorphic type +// identity survives projection to the cache shape. +func (l *Loader) buildNormalizeTransform(obj *Object) *astjson.Transform { + tIdx := l.allocTransformIndex() + + // Phase 1: reserve a per-call region on the transformMetas slab and fill it. + // Pre-grown in resetTransformSlabs; recursive children append further down + // the slab, but our `metas` slice stays valid because capacity never shrinks. + metasStart := len(l.transformMetas) + metasEnd := metasStart + len(obj.Fields) + l.transformMetas = l.transformMetas[:metasEnd] + metas := l.transformMetas[metasStart:metasEnd] + hasTypenameField := false + for i, field := range obj.Fields { + metas[i].inputKey = unsafebytes.BytesToString(field.Name) + metas[i].outputKey = l.cacheFieldName(field) + if metas[i].outputKey == "__typename" { + hasTypenameField = true + } + metas[i].child = l.buildNormalizeChild(field.Value) + } + + // Phase 2: append entries contiguously (no interleaved child appends). + entriesStart := len(l.transformEntries) + for _, m := range metas { + l.transformEntries = append(l.transformEntries, astjson.TransformEntry{ + InputKey: m.inputKey, + OutputKey: m.outputKey, + Child: m.child, + }) + } + if !hasTypenameField { + l.transformEntries = append(l.transformEntries, astjson.TransformEntry{ + InputKey: "__typename", OutputKey: "__typename", + }) + } + + t := &l.transforms[tIdx] + t.Entries = l.transformEntries[entriesStart:] + return t +} + +func (l *Loader) buildDenormalizeTransform(obj *Object) *astjson.Transform { + tIdx := l.allocTransformIndex() + + metasStart := len(l.transformMetas) + metasEnd := metasStart + len(obj.Fields) + l.transformMetas = l.transformMetas[:metasEnd] + metas := l.transformMetas[metasStart:metasEnd] + hasTypenameField := false + for i, field := range obj.Fields { + aliasName := unsafebytes.BytesToString(field.Name) + cacheName := l.cacheFieldName(field) + if cacheName == "__typename" { + hasTypenameField = true + } + metas[i].inputKey = cacheName + metas[i].outputKey = aliasName + metas[i].child = l.buildDenormalizeChild(field.Value) + } + + entriesStart := len(l.transformEntries) + for _, m := range metas { + l.transformEntries = append(l.transformEntries, astjson.TransformEntry{ + InputKey: m.inputKey, + OutputKey: m.outputKey, + Child: m.child, + }) + } + if !hasTypenameField { + l.transformEntries = append(l.transformEntries, astjson.TransformEntry{ + InputKey: "__typename", OutputKey: "__typename", + }) + } + + t := &l.transforms[tIdx] + t.Entries = l.transformEntries[entriesStart:] + return t +} + +func (l *Loader) buildNormalizeChild(node Node) *astjson.Transform { + switch n := node.(type) { + case *Object: + if n == nil || !n.HasAliases { + return nil + } + return l.buildNormalizeTransform(n) + case *Array: + if n == nil || n.Item == nil { + return nil + } + inner := l.buildNormalizeChild(n.Item) + if inner == nil { + return nil + } + tIdx := l.allocTransformIndex() + t := &l.transforms[tIdx] + t.ArrayItem = inner + return t + } + return nil +} + +func (l *Loader) buildDenormalizeChild(node Node) *astjson.Transform { + switch n := node.(type) { + case *Object: + if n == nil || !n.HasAliases { + return nil + } + return l.buildDenormalizeTransform(n) + case *Array: + if n == nil || n.Item == nil { + return nil + } + inner := l.buildDenormalizeChild(n.Item) + if inner == nil { + return nil + } + tIdx := l.allocTransformIndex() + t := &l.transforms[tIdx] + t.ArrayItem = inner + return t + } + return nil +} + +// structuralCopyProjected applies a denormalize transform (schema name → alias) +// with Passthrough=false and no forced __typename, so only ProvidesData fields +// are included. Unlike structuralCopyDenormalized, this always builds a Transform +// even when !HasAliases, ensuring field projection at every level. +// Used for shadow comparison and mutation analytics where exact field projection matters. +func (l *Loader) structuralCopyProjected(v *astjson.Value, obj *Object) *astjson.Value { + if obj == nil { + return l.parser.StructuralCopy(l.jsonArena, v) + } + entries, transforms := countProjectAllocations(obj) + l.transformEntries = l.transformEntries[:0] + if cap(l.transformEntries) < entries { + l.transformEntries = make([]astjson.TransformEntry, 0, entries) + } + l.transforms = l.transforms[:0] + if cap(l.transforms) < transforms { + l.transforms = make([]astjson.Transform, 0, transforms) + } + l.transformMetas = l.transformMetas[:0] + if cap(l.transformMetas) < entries { + l.transformMetas = make([]fieldMeta, 0, entries) + } + t := l.buildProjectTransform(obj) + return l.parser.StructuralCopyWithTransform(l.jsonArena, v, t) +} + +// buildProjectTransform builds a denormalize transform for field projection. +// Unlike buildDenormalizeTransform, it does not force __typename and always +// recurses into children regardless of HasAliases. +func (l *Loader) buildProjectTransform(obj *Object) *astjson.Transform { + tIdx := l.allocTransformIndex() + + metasStart := len(l.transformMetas) + metasEnd := metasStart + len(obj.Fields) + l.transformMetas = l.transformMetas[:metasEnd] + metas := l.transformMetas[metasStart:metasEnd] + for i, field := range obj.Fields { + aliasName := unsafebytes.BytesToString(field.Name) + cacheName := l.cacheFieldName(field) + metas[i].inputKey = cacheName + metas[i].outputKey = aliasName + metas[i].child = l.buildProjectChild(field.Value) + } + + entriesStart := len(l.transformEntries) + for _, m := range metas { + l.transformEntries = append(l.transformEntries, astjson.TransformEntry{ + InputKey: m.inputKey, + OutputKey: m.outputKey, + Child: m.child, + }) + } + entriesEnd := len(l.transformEntries) + + t := &l.transforms[tIdx] + t.Entries = l.transformEntries[entriesStart:entriesEnd] + return t +} + +func (l *Loader) buildProjectChild(node Node) *astjson.Transform { + switch n := node.(type) { + case *Object: + if n == nil { + return nil + } + return l.buildProjectTransform(n) + case *Array: + if n == nil || n.Item == nil { + return nil + } + inner := l.buildProjectChild(n.Item) + if inner == nil { + return nil + } + tIdx := l.allocTransformIndex() + t := &l.transforms[tIdx] + t.ArrayItem = inner + return t + } + return nil +} + +// countProjectAllocations counts TransformEntry and Transform allocations +// for field projection. Unlike countTransformAllocations, it always recurses +// into children (no HasAliases short-circuit) and does not count forced __typename. +func countProjectAllocations(obj *Object) (entries, transforms int) { + if obj == nil { + return 0, 0 + } + transforms = 1 + entries = len(obj.Fields) + for _, field := range obj.Fields { + ce, ct := countProjectChildAllocations(field.Value) + entries += ce + transforms += ct + } + return entries, transforms +} + +func countProjectChildAllocations(node Node) (entries, transforms int) { + switch n := node.(type) { + case *Object: + if n == nil { + return 0, 0 + } + return countProjectAllocations(n) + case *Array: + if n == nil || n.Item == nil { + return 0, 0 + } + ce, ct := countProjectChildAllocations(n.Item) + if ct > 0 { + ct++ + } + return ce, ct + } + return 0, 0 +} diff --git a/v2/pkg/engine/resolve/loader_cache_transform_test.go b/v2/pkg/engine/resolve/loader_cache_transform_test.go new file mode 100644 index 0000000000..a11ca6e12b --- /dev/null +++ b/v2/pkg/engine/resolve/loader_cache_transform_test.go @@ -0,0 +1,193 @@ +package resolve + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +func TestStructuralCopyNormalized_NilAndNoAliases(t *testing.T) { + l := newTestLoader(t) + + // structuralCopyNormalized with nil obj is plain StructuralCopy. + parsed := astjson.MustParseBytes([]byte(`{"id":"1"}`)) + result := l.structuralCopyNormalized(parsed, nil) + assert.Equal(t, `{"id":"1"}`, string(result.MarshalTo(nil))) + + // No aliases: plain StructuralCopy. + noAlias := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + }, + } + result = l.structuralCopyNormalized(parsed, noAlias) + assert.Equal(t, `{"id":"1"}`, string(result.MarshalTo(nil))) +} + +func TestStructuralCopyNormalized_SingleFieldAlias(t *testing.T) { + l := newTestLoader(t) + + obj := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("nickname"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + + parsed := astjson.MustParseBytes([]byte(`{"nickname":"Alice","__typename":"User"}`)) + + // Normalize: alias "nickname" → schema "name". + normalized := l.structuralCopyNormalized(parsed, obj) + assert.Equal(t, `{"name":"Alice","__typename":"User"}`, string(normalized.MarshalTo(nil))) + + // Denormalize: schema "name" → alias "nickname". + schemaShaped := astjson.MustParseBytes([]byte(`{"name":"Alice","__typename":"User"}`)) + denormalized := l.structuralCopyDenormalized(schemaShaped, obj) + assert.Equal(t, `{"nickname":"Alice","__typename":"User"}`, string(denormalized.MarshalTo(nil))) +} + +func TestStructuralCopyNormalized_NestedObjectWithAliases(t *testing.T) { + l := newTestLoader(t) + + inner := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("handle"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + outer := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("usr"), OriginalName: []byte("user"), Value: inner}, + }, + } + + parsed := astjson.MustParseBytes([]byte(`{"id":"1","usr":{"handle":"Alice","__typename":"User"},"__typename":"Parent"}`)) + normalized := l.structuralCopyNormalized(parsed, outer) + assert.Equal(t, `{"id":"1","user":{"name":"Alice","__typename":"User"},"__typename":"Parent"}`, string(normalized.MarshalTo(nil))) + + schemaShaped := astjson.MustParseBytes([]byte(`{"id":"1","user":{"name":"Alice","__typename":"User"},"__typename":"Parent"}`)) + denormalized := l.structuralCopyDenormalized(schemaShaped, outer) + assert.Equal(t, `{"id":"1","usr":{"handle":"Alice","__typename":"User"},"__typename":"Parent"}`, string(denormalized.MarshalTo(nil))) +} + +func TestStructuralCopyNormalized_ArrayOfObjectsWithAliases(t *testing.T) { + l := newTestLoader(t) + + itemObj := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("handle"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + outer := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("users"), Value: &Array{Item: itemObj}}, + }, + } + + parsed := astjson.MustParseBytes([]byte(`{"users":[{"handle":"Alice","__typename":"User"},{"handle":"Bob","__typename":"User"}]}`)) + normalized := l.structuralCopyNormalized(parsed, outer) + assert.Equal(t, `{"users":[{"name":"Alice","__typename":"User"},{"name":"Bob","__typename":"User"}]}`, string(normalized.MarshalTo(nil))) +} + +func TestStructuralCopyNormalized_ArgSuffixField(t *testing.T) { + l := newTestLoader(t) + l.ctx = NewContext(context.Background()) + l.ctx.Variables = astjson.MustParseBytes([]byte(`{"first":5}`)) + + obj := &Object{ + HasAliases: true, + Fields: []*Field{ + { + Name: []byte("friends"), + OriginalName: []byte("friends"), + CacheArgs: []CacheFieldArg{{ArgName: "first", VariableName: "first"}}, + Value: &Scalar{}, + }, + }, + } + + // Build normalize transform and inspect entries. The builder appends an + // identity __typename entry when the selection set doesn't include it, + // so the entity type survives projection to the cache shape. + l.resetTransformSlabs(obj) + normalizeXform := l.buildNormalizeTransform(obj) + require.NotNil(t, normalizeXform) + assert.Equal(t, []astjson.TransformEntry{ + {InputKey: "friends", OutputKey: "friends_08d4d396a3164ad4"}, + {InputKey: "__typename", OutputKey: "__typename"}, + }, normalizeXform.Entries) +} + +func TestStructuralCopyNormalized_RequestScopedInvariant(t *testing.T) { + obj := &Object{ + HasAliases: true, + Fields: []*Field{ + { + Name: []byte("friends"), + OriginalName: []byte("friends"), + CacheArgs: []CacheFieldArg{{ArgName: "first", VariableName: "remapped"}}, + Value: &Scalar{}, + }, + }, + } + + ctx1 := NewContext(context.Background()) + ctx1.Variables = astjson.MustParseBytes([]byte(`{"original":"42"}`)) + ctx1.RemapVariables = map[string]string{"remapped": "original"} + loader1 := newTestLoader(t) + loader1.ctx = ctx1 + + ctx2 := NewContext(context.Background()) + ctx2.Variables = astjson.MustParseBytes([]byte(`{"other":"99"}`)) + ctx2.RemapVariables = map[string]string{"remapped": "other"} + loader2 := newTestLoader(t) + loader2.ctx = ctx2 + + loader1.resetTransformSlabs(obj) + t1 := loader1.buildNormalizeTransform(obj) + + loader2.resetTransformSlabs(obj) + t2 := loader2.buildNormalizeTransform(obj) + + require.NotNil(t, t1) + require.NotNil(t, t2) + assert.NotEqual(t, t1.Entries[0].OutputKey, t2.Entries[0].OutputKey, + "Transforms built under different RemapVariables MUST have different arg-suffix OutputKeys") +} + +func TestStructuralCopyNormalized_MixedAliases(t *testing.T) { + l := newTestLoader(t) + + inner := &Object{ + HasAliases: false, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + }, + } + outer := &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("usr"), OriginalName: []byte("user"), Value: inner}, + }, + } + + parsed := astjson.MustParseBytes([]byte(`{"usr":{"id":"1"}}`)) + normalized := l.structuralCopyNormalized(parsed, outer) + assert.Equal(t, `{"user":{"id":"1"}}`, string(normalized.MarshalTo(nil))) +} + +func newTestLoader(t *testing.T) *Loader { + t.Helper() + return &Loader{ + jsonArena: arena.NewMonotonicArena(arena.WithMinBufferSize(1024)), + } +} diff --git a/v2/pkg/engine/resolve/loader_hooks_test.go b/v2/pkg/engine/resolve/loader_hooks_test.go index 11462d8c3b..e51588b5bc 100644 --- a/v2/pkg/engine/resolve/loader_hooks_test.go +++ b/v2/pkg/engine/resolve/loader_hooks_test.go @@ -104,8 +104,7 @@ func TestLoaderHooks_FetchPipeline(t *testing.T) { t.Run("Subgraph errors are available on resolve context when error propagation is disabled", func(t *testing.T) { ctrl := gomock.NewController(t) - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := t.Context() r := New(rCtx, ResolverOptions{ MaxConcurrency: 1024, Debug: false, @@ -152,7 +151,7 @@ func TestLoaderHooks_FetchPipeline(t *testing.T) { } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(resolveCtx, resp, nil, buf) + _, err := r.ResolveGraphQLResponse(resolveCtx, resp, buf) assert.NoError(t, err) assert.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'Users' at Path 'query'."}],"data":{"name":null}}`, buf.String()) ctrl.Finish() diff --git a/v2/pkg/engine/resolve/loader_noncaching_bench_test.go b/v2/pkg/engine/resolve/loader_noncaching_bench_test.go new file mode 100644 index 0000000000..f2fe651620 --- /dev/null +++ b/v2/pkg/engine/resolve/loader_noncaching_bench_test.go @@ -0,0 +1,141 @@ +// Benchmarks for the non-caching fetch/merge path. +// +// The non-caching path has no StructuralCopy calls — the theoretical minimum +// is one parse (ParseBytesWithArena) + one merge (MergeValuesWithPath) per +// fetch. These benches measure that floor so we can identify hotspots in +// auxiliary work (res struct allocation, response buffer handling, merge +// pathology for large responses, etc.) separately from the caching work. +// +// Two shapes are measured: +// +// - BenchmarkNonCachingParseMergeCore — raw ParseBytesWithArena + +// MergeValuesWithPath, bypassing mergeResult's boilerplate. This is the +// absolute lower bound. +// - BenchmarkNonCachingMergeResult — the full mergeResult call with +// caching disabled. This includes all the non-cache branches (rejected +// check, response path extraction, error path, etc.) so the delta vs. +// Core reveals how much overhead mergeResult itself adds on the hot +// non-caching path. +// +// Usage: +// +// go test -run=^$ -bench BenchmarkNonCaching -benchmem ./v2/pkg/engine/resolve/... +package resolve + +import ( + "context" + "strconv" + "strings" + "testing" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +var benchNonCachingEntityCounts = []int{1, 10, 100} + +// buildNonCachingResponse returns a realistic subgraph JSON response wrapping +// N entities under data.users. +func buildNonCachingResponse(n int) []byte { + var sb strings.Builder + sb.WriteString(`{"data":{"users":[`) + for i := range n { + if i > 0 { + sb.WriteByte(',') + } + sb.Write(benchCopyEntityJSON(strconv.Itoa(i))) + } + sb.WriteString(`]}}`) + return []byte(sb.String()) +} + +// BenchmarkNonCachingParseMergeCore measures the raw ParseBytesWithArena + +// MergeValuesWithPath hot loop. This is the floor — no caching, no mergeResult +// boilerplate, no error handling beyond the primitives themselves. +func BenchmarkNonCachingParseMergeCore(b *testing.B) { + for _, n := range benchNonCachingEntityCounts { + b.Run("entities="+strconv.Itoa(n), func(b *testing.B) { + responseJSON := buildNonCachingResponse(n) + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024)) + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar.Reset() + parsed, err := astjson.ParseBytesWithArena(ar, responseJSON) + if err != nil { + b.Fatal(err) + } + responseData := parsed.Get("data") + // Root-level merge with no pre-existing items → set resolvable.data. + // Mimic the real mergeResult behavior with an empty placeholder + // to exercise MergeValuesWithPath identically to the fetch path. + item, err := astjson.ParseBytesWithArena(ar, []byte(`{}`)) + if err != nil { + b.Fatal(err) + } + _, err = astjson.MergeValuesWithPath(ar, item, responseData) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkNonCachingMergeResult measures the full mergeResult path with +// caching disabled on the context. Compared to BenchmarkNonCachingParseMergeCore +// the delta reveals how much non-cache overhead mergeResult contributes. +func BenchmarkNonCachingMergeResult(b *testing.B) { + for _, n := range benchNonCachingEntityCounts { + b.Run("entities="+strconv.Itoa(n), func(b *testing.B) { + responseJSON := buildNonCachingResponse(n) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(64 * 1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = false + ctx.ExecutionOptions.Caching.EnableL2Cache = false + resolvable := NewResolvable(ar, ResolvableOptions{}) + if err := resolvable.Init(ctx, nil, ast.OperationTypeQuery); err != nil { + b.Fatal(err) + } + l := &Loader{ + jsonArena: ar, + resolvable: resolvable, + ctx: ctx, + } + + fetchItem := &FetchItem{ + Fetch: &SingleFetch{ + FetchConfiguration: FetchConfiguration{ + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + Info: &FetchInfo{OperationType: ast.OperationTypeQuery}, + }, + } + + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + ar.Reset() + item, err := astjson.ParseBytesWithArena(ar, []byte(`{}`)) + if err != nil { + b.Fatal(err) + } + res := &result{ + out: responseJSON, + postProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + } + if err := l.mergeResult(fetchItem, res, []*astjson.Value{item}); err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/v2/pkg/engine/resolve/loader_parallel_race_test.go b/v2/pkg/engine/resolve/loader_parallel_race_test.go new file mode 100644 index 0000000000..dbc19d00df --- /dev/null +++ b/v2/pkg/engine/resolve/loader_parallel_race_test.go @@ -0,0 +1,368 @@ +package resolve + +import ( + "context" + "net/http" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/engine/datasource/httpclient" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// TestResolveParallel_NoConcurrentArenaRace verifies that parallel entity fetches +// with L2 caching do not race on the arena. This test exercises the goroutine code +// paths in resolveParallel Phase 2 (extractCacheKeysStrings, populateFromCache, +// DeepCopyWithTransform denormalization) which allocate from per-goroutine arenas. +// +// Run with: go test -race -run TestResolveParallel_NoConcurrentArenaRace ./v2/pkg/engine/resolve/... -v -count=1 +func TestResolveParallel_NoConcurrentArenaRace(t *testing.T) { + t.Run("parallel batch entity fetches with L2 cache miss", func(t *testing.T) { + // Scenario: Root fetch → Parallel( + // BatchEntityFetch (products subgraph, L2 miss → subgraph fetch), + // BatchEntityFetch (inventory subgraph, L2 miss → subgraph fetch), + // ) + // Both fetches run as goroutines in Phase 2, exercising arena allocations concurrently. + // With -race, this would detect if goroutines accidentally share l.jsonArena. + + productsDS := &staticDataSource{data: []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Widget"},{"__typename":"Product","id":"prod-2","name":"Gadget"}]}}`)} + inventoryDS := &staticDataSource{data: []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","inStock":true},{"__typename":"Product","id":"prod-2","inStock":false}]}}`)} + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + inventoryCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + // Run 100 iterations to increase the race window probability + for range 100 { + cache := NewFakeLoaderCache() + + rootDS := &staticDataSource{data: []byte(`{"data":{"products":[{"__typename":"Product","id":"prod-1"},{"__typename":"Product","id":"prod-2"}]}}`)} + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products"}`), SegmentType: StaticSegmentType}}, + }, + }, "query"), + Parallel( + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"names","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{ + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }}), + }}}}, + Separator: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: productsDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{{TypeName: "Product"}}, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}}}, + }, + }, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + TTL: 60_000_000_000, // 60s + }, + }, "query.products", ArrayPath("products")), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://inventory","body":{"query":"stock","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{ + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }}), + }}}}, + Separator: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: inventoryDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "inventory", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{{TypeName: "Product"}}, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("inStock"), Value: &Scalar{Path: []string{"inStock"}}}, + }, + }, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "inventory", + CacheKeyTemplate: inventoryCacheKeyTemplate, + UseL1Cache: true, + TTL: 60_000_000_000, + }, + }, "query.products", ArrayPath("products")), + ), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Path: []string{"products"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("inStock"), Value: &Boolean{Path: []string{"inStock"}}}, + }, + }, + }, + }, + }, + }, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache, "inventory": cache}, + entityCacheConfigs: map[string]map[string]*EntityCacheInvalidationConfig{}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"__typename":"Product","id":"prod-1","name":"Widget","inStock":true},{"__typename":"Product","id":"prod-2","name":"Gadget","inStock":false}]}}`, out) + + loader.Free() + ar.Reset() + } + }) + + t.Run("parallel batch entity fetches with partial L2 cache hit", func(t *testing.T) { + // Scenario: Root fetch → Parallel( + // BatchEntityFetch (products subgraph, L2 hit → populateFromCache), + // BatchEntityFetch (inventory subgraph, L2 miss → subgraph fetch), + // ) + // Products fetch exercises populateFromCache (parsing cached JSON on goroutine arena). + // Inventory fetch exercises concurrent subgraph fetch alongside cache path. + + cache := NewFakeLoaderCache() + // Pre-populate L2 cache with product entities only; inventory entities are NOT cached + cache.SetRawData(`{"__typename":"Product","key":{"id":"prod-1"}}`, []byte(`{"__typename":"Product","id":"prod-1","name":"Widget"}`), 60_000_000_000) + cache.SetRawData(`{"__typename":"Product","key":{"id":"prod-2"}}`, []byte(`{"__typename":"Product","id":"prod-2","name":"Gadget"}`), 60_000_000_000) + + productCacheKeyTemplate := &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } + + productsDS := &staticDataSource{data: []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","name":"Widget"},{"__typename":"Product","id":"prod-2","name":"Gadget"}]}}`)} + inventoryDS := &staticDataSource{data: []byte(`{"data":{"_entities":[{"__typename":"Product","id":"prod-1","inStock":true},{"__typename":"Product","id":"prod-2","inStock":false}]}}`)} + + for range 100 { + rootDS := &staticDataSource{data: []byte(`{"data":{"products":[{"__typename":"Product","id":"prod-1"},{"__typename":"Product","id":"prod-2"}]}}`)} + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products"}`), SegmentType: StaticSegmentType}}, + }, + }, "query"), + Parallel( + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://products","body":{"query":"names","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{ + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }}), + }}}}, + Separator: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: productsDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{{TypeName: "Product"}}, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}}}, + }, + }, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + TTL: 60_000_000_000, + }, + }, "query.products", ArrayPath("products")), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`{"method":"POST","url":"http://inventory","body":{"query":"stock","variables":{"representations":[`), SegmentType: StaticSegmentType}}}, + Items: []InputTemplate{{Segments: []TemplateSegment{{ + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }}), + }}}}, + Separator: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`,`), SegmentType: StaticSegmentType}}}, + Footer: InputTemplate{Segments: []TemplateSegment{{Data: []byte(`]}}}`), SegmentType: StaticSegmentType}}}, + }, + DataSource: inventoryDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities"}}, + Info: &FetchInfo{ + DataSourceName: "inventory", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{{TypeName: "Product"}}, + ProvidesData: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("inStock"), Value: &Scalar{Path: []string{"inStock"}}}, + }, + }, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + CacheKeyTemplate: productCacheKeyTemplate, + UseL1Cache: true, + TTL: 60_000_000_000, + }, + }, "query.products", ArrayPath("products")), + ), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Path: []string{"products"}, + Item: &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + {Name: []byte("inStock"), Value: &Boolean{Path: []string{"inStock"}}}, + }, + }, + }, + }, + }, + }, + } + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + loader := &Loader{ + jsonArena: ar, + caches: map[string]LoaderCache{"default": cache}, + entityCacheConfigs: map[string]map[string]*EntityCacheInvalidationConfig{}, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors) + assert.Equal(t, `{"data":{"products":[{"__typename":"Product","id":"prod-1","name":"Widget","inStock":true},{"__typename":"Product","id":"prod-2","name":"Gadget","inStock":false}]}}`, out) + + loader.Free() + ar.Reset() + } + }) +} + +// staticDataSource returns static data for every Load call. Thread-safe. +type staticDataSource struct { + data []byte + mu sync.Mutex +} + +func (s *staticDataSource) Load(ctx context.Context, headers http.Header, input []byte) ([]byte, error) { + s.mu.Lock() + defer s.mu.Unlock() + out := make([]byte, len(s.data)) + copy(out, s.data) + return out, nil +} + +func (s *staticDataSource) LoadWithFiles(ctx context.Context, headers http.Header, input []byte, files []*httpclient.FileUpload) ([]byte, error) { + return s.Load(ctx, headers, input) +} diff --git a/v2/pkg/engine/resolve/loader_skip_fetch_test.go b/v2/pkg/engine/resolve/loader_skip_fetch_test.go new file mode 100644 index 0000000000..da6fadda57 --- /dev/null +++ b/v2/pkg/engine/resolve/loader_skip_fetch_test.go @@ -0,0 +1,961 @@ +package resolve + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "github.com/wundergraph/astjson" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// TestLoader_CanSkipFetch verifies that canSkipFetch correctly detects when all +// requested fields are already present in cached entities, avoiding unnecessary +// subgraph calls. Covers scalars, nested objects, arrays, nullability, and mutations. +func TestLoader_CanSkipFetch(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + info *FetchInfo + items []*astjson.Value + expectSkipFetch bool + }{ + { + name: "single item with Query operation", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"id": "123"}`)), + }, + expectSkipFetch: true, + }, + { + name: "single item with Mutation operation", + info: &FetchInfo{ + OperationType: ast.OperationTypeMutation, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"id": "123"}`)), + }, + expectSkipFetch: false, + }, + { + name: "single item with null type", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{Fields: []*Field{}}, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`null`)), + }, + expectSkipFetch: true, + }, + { + name: "single item with all required data", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123", "name": "John"}}`)), + }, + expectSkipFetch: true, + }, + { + name: "single item missing required field", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123"}}`)), // missing "name" + }, + expectSkipFetch: false, + }, + { + name: "single item missing nullable field", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("email"), + Value: &Scalar{ + Path: []string{"email"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123"}}`)), // missing nullable "email" + }, + expectSkipFetch: false, + }, + { + name: "single item with null value on required path", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": null}}`)), // null value on required field + }, + expectSkipFetch: false, + }, + { + name: "single item with null value on nullable path", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("email"), + Value: &Scalar{ + Path: []string{"email"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123", "email": null}}`)), // null value on nullable field + }, + expectSkipFetch: true, + }, + { + name: "multiple items all can be skipped", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"id": "123"}`)), + astjson.MustParseBytes([]byte(`{"id": "456"}`)), + astjson.MustParseBytes([]byte(`{"id": "789"}`)), + }, + expectSkipFetch: true, + }, + { + name: "multiple items some can be skipped", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123", "name": "John"}}`)), // complete + astjson.MustParseBytes([]byte(`{"user": {"id": "456"}}`)), // missing name + astjson.MustParseBytes([]byte(`{"user": {"id": "789", "name": "Alice"}}`)), // complete + }, + expectSkipFetch: false, + }, + { + name: "multiple items none can be skipped", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123"}}`)), // missing name + astjson.MustParseBytes([]byte(`{"user": {"id": "456"}}`)), // missing name + astjson.MustParseBytes([]byte(`{"user": {"id": "789"}}`)), // missing name + }, + expectSkipFetch: false, + }, + { + name: "nullable array that is null", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("tags"), + Value: &Array{ + Path: []string{"tags"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123", "tags": null}}`)), + }, + expectSkipFetch: true, + }, + { + name: "nullable array that is empty", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("tags"), + Value: &Array{ + Path: []string{"tags"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"user": {"id": "123", "tags": []}}`)), + }, + expectSkipFetch: true, + }, + { + name: "deeply nested structure", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("user"), + Value: &Object{ + Path: []string{"user"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("account"), + Value: &Object{ + Path: []string{"account"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &Scalar{ + Path: []string{"__typename"}, + Nullable: false, + }, + }, + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("info"), + Value: &Object{ + Path: []string{"info"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("a"), + Value: &Scalar{ + Path: []string{"a"}, + Nullable: false, + }, + }, + { + Name: []byte("b"), + Value: &Scalar{ + Path: []string{"b"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{ + "user": { + "account": { + "__typename": "Account", + "id": "123", + "info": { + "a": "valueA", + "b": "valueB" + } + } + } + }`)), + }, + expectSkipFetch: true, + }, + { + name: "nil info", + info: nil, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"id": "123"}`)), + }, + expectSkipFetch: false, + }, + { + name: "nil ProvidesData", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: nil, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"id": "123"}`)), + }, + expectSkipFetch: false, + }, + { + name: "array with scalar items - valid", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("tags"), + Value: &Array{ + Path: []string{"tags"}, + Nullable: false, + Item: &Scalar{ + Path: []string{}, + Nullable: false, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"tags": ["tag1", "tag2", "tag3"]}`)), + }, + expectSkipFetch: true, + }, + { + name: "array with scalar items - invalid (null item in non-nullable array)", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("tags"), + Value: &Array{ + Path: []string{"tags"}, + Nullable: false, + Item: &Scalar{ + Path: []string{}, + Nullable: false, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"tags": ["tag1", null, "tag3"]}`)), // null item in non-nullable array + }, + expectSkipFetch: false, + }, + { + name: "array with scalar items - valid (null item in nullable array)", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("tags"), + Value: &Array{ + Path: []string{"tags"}, + Nullable: false, + Item: &Scalar{ + Path: []string{}, + Nullable: true, // nullable scalar items + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"tags": ["tag1", null, "tag3"]}`)), // null item in nullable array + }, + expectSkipFetch: true, + }, + { + name: "array with object items - valid", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("users"), + Value: &Array{ + Path: []string{"users"}, + Nullable: false, + Item: &Object{ + Path: []string{}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"users": [{"id": "1", "name": "John"}, {"id": "2", "name": "Jane"}]}`)), + }, + expectSkipFetch: true, + }, + { + name: "array with object items - invalid (missing required field)", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("users"), + Value: &Array{ + Path: []string{"users"}, + Nullable: false, + Item: &Object{ + Path: []string{}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"users": [{"id": "1", "name": "John"}, {"id": "2"}]}`)), // missing "name" field + }, + expectSkipFetch: false, + }, + { + name: "nested arrays - valid", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("matrix"), + Value: &Array{ + Path: []string{"matrix"}, + Nullable: false, + Item: &Array{ + Path: []string{}, + Nullable: false, + Item: &Scalar{ + Path: []string{}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"matrix": [["a", "b"], ["c", "d"], ["e", "f"]]}`)), + }, + expectSkipFetch: true, + }, + { + name: "nested arrays - invalid (null in inner non-nullable array)", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("matrix"), + Value: &Array{ + Path: []string{"matrix"}, + Nullable: false, + Item: &Array{ + Path: []string{}, + Nullable: false, + Item: &Scalar{ + Path: []string{}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"matrix": [["a", "b"], ["c", null], ["e", "f"]]}`)), // null in inner array + }, + expectSkipFetch: false, + }, + { + name: "array of objects with nested arrays - complex valid case", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("groups"), + Value: &Array{ + Path: []string{"groups"}, + Nullable: false, + Item: &Object{ + Path: []string{}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + { + Name: []byte("members"), + Value: &Array{ + Path: []string{"members"}, + Nullable: false, + Item: &Object{ + Path: []string{}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"groups": [{"name": "admins", "members": [{"id": "1"}, {"id": "2"}]}, {"name": "users", "members": [{"id": "3"}]}]}`)), + }, + expectSkipFetch: true, + }, + { + name: "array of objects with nested arrays - complex invalid case", + info: &FetchInfo{ + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{ + Fields: []*Field{ + { + Name: []byte("groups"), + Value: &Array{ + Path: []string{"groups"}, + Nullable: false, + Item: &Object{ + Path: []string{}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + { + Name: []byte("members"), + Value: &Array{ + Path: []string{"members"}, + Nullable: false, + Item: &Object{ + Path: []string{}, + Nullable: false, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Scalar{ + Path: []string{"id"}, + Nullable: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + items: []*astjson.Value{ + astjson.MustParseBytes([]byte(`{"groups": [{"name": "admins", "members": [{"id": "1"}, {}]}, {"name": "users", "members": [{"id": "3"}]}]}`)), // missing id in one member + }, + expectSkipFetch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + loader := &Loader{} + + // Make a copy of items to avoid mutation affecting the test data + itemsCopy := make([]*astjson.Value, len(tt.items)) + copy(itemsCopy, tt.items) + + // Create cache keys with Item set to the corresponding test items + cacheKeys := make([]*CacheKey, len(itemsCopy)) + for i, item := range itemsCopy { + cacheKeys[i] = &CacheKey{ + FromCache: item, + } + } + + // Create a result struct for canSkipFetch + res := &result{ + l1CacheKeys: cacheKeys, + } + + canSkipFetch := loader.canSkipFetch(tt.info, res) + assert.Equal(t, tt.expectSkipFetch, canSkipFetch) + }) + } +} + +// TestLoader_BatchEntityKeyEmptyListShortCircuit verifies that when the batch entity +// key argument is an empty list, the fetch is skipped entirely (no subgraph call). +// Without this, empty batches would send pointless requests to subgraphs. +func TestLoader_BatchEntityKeyEmptyListShortCircuit(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ds := NewMockDataSource(ctrl) + ds.EXPECT().Load(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("products"), + Value: &Array{ + Path: []string{"products"}, + Item: &Object{ + Fields: []*Field{ + { + Name: []byte("upc"), + Value: &String{Path: []string{"upc"}}, + }, + }, + }, + }, + }, + }, + }, + Fetches: Sequence( + Single(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: ds, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + Caching: FetchCacheConfiguration{ + BatchEntityKeyArgumentPathHint: []string{"upcs"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products"}`), + SegmentType: StaticSegmentType, + }, + }, + }, + Info: &FetchInfo{ + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + RootFields: []GraphCoordinate{ + {TypeName: "Query", FieldName: "products"}, + }, + }, + }), + ), + } + + ctx := NewContext(context.Background()) + ctx.Variables = astjson.MustParse(`{"upcs":[]}`) + + resolvable := NewResolvable(nil, ResolvableOptions{}) + loader := &Loader{} + + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + assert.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + assert.NoError(t, err) + + assert.Equal(t, `{"data":{"products":[]}}`, fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors)) +} diff --git a/v2/pkg/engine/resolve/loader_test.go b/v2/pkg/engine/resolve/loader_test.go index cdf1789405..3c7259fbad 100644 --- a/v2/pkg/engine/resolve/loader_test.go +++ b/v2/pkg/engine/resolve/loader_test.go @@ -1020,7 +1020,7 @@ func BenchmarkLoader_LoadGraphQLResponseData(b *testing.B) { b.SetBytes(int64(len(expected))) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { loader.Free() resolvable.Reset() err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) @@ -1495,7 +1495,6 @@ func TestRewriteErrorPaths(t *testing.T) { } for _, tc := range testCases { - tc := tc // capture range variable t.Run(tc.name, func(t *testing.T) { // Create FetchItem with the test response path elements fetchItem := &FetchItem{ @@ -1521,8 +1520,11 @@ func TestRewriteErrorPaths(t *testing.T) { for i, expectedError := range tc.expectedErrors { expectedData := expectedError.MarshalTo(nil) actualData := values[i].MarshalTo(nil) - assert.JSONEq(t, string(expectedData), string(actualData), - "Error %d should match expected", i) + assert.Equal(t, + compactJSONForAssert(t, string(expectedData)), + compactJSONForAssert(t, string(actualData)), + "Error %d should match expected", i, + ) } }) } @@ -2094,7 +2096,7 @@ func TestLoader_OptionallyOmitErrorLocations(t *testing.T) { actualJSON := inputValue.MarshalTo(nil) // Compare with expected - assert.JSONEq(t, tt.expectedJSON, string(actualJSON)) + assert.Equal(t, compactJSONForAssert(t, tt.expectedJSON), compactJSONForAssert(t, string(actualJSON))) }) } } diff --git a/v2/pkg/engine/resolve/mutation_cache_test.go b/v2/pkg/engine/resolve/mutation_cache_test.go new file mode 100644 index 0000000000..85a4bc7de9 --- /dev/null +++ b/v2/pkg/engine/resolve/mutation_cache_test.go @@ -0,0 +1,1070 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// --------------------------------------------------------------------------- +// navigateProvidesDataToField +// --------------------------------------------------------------------------- + +// TestNavigateProvidesDataToField verifies the ProvidesData tree navigation used +// by mutation cache impact detection to find the entity object under a root field. +func TestNavigateProvidesDataToField(t *testing.T) { + t.Run("valid field name returns inner Object", func(t *testing.T) { + inner := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}}}, + }, + } + provides := &Object{ + Fields: []*Field{ + {Name: []byte("updateUsername"), Value: inner}, + }, + } + + got := navigateProvidesDataToField(provides, "updateUsername") + assert.Equal(t, inner, got) + }) + + t.Run("missing field name returns nil", func(t *testing.T) { + provides := &Object{ + Fields: []*Field{ + {Name: []byte("updateUsername"), Value: &Object{}}, + }, + } + + got := navigateProvidesDataToField(provides, "deleteUser") + assert.Nil(t, got) + }) + + t.Run("nil providesData returns nil", func(t *testing.T) { + got := navigateProvidesDataToField(nil, "anything") + assert.Nil(t, got) + }) + + t.Run("field value is not Object returns nil", func(t *testing.T) { + provides := &Object{ + Fields: []*Field{ + {Name: []byte("scalarField"), Value: &Scalar{Path: []string{"scalarField"}}}, + }, + } + + got := navigateProvidesDataToField(provides, "scalarField") + assert.Nil(t, got) + }) +} + +// --------------------------------------------------------------------------- +// buildEntityKeyValue (Loader method) +// --------------------------------------------------------------------------- + +// testBuildEntityKeyValue is a test helper that creates a minimal Loader +// to call the buildEntityKeyValue method. +func testBuildEntityKeyValue(ar arena.Arena, data *astjson.Value, keyFields []KeyField) *astjson.Value { + l := &Loader{jsonArena: ar} + return l.buildEntityKeyValue(data, keyFields) +} + +// TestBuildEntityKeyValue verifies that entity key construction from response data +// handles simple, composite, and nested @key fields correctly. +func TestBuildEntityKeyValue(t *testing.T) { + t.Run("simple key", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + data, err := astjson.ParseWithArena(ar, `{"id":"123","name":"Alice"}`) + require.NoError(t, err) + + result := testBuildEntityKeyValue(ar, data, []KeyField{{Name: "id"}}) + got := string(result.MarshalTo(nil)) + + assert.Equal(t, `{"id":"123"}`, got) + }) + + t.Run("composite key", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + data, err := astjson.ParseWithArena(ar, `{"id":"1","orgId":"acme","name":"Bob"}`) + require.NoError(t, err) + + result := testBuildEntityKeyValue(ar, data, []KeyField{{Name: "id"}, {Name: "orgId"}}) + got := string(result.MarshalTo(nil)) + + assert.Equal(t, `{"id":"1","orgId":"acme"}`, got) + }) + + t.Run("nested key", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + data, err := astjson.ParseWithArena(ar, `{"key":{"subId":"x"},"name":"Carol"}`) + require.NoError(t, err) + + result := testBuildEntityKeyValue(ar, data, []KeyField{ + {Name: "key", Children: []KeyField{{Name: "subId"}}}, + }) + got := string(result.MarshalTo(nil)) + + assert.Equal(t, `{"key":{"subId":"x"}}`, got) + }) + + t.Run("missing field in data omits field from output", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + data, err := astjson.ParseWithArena(ar, `{"name":"Dave"}`) + require.NoError(t, err) + + result := testBuildEntityKeyValue(ar, data, []KeyField{{Name: "id"}}) + got := string(result.MarshalTo(nil)) + + // "id" is missing in data, so it is omitted from the result + assert.Equal(t, `{}`, got) + }) +} + +// --------------------------------------------------------------------------- +// buildMutationEntityCacheKey +// --------------------------------------------------------------------------- + +// TestBuildMutationEntityCacheKey verifies that mutation cache key construction +// applies header prefix, global prefix, and L2 interceptor transformations correctly. +func TestBuildMutationEntityCacheKey(t *testing.T) { + t.Run("basic key without prefix", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + + l := &Loader{ + jsonArena: ar, + ctx: ctx, + } + + entityData, err := astjson.ParseWithArena(ar, `{"id":"1234","username":"Alice"}`) + require.NoError(t, err) + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + } + info := &FetchInfo{ + DataSourceName: "accounts", + } + + got := l.buildMutationEntityCacheKey(cfg, entityData, info) + assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, got) + }) + + t.Run("with header prefix", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.SubgraphHeadersBuilder = &mockSubgraphHeadersBuilder{ + hashes: map[string]uint64{"accounts": 99887766}, + } + + l := &Loader{ + jsonArena: ar, + ctx: ctx, + } + + entityData, err := astjson.ParseWithArena(ar, `{"id":"1234","username":"Alice"}`) + require.NoError(t, err) + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + IncludeSubgraphHeaderPrefix: true, + } + info := &FetchInfo{ + DataSourceName: "accounts", + } + + got := l.buildMutationEntityCacheKey(cfg, entityData, info) + assert.Equal(t, `99887766:{"__typename":"User","key":{"id":"1234"}}`, got) + }) + + t.Run("with interceptor", func(t *testing.T) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.L2CacheKeyInterceptor = func(_ context.Context, key string, info L2CacheKeyInterceptorInfo) string { + return "tenant-42:" + key + } + + l := &Loader{ + jsonArena: ar, + ctx: ctx, + } + + entityData, err := astjson.ParseWithArena(ar, `{"id":"1234"}`) + require.NoError(t, err) + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + } + info := &FetchInfo{ + DataSourceName: "accounts", + } + + got := l.buildMutationEntityCacheKey(cfg, entityData, info) + assert.Equal(t, `tenant-42:{"__typename":"User","key":{"id":"1234"}}`, got) + }) +} + +// --------------------------------------------------------------------------- +// detectMutationEntityImpact +// --------------------------------------------------------------------------- + +// TestDetectMutationEntityImpact verifies that after a mutation completes, the resolver +// correctly detects impacted entities and invalidates/records analytics for them. +// Without this, stale cached entities would persist after mutations. +func TestDetectMutationEntityImpact(t *testing.T) { + // Helper: builds a Loader with minimal fields for detectMutationEntityImpact. + makeLoader := func(ctx *Context, cache LoaderCache, cacheName string) *Loader { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + return &Loader{ + jsonArena: ar, + ctx: ctx, + caches: map[string]LoaderCache{cacheName: cache}, + l1Cache: map[string]*astjson.Value{}, + } + } + + // Helper: builds a result with MutationEntityImpactConfig. + makeResult := func(cfg *MutationEntityImpactConfig) *result { + return &result{ + cacheConfig: FetchCacheConfiguration{ + MutationEntityImpactConfig: cfg, + }, + } + } + + // Helper: builds FetchInfo for a mutation. + makeMutationInfo := func(rootFieldName string, providesData *Object) *FetchInfo { + return &FetchInfo{ + OperationType: ast.OperationTypeMutation, + DataSourceName: "accounts", + RootFields: []GraphCoordinate{ + {TypeName: "Mutation", FieldName: rootFieldName}, + }, + ProvidesData: providesData, + } + } + + // Common ProvidesData: mutation returns an object with id and username. + entityProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}}}, + }, + } + mutationProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("updateUsername"), Value: entityProvidesData}, + }, + } + + t.Run("non-mutation operation returns nil", func(t *testing.T) { + ctx := NewContext(context.Background()) + l := makeLoader(ctx, NewFakeLoaderCache(), "default") + + info := &FetchInfo{ + OperationType: ast.OperationTypeQuery, // not a mutation + } + res := makeResult(&MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + }) + responseData := astjson.MustParse(`{"updateUsername":{"id":"1234","username":"NewMe"}}`) + + got := l.detectMutationEntityImpact(res, info, responseData) + assert.Nil(t, got) + }) + + t.Run("nil info returns nil", func(t *testing.T) { + ctx := NewContext(context.Background()) + l := makeLoader(ctx, NewFakeLoaderCache(), "default") + + res := makeResult(&MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + }) + responseData := astjson.MustParse(`{"updateUsername":{"id":"1234","username":"NewMe"}}`) + + got := l.detectMutationEntityImpact(res, nil, responseData) + assert.Nil(t, got) + }) + + t.Run("no MutationEntityImpactConfig returns nil", func(t *testing.T) { + ctx := NewContext(context.Background()) + l := makeLoader(ctx, NewFakeLoaderCache(), "default") + + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(nil) // no config + responseData := astjson.MustParse(`{"updateUsername":{"id":"1234","username":"NewMe"}}`) + + got := l.detectMutationEntityImpact(res, info, responseData) + assert.Nil(t, got) + }) + + t.Run("InvalidateCache true deletes cache entry and returns deletedKeys", func(t *testing.T) { + cache := NewFakeLoaderCache() + // Pre-populate cache with the entity + cacheKey := `{"__typename":"User","key":{"id":"1234"}}` + _ = cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"id":"1234","username":"OldMe"}`)}, + }, 0)) + cache.ClearLog() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, `{"updateUsername":{"id":"1234","username":"NewMe"}}`) + require.NoError(t, err) + + deletedKeys := l.detectMutationEntityImpact(res, info, responseData) + + // Should return the deleted key + assert.Equal(t, map[string]struct{}{cacheKey: {}}, deletedKeys) + + // Verify cache entry was actually deleted + entries, _ := cache.Get(context.Background(), []string{cacheKey}) + assert.Nil(t, entries[0], "cache entry should be deleted") + }) + + t.Run("PopulateCache true writes mutation response payload to L2", func(t *testing.T) { + // Single-subgraph mutations annotated with @cachePopulate have no follow-up + // entity fetch to inherit EnableMutationL2CachePopulation. The populate path + // inside detectSingleMutationEntityImpact must write the entity payload to L2 + // directly so a subsequent read by the same key hits cache. + cache := NewFakeLoaderCache() + cacheKey := `{"__typename":"User","key":{"id":"u-pop"}}` + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL2Cache = true + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + PopulateCache: true, + PopulateTTL: 60 * time.Second, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, + `{"updateUsername":{"id":"u-pop","username":"PopMe"}}`) + require.NoError(t, err) + + _ = l.detectMutationEntityImpact(res, info, responseData) + + // Verify the entity payload was written to L2 under the entity cache key. + entries, err := cache.Get(context.Background(), []string{cacheKey}) + require.NoError(t, err) + require.NotNil(t, entries[0], "PopulateCache should write the entity to L2") + assert.Equal(t, `{"id":"u-pop","username":"PopMe"}`, string(entries[0].Value), + "cached payload must equal the entity projection through ProvidesData") + }) + + t.Run("PopulateCache true does not write to L2 when L2 is disabled", func(t *testing.T) { + cache := NewFakeLoaderCache() + cacheKey := `{"__typename":"User","key":{"id":"u-pop-disabled"}}` + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL2Cache = false + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + PopulateCache: true, + PopulateTTL: 60 * time.Second, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, + `{"updateUsername":{"id":"u-pop-disabled","username":"PopMe"}}`) + require.NoError(t, err) + + _ = l.detectMutationEntityImpact(res, info, responseData) + + entries, err := cache.Get(context.Background(), []string{cacheKey}) + require.NoError(t, err) + assert.Nil(t, entries[0], "PopulateCache must respect EnableL2Cache=false") + }) + + t.Run("PopulateCache false does not write to L2", func(t *testing.T) { + // Defensive: when neither PopulateCache nor InvalidateCache is set and + // analytics is off, detectMutationEntityImpact must not touch the cache. + cache := NewFakeLoaderCache() + + ctx := NewContext(context.Background()) + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + // PopulateCache: false, InvalidateCache: false, no analytics + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, + `{"updateUsername":{"id":"u1","username":"NoPop"}}`) + require.NoError(t, err) + + _ = l.detectMutationEntityImpact(res, info, responseData) + + // Cache must be untouched. + assert.Empty(t, cache.GetLog(), "with no impact config flags set, cache must not be touched") + }) + + t.Run("analytics enabled, no cached value records MutationEvent with HadCachedValue=false", func(t *testing.T) { + cache := NewFakeLoaderCache() // empty cache + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, `{"updateUsername":{"id":"1234","username":"NewMe"}}`) + require.NoError(t, err) + + _ = l.detectMutationEntityImpact(res, info, responseData) + + stats := ctx.GetCacheStats() + require.Len(t, stats.MutationEvents, 1) + + event := stats.MutationEvents[0] + assert.Equal(t, "updateUsername", event.MutationRootField) + assert.Equal(t, "User", event.EntityType) + assert.Equal(t, `{"__typename":"User","key":{"id":"1234"}}`, event.EntityCacheKey) // display key (no prefix) + assert.Equal(t, false, event.HadCachedValue) // no cached value in empty cache + assert.Equal(t, false, event.IsStale) + assert.Equal(t, uint64(0), event.CachedHash) // zero because no cached value + assert.NotEqual(t, uint64(0), event.FreshHash) + assert.Equal(t, 0, event.CachedBytes) + assert.NotEqual(t, 0, event.FreshBytes) + }) + + t.Run("analytics enabled still avoids mutation-time cache reads for stale entries", func(t *testing.T) { + cache := NewFakeLoaderCache() + cacheKey := `{"__typename":"User","key":{"id":"1234"}}` + // Cached value has username="OldMe" (differs from mutation response) + _ = cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"id":"1234","username":"OldMe"}`)}, + }, 0)) + cache.ClearLog() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, `{"updateUsername":{"id":"1234","username":"NewMe"}}`) + require.NoError(t, err) + + _ = l.detectMutationEntityImpact(res, info, responseData) + + stats := ctx.GetCacheStats() + require.Len(t, stats.MutationEvents, 1) + + event := stats.MutationEvents[0] + assert.Equal(t, "updateUsername", event.MutationRootField) + assert.Equal(t, "User", event.EntityType) + assert.Equal(t, false, event.HadCachedValue) + assert.Equal(t, false, event.IsStale) + assert.Equal(t, uint64(0), event.CachedHash) + assert.NotEqual(t, uint64(0), event.FreshHash) + assert.Equal(t, 0, event.CachedBytes) + assert.NotEqual(t, 0, event.FreshBytes) + assert.Equal(t, []CacheLogEntry{{Operation: "delete", Items: []CacheLogItem{{Key: cacheKey}}}}, cache.GetLog()) + }) + + t.Run("analytics enabled still avoids mutation-time cache reads for fresh entries", func(t *testing.T) { + cache := NewFakeLoaderCache() + cacheKey := `{"__typename":"User","key":{"id":"1234"}}` + // Cached value matches the mutation response exactly + _ = cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"id":"1234","username":"NewMe"}`)}, + }, 0)) + cache.ClearLog() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, `{"updateUsername":{"id":"1234","username":"NewMe"}}`) + require.NoError(t, err) + + _ = l.detectMutationEntityImpact(res, info, responseData) + + stats := ctx.GetCacheStats() + require.Len(t, stats.MutationEvents, 1) + + event := stats.MutationEvents[0] + assert.Equal(t, "updateUsername", event.MutationRootField) + assert.Equal(t, "User", event.EntityType) + assert.Equal(t, false, event.HadCachedValue) + assert.Equal(t, false, event.IsStale) + assert.Equal(t, uint64(0), event.CachedHash) + assert.Equal(t, 0, event.CachedBytes) + assert.NotEqual(t, 0, event.FreshBytes) + assert.Equal(t, []CacheLogEntry{{Operation: "delete", Items: []CacheLogItem{{Key: cacheKey}}}}, cache.GetLog()) + }) + + t.Run("InvalidateCache false with analytics records event but no Delete", func(t *testing.T) { + cache := NewFakeLoaderCache() + cacheKey := `{"__typename":"User","key":{"id":"1234"}}` + _ = cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"id":"1234","username":"OldMe"}`)}, + }, 0)) + cache.ClearLog() + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, cache, "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: false, // no deletion + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData, err := astjson.ParseWithArena(l.jsonArena, `{"updateUsername":{"id":"1234","username":"NewMe"}}`) + require.NoError(t, err) + + deletedKeys := l.detectMutationEntityImpact(res, info, responseData) + assert.Nil(t, deletedKeys, "no keys should be deleted when InvalidateCache=false") + + // Verify mutation analytics does not issue a cache read. + log := cache.GetLog() + require.Len(t, log, 0, "mutation impact analytics must not read from cache") + + // Verify cache entry still exists + entries, _ := cache.Get(context.Background(), []string{cacheKey}) + assert.NotNil(t, entries[0], "cache entry should still exist") + + // Verify MutationEvent was recorded + stats := ctx.GetCacheStats() + require.Len(t, stats.MutationEvents, 1) + assert.Equal(t, false, stats.MutationEvents[0].HadCachedValue) + assert.Equal(t, false, stats.MutationEvents[0].IsStale) + }) + + t.Run("no caches map returns nil", func(t *testing.T) { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := &Loader{ + jsonArena: arena.NewMonotonicArena(arena.WithMinBufferSize(1024)), + ctx: ctx, + caches: nil, // no caches + } + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + responseData := astjson.MustParse(`{"updateUsername":{"id":"1234","username":"NewMe"}}`) + + got := l.detectMutationEntityImpact(res, info, responseData) + assert.Nil(t, got) + }) + + t.Run("nil ProvidesData returns nil", func(t *testing.T) { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, NewFakeLoaderCache(), "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := &FetchInfo{ + OperationType: ast.OperationTypeMutation, + DataSourceName: "accounts", + RootFields: []GraphCoordinate{ + {TypeName: "Mutation", FieldName: "updateUsername"}, + }, + ProvidesData: nil, // no ProvidesData + } + res := makeResult(cfg) + + responseData := astjson.MustParse(`{"updateUsername":{"id":"1234","username":"NewMe"}}`) + + got := l.detectMutationEntityImpact(res, info, responseData) + assert.Nil(t, got) + }) + + t.Run("response data not an object returns nil", func(t *testing.T) { + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, NewFakeLoaderCache(), "default") + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("updateUsername", mutationProvidesData) + res := makeResult(cfg) + + // Mutation returns a string instead of object + responseData := astjson.MustParse(`{"updateUsername":"not-an-object"}`) + + got := l.detectMutationEntityImpact(res, info, responseData) + assert.Nil(t, got) + }) + + t.Run("array response invalidates all entities in the list", func(t *testing.T) { + cache := NewFakeLoaderCache() + // Pre-populate cache with two entities + cacheKey1 := `{"__typename":"User","key":{"id":"1"}}` + cacheKey2 := `{"__typename":"User","key":{"id":"2"}}` + _ = cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey1, Value: []byte(`{"id":"1","username":"Alice"}`)}, + {Key: cacheKey2, Value: []byte(`{"id":"2","username":"Bob"}`)}, + }, 0)) + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableCacheAnalytics = true + ctx.initCacheAnalytics() + + l := makeLoader(ctx, cache, "default") + + // ProvidesData for a list mutation: {deleteUsers: [{id, username}]} + listEntityProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}}}, + }, + } + listMutationProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("deleteUsers"), Value: listEntityProvidesData}, + }, + } + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("deleteUsers", listMutationProvidesData) + res := makeResult(cfg) + + // Mutation returns an array of entities + responseData, err := astjson.ParseWithArena(l.jsonArena, `{"deleteUsers":[{"id":"1","username":"Alice"},{"id":"2","username":"Bob"}]}`) + require.NoError(t, err) + + deletedKeys := l.detectMutationEntityImpact(res, info, responseData) + + // Both entities should be invalidated + assert.Equal(t, map[string]struct{}{cacheKey1: {}, cacheKey2: {}}, deletedKeys) + + // Verify both cache entries were deleted + entries, _ := cache.Get(context.Background(), []string{cacheKey1, cacheKey2}) + assert.Nil(t, entries[0], "first entity should be deleted") + assert.Nil(t, entries[1], "second entity should be deleted") + + // Verify analytics recorded events for both entities + stats := ctx.GetCacheStats() + require.Len(t, stats.MutationEvents, 2, "should record mutation event for each entity in the list") + assert.Equal(t, cacheKey1, stats.MutationEvents[0].EntityCacheKey) + assert.Equal(t, false, stats.MutationEvents[0].HadCachedValue) + assert.Equal(t, cacheKey2, stats.MutationEvents[1].EntityCacheKey) + assert.Equal(t, false, stats.MutationEvents[1].HadCachedValue) + }) + + t.Run("array response with non-object items skips them", func(t *testing.T) { + cache := NewFakeLoaderCache() + cacheKey := `{"__typename":"User","key":{"id":"1"}}` + _ = cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: cacheKey, Value: []byte(`{"id":"1","username":"Alice"}`)}, + }, 0)) + + ctx := NewContext(context.Background()) + l := makeLoader(ctx, cache, "default") + + listEntityProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{Path: []string{"id"}}}, + {Name: []byte("username"), Value: &Scalar{Path: []string{"username"}}}, + }, + } + listMutationProvidesData := &Object{ + Fields: []*Field{ + {Name: []byte("deleteUsers"), Value: listEntityProvidesData}, + }, + } + + cfg := &MutationEntityImpactConfig{ + EntityTypeName: "User", + KeyFields: []KeyField{{Name: "id"}}, + CacheName: "default", + InvalidateCache: true, + } + info := makeMutationInfo("deleteUsers", listMutationProvidesData) + res := makeResult(cfg) + + // Array with mixed types: one valid object, one null, one string + responseData, err := astjson.ParseWithArena(l.jsonArena, `{"deleteUsers":[{"id":"1","username":"Alice"},null,"invalid"]}`) + require.NoError(t, err) + + deletedKeys := l.detectMutationEntityImpact(res, info, responseData) + + // Only the valid object entity should be invalidated + assert.Equal(t, map[string]struct{}{cacheKey: {}}, deletedKeys) + }) +} + +// --------------------------------------------------------------------------- +// MutationCacheTTLOverride +// --------------------------------------------------------------------------- + +// TestMutationCacheTTLOverride verifies that MutationCacheTTLOverride takes precedence +// over the entity's default TTL when mutations populate L2 cache. +// Without this, mutation-written cache entries could have inappropriately long TTLs. +func TestMutationCacheTTLOverride(t *testing.T) { + t.Run("mutation with TTL override uses override value", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"updateUser":{"__typename":"User","id":"u1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"name":"Alice"}]}}`), nil + }).Times(1) + + response := buildMutationTTLResponse( + rootDS, entityDS, + newMutationUserCacheKeyTemplate(), newMutationUserProvidesData(), + true, // enableL2Population + 60*time.Second, // mutationTTLOverride + 300*time.Second, // entityTTL (entity default) + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeMutation) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := string(fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors)) + assert.Equal(t, `{"data":{"updateUser":{"__typename":"User","id":"u1","name":"Alice"}}}`, out) + + // No L2 "get" because mutations skip L2 reads (AC-MUT-01). + // L2 Set uses override TTL (60s), not entity default (300s), + // because EnableMutationL2CachePopulation=true and MutationCacheTTLOverride=60s. + cacheLog := cache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"u1"}}`, TTL: 60 * time.Second}}}, // L2 write uses mutation TTL override (60s), not entity default (300s); no prior "get" because mutations skip L2 reads + }, cacheLog) + }) + + t.Run("mutation without TTL override uses entity default", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"updateUser":{"__typename":"User","id":"u1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"name":"Bob"}]}}`), nil + }).Times(1) + + response := buildMutationTTLResponse( + rootDS, entityDS, + newMutationUserCacheKeyTemplate(), newMutationUserProvidesData(), + true, // enableL2Population + 0, // mutationTTLOverride=0 means no override + 300*time.Second, // entityTTL (entity default) + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeMutation) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := string(fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors)) + assert.Equal(t, `{"data":{"updateUser":{"__typename":"User","id":"u1","name":"Bob"}}}`, out) + + // No L2 "get" because mutations skip L2 reads (AC-MUT-01). + // L2 Set uses entity default TTL (300s) because MutationCacheTTLOverride=0. + cacheLog := cache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"User","key":{"id":"u1"}}`, TTL: 300 * time.Second}}}, // L2 write uses entity default TTL (300s); no mutation override (MutationCacheTTLOverride=0) + }, cacheLog) + }) + + t.Run("TTL override not applied when mutation L2 population disabled", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"updateUser":{"__typename":"User","id":"u1"}}}`), nil + }).Times(1) + + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[{"name":"Carol"}]}}`), nil + }).Times(1) + + response := buildMutationTTLResponse( + rootDS, entityDS, + newMutationUserCacheKeyTemplate(), newMutationUserProvidesData(), + false, // enableL2Population=false — mutations do NOT write to L2 + 60*time.Second, // mutationTTLOverride is set but irrelevant since L2 writes are disabled + 300*time.Second, // entityTTL + ) + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeMutation) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + out := string(fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors)) + assert.Equal(t, `{"data":{"updateUser":{"__typename":"User","id":"u1","name":"Carol"}}}`, out) + + // No L2 operations at all — mutations skip L2 entirely when EnableMutationL2CachePopulation=false + cacheLog := cache.GetLog() + assert.Equal(t, []CacheLogEntry{}, cacheLog) + }) +} + +// --------------------------------------------------------------------------- +// Helpers for mutation cache tests +// --------------------------------------------------------------------------- + +// buildMutationTTLResponse creates a GraphQLResponse for testing mutation TTL override. +// The root fetch is a mutation that sets EnableMutationL2CachePopulation and MutationCacheTTLOverride +// on the Loader. The entity fetch that follows inherits these flags via resolveSingle propagation. +func buildMutationTTLResponse( + rootDS, entityDS DataSource, + cacheKeyTemplate CacheKeyTemplate, + providesData *Object, + enableL2Population bool, + mutationTTLOverride time.Duration, + entityTTL time.Duration, +) *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeMutation}, + Fetches: Sequence( + // Root mutation fetch — propagates EnableMutationL2CachePopulation and MutationCacheTTLOverride to Loader + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data"}}, + Caching: FetchCacheConfiguration{ + EnableMutationL2CachePopulation: enableL2Population, + MutationCacheTTLOverride: mutationTTLOverride, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://accounts.service","body":{"query":"mutation{updateUser(id:\"u1\",name:\"Alice\"){__typename id}}"}}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "accounts", DataSourceName: "accounts", + RootFields: []GraphCoordinate{{TypeName: "Mutation", FieldName: "updateUser"}}, + OperationType: ast.OperationTypeMutation, + }, + }, "mutation"), + + // Entity fetch — inherits mutation L2 flags, uses caching config with entity TTL + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{SelectResponseDataPath: []string{"data", "_entities", "0"}}, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: entityTTL, + CacheKeyTemplate: cacheKeyTemplate, + UseL1Cache: true, + }, + }, + InputTemplate: InputTemplate{Segments: []TemplateSegment{ + {Data: []byte(`{"method":"POST","url":"http://accounts.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on User {name}}}","variables":{"representations":[`), SegmentType: StaticSegmentType}, + {SegmentType: VariableSegmentType, VariableKind: ResolvableObjectVariableKind, Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + })}, + {Data: []byte(`]}}}`), SegmentType: StaticSegmentType}, + }}, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + Info: &FetchInfo{ + DataSourceID: "accounts", DataSourceName: "accounts", + RootFields: []GraphCoordinate{{TypeName: "User", FieldName: "name"}}, + OperationType: ast.OperationTypeQuery, // Entity fetches resolve from non-root types, so planner sets Query + ProvidesData: providesData, + }, + }, "mutation.updateUser", ObjectPath("updateUser")), + ), + Data: &Object{ + Fields: []*Field{{ + Name: []byte("updateUser"), + Value: &Object{ + Path: []string{"updateUser"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}}}, + }, + }, + }}, + }, + } +} + +// newMutationUserCacheKeyTemplate returns a cache key template for User entities in mutation tests. +func newMutationUserCacheKeyTemplate() CacheKeyTemplate { + return &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + } +} + +// newMutationUserProvidesData returns a ProvidesData for User entities in mutation tests. +func newMutationUserProvidesData() *Object { + return &Object{ + Fields: []*Field{ + {Name: []byte("name"), Value: &Scalar{Path: []string{"name"}, Nullable: false}}, + }, + } +} diff --git a/v2/pkg/engine/resolve/negative_cache_test.go b/v2/pkg/engine/resolve/negative_cache_test.go new file mode 100644 index 0000000000..27681a59cd --- /dev/null +++ b/v2/pkg/engine/resolve/negative_cache_test.go @@ -0,0 +1,953 @@ +package resolve + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" + "github.com/wundergraph/graphql-go-tools/v2/pkg/fastjsonext" +) + +// newNegativeCacheProductProvidesData returns a ProvidesData object for negative cache tests. +// Uses only "name" since that's what the entity fetch requests (unlike the interceptor +// helper which includes "id" + "name"). +func newNegativeCacheProductProvidesData() *Object { + return &Object{ + Fields: []*Field{ + { + Name: []byte("name"), + Value: &Scalar{ + Path: []string{"name"}, + Nullable: false, + }, + }, + }, + } +} + +// newNegativeCacheEntitySegments returns input template segments for negative cache entity fetches. +func newNegativeCacheEntitySegments() []TemplateSegment { + return []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://products.service","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){... on Product {name}}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{Path: []string{"__typename"}}}, + {Name: []byte("id"), Value: &String{Path: []string{"id"}}}, + }, + }), + }, + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + } +} + +// TestNegativeCache_NullEntityBehavior verifies the negative cache lifecycle: storing +// null entity results as sentinels, serving them on subsequent requests, TTL behavior, +// mutation interaction, and overwriting sentinels with real data after TTL expiry. +func TestNegativeCache_NullEntityBehavior(t *testing.T) { + t.Run("null entity stored as negative sentinel and served on second request", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root fetch provides the product reference + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).AnyTimes() + + // Entity fetch returns null (entity not found in this subgraph) + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[null]}}`), nil + }).Times(1) // Only called ONCE — second request uses negative cache + + cacheKeyTemplate := newProductCacheKeyTemplate() + providesData := newNegativeCacheProductProvidesData() + + buildResponse := func() *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + // Root fetch to populate product reference + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + // Entity fetch that returns null + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + NegativeCacheTTL: 10 * time.Second, + }, + }, + InputTemplate: InputTemplate{ + Segments: newNegativeCacheEntitySegments(), + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + } + } + + execute := func() string { + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, buildResponse(), resolvable) + require.NoError(t, err) + + return string(fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors)) + } + + // First execution: subgraph is called, returns null + out1 := execute() + t.Logf("First output: %s", out1) + + // Verify the null sentinel was stored in L2 + cacheLog := cache.GetLog() + var setFound bool + for _, entry := range cacheLog { + if entry.Operation == "set" { + for _, item := range entry.Items { + t.Logf("Stored cache key: %s", item.Key) + } + setFound = true + } + } + assert.True(t, setFound) + + // Find the last set operation's first key and verify stored value is "null" + for i := len(cacheLog) - 1; i >= 0; i-- { + if cacheLog[i].Operation == "set" && len(cacheLog[i].Items) > 0 { + storedValue := cache.GetValue(cacheLog[i].Items[0].Key) + assert.Equal(t, "null", string(storedValue)) + break + } + } + + cache.ClearLog() + + // Second execution: should NOT call the subgraph (negative cache hit) + out2 := execute() + t.Logf("Second output: %s", out2) + + // Verify L2 cache was read (GET) and returned a hit + cacheLog2 := cache.GetLog() + var getFound bool + for _, entry := range cacheLog2 { + if entry.Operation == "get" { + for _, item := range entry.Items { + t.Logf("Cache key %s: hit=%v", item.Key, item.Hit) + if item.Hit { + getFound = true + } + } + } + } + assert.True(t, getFound) + }) + + t.Run("negative caching disabled when NegativeCacheTTL is 0", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root fetch provides the product reference + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).AnyTimes() + + // Subgraph returns null both times — no negative caching + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[null]}}`), nil + }).Times(2) // Called TWICE because negative caching is disabled + + cacheKeyTemplate := newProductCacheKeyTemplate() + providesData := newNegativeCacheProductProvidesData() + + buildResponse := func() *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + NegativeCacheTTL: 0, // Negative caching disabled + }, + }, + InputTemplate: InputTemplate{ + Segments: newNegativeCacheEntitySegments(), + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + } + } + + execute := func() { + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, buildResponse(), resolvable) + require.NoError(t, err) + } + + // Both calls should hit the subgraph (no negative caching) + execute() + cache.ClearLog() + execute() + // gomock verifies Times(2) — both calls went to subgraph + }) + + t.Run("negative cache sentinel uses NegativeCacheTTL not regular TTL", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root fetch provides the product reference + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // Entity fetch returns null + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[null]}}`), nil + }).Times(1) + + cacheKeyTemplate := newProductCacheKeyTemplate() + providesData := newNegativeCacheProductProvidesData() + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 60 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + NegativeCacheTTL: 5 * time.Second, // Much shorter than regular TTL + }, + }, + InputTemplate: InputTemplate{ + Segments: newNegativeCacheEntitySegments(), + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Verify the TTL used for the negative sentinel + cacheLog := cache.GetLog() + for _, entry := range cacheLog { + if entry.Operation == "set" { + t.Logf("Set: items=%v", entry.Items) + // The negative sentinel should use NegativeCacheTTL (5s), not regular TTL (60s) + // Negative sentinel should use NegativeCacheTTL (5s), not regular TTL (60s) + assert.Equal(t, 5*time.Second, entry.Items[0].TTL) + } + } + }) + + t.Run("negative cache with mutation population stores sentinel with NegativeCacheTTL", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root mutation fetch + mutationDS := NewMockDataSource(ctrl) + mutationDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"createProduct":{"__typename":"Product","id":"prod-new"}}}`), nil + }).Times(1) + + // Entity fetch returns null (entity not found after creation — edge case) + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[null]}}`), nil + }).Times(1) + + cacheKeyTemplate := newProductCacheKeyTemplate() + providesData := newNegativeCacheProductProvidesData() + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeMutation, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: mutationDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + Caching: FetchCacheConfiguration{ + EnableMutationL2CachePopulation: true, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://mutation.service","body":{"query":"mutation{createProduct{__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + Info: &FetchInfo{ + DataSourceID: "mutations", + DataSourceName: "mutations", + OperationType: ast.OperationTypeMutation, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "mutation"), + + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 60 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + NegativeCacheTTL: 10 * time.Second, + }, + }, + InputTemplate: InputTemplate{ + Segments: newNegativeCacheEntitySegments(), + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, // Entity fetch within mutation gets Query type + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "mutation.createProduct", ObjectPath("createProduct")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("createProduct"), + Value: &Object{ + Path: []string{"createProduct"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + } + + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeMutation) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + // Verify the full cache log: no L2 read (mutations skip L2 reads per AC-MUT-01), + // only the negative sentinel write with NegativeCacheTTL (10s) + cacheLog := cache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "set", Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-new"}}`, TTL: 10 * time.Second}}}, // Negative sentinel stored with NegativeCacheTTL (10s), not entity TTL (60s); no prior "get" because mutations skip L2 reads + }, cacheLog) + + // Verify the stored value is the null sentinel + storedValue := cache.GetValue(`{"__typename":"Product","key":{"id":"prod-new"}}`) + assert.Equal(t, "null", string(storedValue)) + }) + + t.Run("negative cache entry overwritten by real data on subsequent fetch", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // Root fetch provides the product reference + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).AnyTimes() + + callCount := 0 + // Entity fetch: first call returns null, second returns real data + productDS := NewMockDataSource(ctrl) + productDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + callCount++ + if callCount == 1 { + return []byte(`{"data":{"_entities":[null]}}`), nil + } + return []byte(`{"data":{"_entities":[{"name":"Widget"}]}}`), nil + }).Times(2) // Called twice: first stores null, second after cache eviction stores real data + + cacheKeyTemplate := newProductCacheKeyTemplate() + providesData := newNegativeCacheProductProvidesData() + + buildResponse := func() *GraphQLResponse { + return &GraphQLResponse{ + Info: &GraphQLResponseInfo{ + OperationType: ast.OperationTypeQuery, + }, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: productDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: cacheKeyTemplate, + NegativeCacheTTL: 5 * time.Second, + }, + }, + InputTemplate: InputTemplate{ + Segments: newNegativeCacheEntitySegments(), + }, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: providesData, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Nullable: true, + Fields: []*Field{ + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: true, + }, + }, + }, + }, + }, + }, + }, + } + } + + execute := func() string { + loader := &Loader{ + caches: map[string]LoaderCache{ + "default": cache, + }, + } + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, buildResponse(), resolvable) + require.NoError(t, err) + + return string(fastjsonext.PrintGraphQLResponse(resolvable.data, resolvable.errors)) + } + + // Request 1: returns null for the entity fetch → product has __typename/id from root but no "name" + out1 := execute() + // First request: only root fields, no entity data (null entity) + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`, out1) + + productKey := `{"__typename":"Product","key":{"id":"prod-1"}}` + + // Verify request 1 cache log: L2 miss → negative sentinel stored + cacheLog := cache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: false}}}, // L2 miss: cache empty on first request + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 5 * time.Second}}}, // Negative sentinel stored with NegativeCacheTTL (5s) + }, cacheLog) + + // Evict the negative sentinel to simulate TTL expiry + _ = cache.Delete(context.Background(), []string{productKey}) + cache.ClearLog() + + // Request 2: negative sentinel evicted, subgraph called again, returns real data + out2 := execute() + // Second request: real product data after negative cache eviction + assert.Equal(t, `{"data":{"product":{"__typename":"Product","id":"prod-1","name":"Widget"}}}`, out2) + + // Verify request 2 cache log: L2 miss (sentinel evicted) → real data stored with entity TTL + cacheLog2 := cache.GetLog() + assert.Equal(t, []CacheLogEntry{ + {Operation: "get", Items: []CacheLogItem{{Key: productKey, Hit: false}}}, // L2 miss: negative sentinel was evicted (TTL expiry simulated) + {Operation: "set", Items: []CacheLogItem{{Key: productKey, TTL: 30 * time.Second}}}, // Real entity data stored with regular TTL (30s), replacing the evicted sentinel + }, cacheLog2) + + // Verify the cache now holds real data, not the null sentinel + // Cache now holds real data, not the null sentinel + storedValue := cache.GetValue(productKey) + assert.Equal(t, `{"__typename":"Product","id":"prod-1","name":"Widget"}`, string(storedValue)) + }) +} + +// TestNegativeCachingResolveRegression_PreservesParentObjectForNullableField guards +// against a regression where a null entity fetch would drop the parent object entirely. +// The parent object with its already-known fields (e.g., id) must survive the null merge. +func TestNegativeCachingResolveRegression_PreservesParentObjectForNullableField(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + cache := NewFakeLoaderCache() + + // The root fetch discovers the Product identity and creates the parent object that the + // entity fetch will later extend. It does not provide `name`. + rootDS := NewMockDataSource(ctrl) + rootDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"product":{"__typename":"Product","id":"prod-1"}}}`), nil + }).Times(1) + + // The entity fetch comes back as `null`, which triggers negative caching for this Product key. + // The regression here was that resolve could lose the already-built parent object and return + // `product: null` instead of preserving `product.id` and filling the nullable child as `null`. + entityDS := NewMockDataSource(ctrl) + entityDS.EXPECT(). + Load(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(ctx context.Context, headers any, input []byte) ([]byte, error) { + return []byte(`{"data":{"_entities":[null]}}`), nil + }).Times(1) + + response := &GraphQLResponse{ + Info: &GraphQLResponseInfo{OperationType: ast.OperationTypeQuery}, + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + FetchConfiguration: FetchConfiguration{ + DataSource: rootDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{{ + Data: []byte(`{"method":"POST","url":"http://root.service","body":{"query":"{product {__typename id}}"}}`), + SegmentType: StaticSegmentType, + }}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query"), + SingleWithPath(&SingleFetch{ + // This entity fetch asks only for the nullable `name` field. Negative caching is enabled + // so the resolver has to merge a negative-cache result back into the existing `product` object. + FetchConfiguration: FetchConfiguration{ + DataSource: entityDS, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities", "0"}, + }, + Caching: FetchCacheConfiguration{ + Enabled: true, + CacheName: "default", + TTL: 30 * time.Second, + CacheKeyTemplate: newProductCacheKeyTemplate(), + NegativeCacheTTL: 10 * time.Second, + }, + }, + InputTemplate: InputTemplate{Segments: newNegativeCacheEntitySegments()}, + Info: &FetchInfo{ + DataSourceID: "products", + DataSourceName: "products", + OperationType: ast.OperationTypeQuery, + ProvidesData: &Object{Fields: []*Field{{ + Name: []byte("name"), + Value: &String{Path: []string{"name"}, Nullable: true}, + }}}, + }, + DataSourceIdentifier: []byte("graphql_datasource.Source"), + }, "query.product", ObjectPath("product")), + ), + Data: &Object{Fields: []*Field{{ + Name: []byte("product"), + Value: &Object{ + Path: []string{"product"}, + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}, Nullable: false}}, + // `name` is nullable, so a negative-cache hit should materialize it as `null` + // while still preserving the parent object and its non-null `id`. + {Name: []byte("name"), Value: &String{Path: []string{"name"}, Nullable: true}}, + }, + }, + }}}, + } + + loader := &Loader{caches: map[string]LoaderCache{"default": cache}} + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.DisableSubgraphRequestDeduplication = true + ctx.ExecutionOptions.Caching.EnableL2Cache = true + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + resolvable := NewResolvable(ar, ResolvableOptions{}) + err := resolvable.Init(ctx, nil, ast.OperationTypeQuery) + require.NoError(t, err) + + err = loader.LoadGraphQLResponseData(ctx, response, resolvable) + require.NoError(t, err) + + buf := &bytes.Buffer{} + err = resolvable.Resolve(context.Background(), response.Data, response.Fetches, buf) + require.NoError(t, err) + // The parent object must survive the negative entity result. The regression would have + // dropped the object entirely instead of returning the already-known `id` plus `name: null`. + assert.Equal(t, `{"data":{"product":{"id":"prod-1","name":null}}}`, buf.String()) +} + +// TestLoader_cacheKeysToNegativeEntries_PreservesPositiveEntityDataWithNullableFields +// verifies that when an entity already has non-key fields from a prior fetch, the +// negative cache entry preserves them and adds the newly requested nullable field as null. +func TestLoader_cacheKeysToNegativeEntries_PreservesPositiveEntityDataWithNullableFields(t *testing.T) { + t.Parallel() + + a := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + loader := &Loader{} + // Start from an existing cached entity that already has non-key fields. This is the + // branch where negative caching keeps an object-shaped payload instead of plain `null`. + fromCache, err := astjson.ParseBytesWithArena(a, []byte(`{"__typename":"Item","id":"1","name":"Widget"}`)) + require.NoError(t, err) + + res := &result{ + providesData: &Object{ + Fields: []*Field{ + { + Name: []byte("summary"), + Value: &String{ + Path: []string{"summary"}, + Nullable: true, + }, + }, + }, + }, + } + + // Simulate a negative-cache write for the same entity key. The helper should preserve + // the existing object shape and materialize the requested nullable field as explicit null. + entries := loader.cacheKeysToNegativeEntries(a, res, []*CacheKey{{ + FromCache: fromCache, + Keys: []string{`{"__typename":"Item","key":{"id":"1"}}`}, + NegativeCacheHit: true, + }}) + + require.Len(t, entries, 1) + // `summary` was not present in the old payload, but because it is nullable in ProvidesData + // the negative-cache value must include `"summary": null` so the same selection can validate from cache. + require.Equal(t, + compactJSONForAssert(t, `{"__typename":"Item","id":"1","name":"Widget","summary":null}`), + compactJSONForAssert(t, string(entries[0].Value)), + ) +} + +// TestLoader_cacheKeysToNegativeEntries_UsesNullSentinelWithoutPositiveEntityData +// verifies that with no prior entity data, the negative cache entry collapses to +// the literal "null" sentinel instead of storing key-only scaffolding. +func TestLoader_cacheKeysToNegativeEntries_UsesNullSentinelWithoutPositiveEntityData(t *testing.T) { + t.Parallel() + + a := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + loader := &Loader{} + // With no existing non-key entity data, negative caching must collapse to the literal + // `null` sentinel rather than storing key-only scaffolding as if it were a real entity. + entries := loader.cacheKeysToNegativeEntries(a, &result{}, []*CacheKey{{ + Keys: []string{`{"__typename":"Item","key":{"id":"1"}}`}, + NegativeCacheHit: true, + }}) + + require.Len(t, entries, 1) + require.Equal(t, "null", string(entries[0].Value)) +} diff --git a/v2/pkg/engine/resolve/node_object.go b/v2/pkg/engine/resolve/node_object.go index 7f5e94a4c6..91b8d0f9e7 100644 --- a/v2/pkg/engine/resolve/node_object.go +++ b/v2/pkg/engine/resolve/node_object.go @@ -3,16 +3,51 @@ package resolve import ( "bytes" "slices" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/unsafebytes" ) +// KeyField represents a field in an @key directive. Supports nested keys: +// @key(fields: "id") → [{Name:"id"}] +// @key(fields: "id address { city }") → [{Name:"id"}, {Name:"address", Children:[{Name:"city"}]}] +type KeyField struct { + Name string + Children []KeyField // non-nil for nested object key fields +} + +// ObjectCacheAnalytics holds entity analytics configuration set at plan time. +// Nil for non-entity types. For polymorphic types (interface/union), ByTypeName +// maps concrete type names to their analytics config. +type ObjectCacheAnalytics struct { + // Concrete entity type (ByTypeName == nil): use KeyFields/HashKeys directly + KeyFields []KeyField // full @key structure (without __typename) + HashKeys bool // true = hash entity keys, false = raw (default) + + // Polymorphic type (ByTypeName != nil): resolve __typename at runtime, then look up + // Only populated for interface/union types where at least one implementor is an entity + ByTypeName map[string]*ObjectCacheAnalytics // concreteName → analytics (nil = not entity) +} + +// IsKeyField returns true if fieldName is a top-level @key field. +func (a *ObjectCacheAnalytics) IsKeyField(name string) bool { + for _, kf := range a.KeyFields { + if kf.Name == name { + return true + } + } + return false +} + type Object struct { - Nullable bool - Path []string - Fields []*Field + Nullable bool + Path []string + Fields []*Field + HasAliases bool // True if any field in this object or descendants has an alias or CacheArgs (triggers cache normalization) - PossibleTypes map[string]struct{} `json:"-"` - SourceName string `json:"-"` - TypeName string `json:"-"` + PossibleTypes map[string]struct{} `json:"-"` + SourceName string `json:"-"` + TypeName string `json:"-"` + CacheAnalytics *ObjectCacheAnalytics `json:"-"` // nil for non-entity types } func (o *Object) Copy() Node { @@ -21,9 +56,10 @@ func (o *Object) Copy() Node { fields[i] = f.Copy() } return &Object{ - Nullable: o.Nullable, - Path: o.Path, - Fields: fields, + Nullable: o.Nullable, + Path: o.Path, + Fields: fields, + HasAliases: o.HasAliases, } } @@ -86,8 +122,17 @@ func (*EmptyObject) Copy() Node { return &EmptyObject{} } +// CacheFieldArg captures one argument's variable name for cache key suffix computation. +// At plan time, field arguments become variable references after normalization (e.g., friends(first: $a)). +// At resolve time, we resolve the variable from ctx.Variables to compute the actual suffix. +type CacheFieldArg struct { + ArgName string // GraphQL argument name (e.g., "first") + VariableName string // Variable name in ctx.Variables (e.g., "a" for normalized variable $a) +} + type Field struct { Name []byte + OriginalName []byte // Schema field name when Name is an alias; nil if Name IS the original Value Node Position Position Defer *DeferField @@ -95,6 +140,7 @@ type Field struct { OnTypeNames [][]byte ParentOnTypeNames []ParentOnTypeNames Info *FieldInfo + CacheArgs []CacheFieldArg // nil when field has no arguments; sorted by ArgName } type ParentOnTypeNames struct { @@ -104,14 +150,26 @@ type ParentOnTypeNames struct { func (f *Field) Copy() *Field { return &Field{ - Name: f.Name, - Value: f.Value.Copy(), - Position: f.Position, - Defer: f.Defer, - Stream: f.Stream, - OnTypeNames: f.OnTypeNames, - Info: f.Info, + Name: f.Name, + OriginalName: f.OriginalName, + Value: f.Value.Copy(), + Position: f.Position, + Defer: f.Defer, + Stream: f.Stream, + OnTypeNames: f.OnTypeNames, + Info: f.Info, + CacheArgs: f.CacheArgs, + } +} + +// SchemaFieldName returns the original schema field name. +// If OriginalName is set (field has an alias), returns OriginalName. +// Otherwise returns Name (which IS the original name). +func (f *Field) SchemaFieldName() string { + if f.OriginalName != nil { + return unsafebytes.BytesToString(f.OriginalName) } + return unsafebytes.BytesToString(f.Name) } func (f *Field) Equals(n *Field) bool { @@ -149,6 +207,10 @@ type FieldInfo struct { // IndirectInterfaceNames is set to the interfaces name if the field is on a concrete type that implements an interface which wraps it // It's plural because interfaces and be overlapping with types that implement multiple interfaces IndirectInterfaceNames []string + // CacheAnalyticsHash is true if this field should be hashed for cache analytics. + // Set at plan time for non-key scalar fields on concrete entity types. + // At runtime, replaces both IsEntityType() and IsKeyField() checks with a single bool. + CacheAnalyticsHash bool } func (i *FieldInfo) Merge(other *FieldInfo) { @@ -180,3 +242,36 @@ type StreamField struct { } type DeferField struct{} + +// ComputeHasAliases recursively checks whether any field in the object tree has an alias +// or CacheArgs, and sets HasAliases on each Object accordingly. +// HasAliases gates cache normalization: aliased fields need renaming, and fields with +// CacheArgs need arg-suffix renaming. Both require the normalize/denormalize path. +func ComputeHasAliases(obj *Object) bool { + if obj == nil { + return false + } + hasAliases := false + for _, field := range obj.Fields { + if field.OriginalName != nil || len(field.CacheArgs) > 0 { + hasAliases = true + } + if computeNodeHasAliases(field.Value) { + hasAliases = true + } + } + obj.HasAliases = hasAliases + return hasAliases +} + +func computeNodeHasAliases(node Node) bool { + switch n := node.(type) { + case *Object: + return ComputeHasAliases(n) + case *Array: + if n != nil && n.Item != nil { + return computeNodeHasAliases(n.Item) + } + } + return false +} diff --git a/v2/pkg/engine/resolve/request_scoped_test.go b/v2/pkg/engine/resolve/request_scoped_test.go new file mode 100644 index 0000000000..aa385a9367 --- /dev/null +++ b/v2/pkg/engine/resolve/request_scoped_test.go @@ -0,0 +1,1657 @@ +package resolve + +import ( + "bytes" + "context" + "reflect" + "runtime/debug" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" + + "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" +) + +// mustParseArena is a test helper that parses JSON into an arena-allocated value. +func mustParseArena(t *testing.T, ar arena.Arena, data string) *astjson.Value { + t.Helper() + v, err := astjson.ParseBytesWithArena(ar, []byte(data)) + require.NoError(t, err) + return v +} + +// newViewerObj constructs a ProvidesData Object describing a nullable viewer +// with the given scalar sub-fields. Callers may append alias/CacheArgs fields +// afterwards. ComputeHasAliases is invoked so the HasAliases gate is set. +func newViewerObj(fieldNames ...string) *Object { + fields := make([]*Field, 0, len(fieldNames)) + for _, name := range fieldNames { + fields = append(fields, &Field{ + Name: []byte(name), + Value: &Scalar{Nullable: true}, + }) + } + obj := &Object{ + Nullable: true, + Fields: fields, + } + ComputeHasAliases(obj) + return obj +} + +func valueLivesOnArena(a arena.Arena, value *astjson.Value) bool { + if a == nil || value == nil { + return false + } + + arenaValue := reflect.ValueOf(a) + if arenaValue.Kind() == reflect.Ptr { + arenaValue = arenaValue.Elem() + } + if !arenaValue.IsValid() { + return false + } + + buffers := arenaValue.FieldByName("buffers") + if !buffers.IsValid() { + return false + } + + ptr := uintptr(unsafe.Pointer(value)) + for i := 0; i < buffers.Len(); i++ { + bufferValue := buffers.Index(i) + if bufferValue.IsNil() { + continue + } + bufferValue = bufferValue.Elem() + start := uintptr(bufferValue.FieldByName("ptr").Pointer()) + size := uintptr(bufferValue.FieldByName("size").Uint()) + if start == 0 || size == 0 { + continue + } + if ptr >= start && ptr < start+size { + return true + } + } + + return false +} + +func TestRequestScopedInjection_MultipleItemsSurvivesGCWhileRendering(t *testing.T) { + t.Parallel() + + old := debug.SetGCPercent(1) + defer debug.SetGCPercent(old) + + renderShape := &Object{ + Nullable: true, + Fields: []*Field{ + { + Name: []byte("articles"), + Value: &Array{ + Path: []string{"articles"}, + Nullable: true, + Item: &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}, Nullable: true}}, + { + Name: []byte("currentViewer"), + Value: &Object{ + Nullable: true, + Path: []string{"currentViewer"}, + Fields: []*Field{ + {Name: []byte("id"), Value: &String{Path: []string{"id"}, Nullable: true}}, + {Name: []byte("name"), Value: &String{Path: []string{"name"}, Nullable: true}}, + {Name: []byte("email"), Value: &String{Path: []string{"email"}, Nullable: true}}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("id", "name", "email"), + }, + }, + } + + for i := range gcIterations { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + resolvable := NewResolvable(ar, ResolvableOptions{}) + require.NoError(t, resolvable.Init(ctx, []byte(`{"articles":[{"id":"a1"},{"id":"a2"},{"id":"a3"}]}`), ast.OperationTypeQuery)) + + loader := &Loader{ + jsonArena: ar, + ctx: ctx, + resolvable: resolvable, + requestScopedL1: map[string]*astjson.Value{}, + } + loader.requestScopedL1["viewer.Personalized.currentViewer"] = mustParseArena(t, ar, `{"id":"v1","name":"Alice","email":"alice@example.com"}`) + + items := resolvable.data.Get("articles").GetArray() + require.Len(t, items, 3) + require.True(t, loader.tryRequestScopedInjection(&result{}, injectCfg, items)) + + forceGC() + heapChurn := make([][]byte, 0, 256) + for range 256 { + heapChurn = append(heapChurn, bytes.Repeat([]byte("x"), 1024)) + } + forceGC() + + out := &bytes.Buffer{} + err := resolvable.Resolve(ctx.ctx, renderShape, nil, out) + require.NoError(t, err, "iteration %d", i) + assert.Equal(t, + `{"data":{"articles":[{"id":"a1","currentViewer":{"id":"v1","name":"Alice","email":"alice@example.com"}},{"id":"a2","currentViewer":{"id":"v1","name":"Alice","email":"alice@example.com"}},{"id":"a3","currentViewer":{"id":"v1","name":"Alice","email":"alice@example.com"}}]}}`, + out.String(), + "iteration %d", + i, + ) + + _ = heapChurn + } +} + +func TestRequestScopedInjection_MultipleItemsStoresValuesOnRequestArena(t *testing.T) { + t.Parallel() + + old := debug.SetGCPercent(1) + defer debug.SetGCPercent(old) + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + ctx := NewContext(context.Background()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + resolvable := NewResolvable(ar, ResolvableOptions{}) + require.NoError(t, resolvable.Init(ctx, []byte(`{"articles":[{"id":"a1"},{"id":"a2"}]}`), ast.OperationTypeQuery)) + + loader := &Loader{ + jsonArena: ar, + ctx: ctx, + resolvable: resolvable, + requestScopedL1: map[string]*astjson.Value{}, + } + loader.requestScopedL1["viewer.Personalized.currentViewer"] = mustParseArena(t, ar, `{"id":"v1","name":"Alice","email":"alice@example.com"}`) + + items := resolvable.data.Get("articles").GetArray() + require.Len(t, items, 2) + require.True(t, loader.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("id", "name", "email"), + }, + }, + }, items)) + + firstInjected := items[0].Get("currentViewer") + secondInjected := items[1].Get("currentViewer") + require.NotNil(t, firstInjected) + require.NotNil(t, secondInjected) + assert.True(t, valueLivesOnArena(ar, firstInjected), "first injected value must be allocated on the request arena") + assert.True(t, valueLivesOnArena(ar, secondInjected), "second injected value must be allocated on the request arena") +} + +func TestTryRequestScopedInjection(t *testing.T) { + t.Parallel() + + t.Run("no hints returns false", func(t *testing.T) { + t.Parallel() + + l := &Loader{ + jsonArena: arena.NewMonotonicArena(arena.WithMinBufferSize(1024)), + requestScopedL1: map[string]*astjson.Value{}, + } + cfg := FetchCacheConfiguration{} + items := []*astjson.Value{astjson.MustParse(`{"id":"1"}`)} + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.False(t, ok) + }) + + t.Run("hint not in cache returns false", func(t *testing.T) { + t.Parallel() + + l := &Loader{ + jsonArena: arena.NewMonotonicArena(arena.WithMinBufferSize(1024)), + requestScopedL1: map[string]*astjson.Value{}, + } + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + items := []*astjson.Value{astjson.MustParse(`{"id":"1"}`)} + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.False(t, ok) + assert.Equal(t, `{"id":"1"}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("all hints found injects and returns true", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cachedViewer := mustParseArena(t, ar, `{"name":"Alice","role":"admin"}`) + l.requestScopedL1["viewer.Personalized.currentViewer"] = cachedViewer + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("name", "role"), + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"1"}`), + mustParseArena(t, ar, `{"id":"2"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.True(t, ok) + + assert.Equal(t, `{"id":"1","currentViewer":{"name":"Alice","role":"admin"}}`, string(items[0].MarshalTo(nil))) + assert.Equal(t, `{"id":"2","currentViewer":{"name":"Alice","role":"admin"}}`, string(items[1].MarshalTo(nil))) + }) + + t.Run("field widening blocks injection when cached value missing required fields", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cachedViewer := mustParseArena(t, ar, `{"id":"1","name":"Alice"}`) + l.requestScopedL1["viewer.Personalized.currentViewer"] = cachedViewer + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("id", "name", "email"), + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.False(t, ok) + // Items should NOT be modified + assert.Equal(t, `{"id":"99"}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("field widening allows injection when cached value has all required fields", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cachedViewer := mustParseArena(t, ar, `{"id":"1","name":"Alice","email":"a@b.com"}`) + l.requestScopedL1["viewer.Personalized.currentViewer"] = cachedViewer + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("id", "name"), + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.True(t, ok) + // DeepCopy preserves all fields from the cached value. Extra fields + // beyond the hint's ProvidesData are harmless — the response walker + // only renders fields listed in the query, so "email" is ignored + // downstream even though it appears in the injected value. + assert.Equal(t, `{"id":"99","currentViewer":{"id":"1","name":"Alice","email":"a@b.com"}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("nil ProvidesData allows injection for backward compat", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cachedViewer := mustParseArena(t, ar, `{"id":"1"}`) + l.requestScopedL1["viewer.Personalized.currentViewer"] = cachedViewer + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + // ProvidesData intentionally nil — legacy byte-copy fast path + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"99","currentViewer":{"id":"1"}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("partial hints returns false but does not mutate items", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cachedViewer := mustParseArena(t, ar, `{"name":"Alice"}`) + l.requestScopedL1["viewer.Personalized.currentViewer"] = cachedViewer + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("name"), + }, + { + FieldName: "settings", + FieldPath: []string{"settings"}, + L1Key: "viewer.Personalized.settings", + ProvidesData: newViewerObj("theme"), + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"1"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.False(t, ok) + + // With collect-then-inject, items are NOT mutated when any hint fails. + assert.Equal(t, `{"id":"1"}`, string(items[0].MarshalTo(nil))) + }) +} + +func TestExportRequestScopedFields(t *testing.T) { + t.Parallel() + + t.Run("no exports is a no-op", func(t *testing.T) { + t.Parallel() + + l := &Loader{ + jsonArena: arena.NewMonotonicArena(arena.WithMinBufferSize(1024)), + requestScopedL1: map[string]*astjson.Value{}, + } + cfg := FetchCacheConfiguration{} + items := []*astjson.Value{astjson.MustParse(`{"id":"1"}`)} + + l.exportRequestScopedFields(&result{}, cfg, items) + count := len(l.requestScopedL1) + assert.Equal(t, 0, count) + }) + + t.Run("exports value from first entity", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"1","currentViewer":{"name":"Alice"}}`), + mustParseArena(t, ar, `{"id":"2","currentViewer":{"name":"Alice"}}`), + } + + l.exportRequestScopedFields(&result{}, cfg, items) + + cached, ok := l.requestScopedL1["viewer.Personalized.currentViewer"] + require.True(t, ok) + assert.Equal(t, `{"name":"Alice"}`, string(cached.MarshalTo(nil))) + }) + + t.Run("skips null values", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"1","currentViewer":null}`), + } + + l.exportRequestScopedFields(&result{}, cfg, items) + + _, ok := l.requestScopedL1["viewer.Personalized.currentViewer"] + assert.False(t, ok) + }) + + t.Run("merges into existing cached value", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + existing := mustParseArena(t, ar, `{"name":"Alice"}`) + l.requestScopedL1["viewer.Personalized.currentViewer"] = existing + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"1","currentViewer":{"name":"Alice","role":"admin"}}`), + } + + l.exportRequestScopedFields(&result{}, cfg, items) + + cached, ok := l.requestScopedL1["viewer.Personalized.currentViewer"] + require.True(t, ok) + marshaled := string(cached.MarshalTo(nil)) + assert.Equal(t, `{"name":"Alice","role":"admin"}`, marshaled) + }) +} + +func TestRequestScopedRoundTrip(t *testing.T) { + t.Parallel() + + t.Run("export then inject round-trip with field widening", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Step 1: Export {id, name} from root field + exportCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + exportItems := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"1","currentViewer":{"id":"1","name":"Alice"}}`), + } + l.exportRequestScopedFields(&result{}, exportCfg, exportItems) + + // Step 2: Try injection with ProvidesData that demands "email" (missing) — should fail + injectCfg1 := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("id", "name", "email"), + }, + }, + } + injectItems1 := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + } + ok := l.tryRequestScopedInjection(&result{}, injectCfg1, injectItems1) + assert.False(t, ok) + + // Step 3: Try injection with ProvidesData that is satisfied — should succeed + injectCfg2 := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + ProvidesData: newViewerObj("id", "name"), + }, + }, + } + injectItems2 := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + } + ok = l.tryRequestScopedInjection(&result{}, injectCfg2, injectItems2) + assert.True(t, ok) + assert.Equal(t, `{"id":"99","currentViewer":{"id":"1","name":"Alice"}}`, string(injectItems2[0].MarshalTo(nil))) + }) + + t.Run("multiple hints one blocked by field widening other cached", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Store two cached values + l.requestScopedL1["key1"] = mustParseArena(t, ar, `{"id":"1"}`) + l.requestScopedL1["key2"] = mustParseArena(t, ar, `{"x":"y","z":"w"}`) + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "hint1", + FieldPath: []string{"hint1"}, + L1Key: "key1", + ProvidesData: newViewerObj("id", "name"), // "name" missing from cached value + }, + { + FieldName: "hint2", + FieldPath: []string{"hint2"}, + L1Key: "key2", + ProvidesData: newViewerObj("x"), // satisfied + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, cfg, items) + assert.False(t, ok) // Not all hints satisfied + + // With collect-then-inject, items are NOT mutated when any hint fails. + // Neither hint1 nor hint2 should be injected. + marshaled := string(items[0].MarshalTo(nil)) + assert.NotContains(t, marshaled, `"hint2"`) + assert.NotContains(t, marshaled, `"hint1"`) + }) + + t.Run("export then inject round-trip", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Step 1: First fetch exports the value (no ProvidesData — byte-copy path) + exportCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + exportItems := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"1","currentViewer":{"name":"Alice","role":"admin"}}`), + } + l.exportRequestScopedFields(&result{}, exportCfg, exportItems) + + // Step 2: Second fetch attempts injection (nil ProvidesData — byte-copy path) + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + injectItems := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + mustParseArena(t, ar, `{"id":"100"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, injectCfg, injectItems) + assert.True(t, ok) + + assert.Equal(t, `{"id":"99","currentViewer":{"name":"Alice","role":"admin"}}`, string(injectItems[0].MarshalTo(nil))) + assert.Equal(t, `{"id":"100","currentViewer":{"name":"Alice","role":"admin"}}`, string(injectItems[1].MarshalTo(nil))) + }) +} + +func TestExportedValuesAreIndependentCopies(t *testing.T) { + t.Parallel() + + t.Run("exported values are structurally independent from source", func(t *testing.T) { + t.Parallel() + + // Both source and copy live on the same arena (the Loader's jsonArena), + // which matches the real runtime: exportRequestScopedFields is called + // from the main thread where items are already on l.jsonArena. + // StructuralCopy gives structural isolation (mutating the copy's + // container nodes doesn't affect the source) while aliasing leaf + // values for efficiency. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + + sourceData := mustParseArena(t, ar, `{"id":"1","currentViewer":{"id":"v1","name":"Alice"}}`) + items := []*astjson.Value{sourceData} + + // Export the value + l.exportRequestScopedFields(&result{}, cfg, items) + + // Verify the value was stored + cached, ok := l.requestScopedL1["viewer.Personalized.currentViewer"] + require.True(t, ok) + assert.Equal(t, `{"id":"v1","name":"Alice"}`, string(cached.MarshalTo(nil))) + + // Mutate the source to verify structural independence. + sourceData.Get("currentViewer").Set(ar, "name", astjson.StringValue(ar, "Mutated")) + + // The stored value must still produce the original JSON because + // exportRequestScopedFields creates a structurally independent copy. + assert.Equal(t, `{"id":"v1","name":"Alice"}`, string(cached.MarshalTo(nil))) + + // Injection using the stored value must succeed with original data. + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + injectItems := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"99"}`), + } + injected := l.tryRequestScopedInjection(&result{}, injectCfg, injectItems) + assert.True(t, injected) + assert.Equal(t, `{"id":"99","currentViewer":{"id":"v1","name":"Alice"}}`, string(injectItems[0].MarshalTo(nil))) + }) + + t.Run("export then inject with multiple items", func(t *testing.T) { + t.Parallel() + + // Single arena — mirrors real runtime where all values live on l.jsonArena. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + cfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + + sourceItem := mustParseArena(t, ar, `{"id":"1","currentViewer":{"id":"v1","name":"Alice","role":"admin"}}`) + l.exportRequestScopedFields(&result{}, cfg, []*astjson.Value{sourceItem}) + + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: "viewer.Personalized.currentViewer", + }, + }, + } + injectItems := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"entity1"}`), + mustParseArena(t, ar, `{"id":"entity2"}`), + } + + ok := l.tryRequestScopedInjection(&result{}, injectCfg, injectItems) + assert.True(t, ok) + assert.Equal(t, `{"id":"entity1","currentViewer":{"id":"v1","name":"Alice","role":"admin"}}`, string(injectItems[0].MarshalTo(nil))) + assert.Equal(t, `{"id":"entity2","currentViewer":{"id":"v1","name":"Alice","role":"admin"}}`, string(injectItems[1].MarshalTo(nil))) + }) +} + +// TestRequestScopedAliasHandling verifies that aliasing of the @requestScoped field +// is transparent to the L1 cache: the L1Key is schema-based and the stored value is +// normalized to schema field names, so any alias combination on export and inject +// operates on the same cache entry. +func TestRequestScopedAliasHandling(t *testing.T) { + t.Parallel() + + const l1Key = "viewer.Personalized.currentViewer" + + t.Run("root uses alias, entity fetch uses schema name", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Root query: { myViewer: currentViewer { id name } } + // Response shape has the field under the alias "myViewer". + rootData := mustParseArena(t, ar, `{"myViewer":{"id":"v1","name":"Alice"}}`) + exportCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"myViewer"}, // response path (alias) + L1Key: l1Key, + }, + }, + } + l.exportRequestScopedFields(&result{}, exportCfg, []*astjson.Value{rootData}) + + // Verify L1 stored the inner object keyed by schema field names + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"id":"v1","name":"Alice"}`, string(cached.MarshalTo(nil))) + + // Entity fetch uses schema name "currentViewer" (no alias at entity-fetch location) + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", // response key at entity-fetch location + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: newViewerObj("id", "name"), + }, + }, + } + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + injected := l.tryRequestScopedInjection(&result{}, injectCfg, items) + assert.True(t, injected) + assert.Equal(t, `{"id":"a1","currentViewer":{"id":"v1","name":"Alice"}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("root no alias, entity fetch uses alias", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Root query: { currentViewer { id name } } — no alias + rootData := mustParseArena(t, ar, `{"currentViewer":{"id":"v1","name":"Alice"}}`) + exportCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + }, + }, + } + l.exportRequestScopedFields(&result{}, exportCfg, []*astjson.Value{rootData}) + + // Entity fetch: { articles { cv: currentViewer { id name } } } — alias "cv" + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "cv", // alias at entity-fetch location + FieldPath: []string{"cv"}, + L1Key: l1Key, + ProvidesData: newViewerObj("id", "name"), + }, + }, + } + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + injected := l.tryRequestScopedInjection(&result{}, injectCfg, items) + assert.True(t, injected) + assert.Equal(t, `{"id":"a1","cv":{"id":"v1","name":"Alice"}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("root uses alias A, entity fetch uses alias B", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Root: { myViewer: currentViewer { id name } } + rootData := mustParseArena(t, ar, `{"myViewer":{"id":"v1","name":"Alice"}}`) + exportCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"myViewer"}, + L1Key: l1Key, + }, + }, + } + l.exportRequestScopedFields(&result{}, exportCfg, []*astjson.Value{rootData}) + + // Entity: { articles { cv: currentViewer { id name } } } + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "cv", + FieldPath: []string{"cv"}, + L1Key: l1Key, + ProvidesData: newViewerObj("id", "name"), + }, + }, + } + items := []*astjson.Value{ + mustParseArena(t, ar, `{"id":"a1"}`), + mustParseArena(t, ar, `{"id":"a2"}`), + } + injected := l.tryRequestScopedInjection(&result{}, injectCfg, items) + assert.True(t, injected) + assert.Equal(t, `{"id":"a1","cv":{"id":"v1","name":"Alice"}}`, string(items[0].MarshalTo(nil))) + assert.Equal(t, `{"id":"a2","cv":{"id":"v1","name":"Alice"}}`, string(items[1].MarshalTo(nil))) + }) + + t.Run("sub-field alias on export is normalized to schema name in L1", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Root: { currentViewer { id displayName: name } } + // The response has the aliased sub-field "displayName". + // L1 must store it under the schema name "name" so that a later + // entity fetch requesting { currentViewer { id name } } finds it. + rootData := mustParseArena(t, ar, `{"currentViewer":{"id":"v1","displayName":"Alice"}}`) + + // ProvidesData describes the response shape at the export location. + // Field "displayName" is an alias of schema field "name". + exportProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("displayName"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(exportProvides) + require.True(t, exportProvides.HasAliases) + + exportCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: exportProvides, + }, + }, + } + l.exportRequestScopedFields(&result{}, exportCfg, []*astjson.Value{rootData}) + + // Verify L1 stored the value with schema field names (normalized) + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"id":"v1","name":"Alice"}`, string(cached.MarshalTo(nil))) + + // Entity fetch requesting { currentViewer { id name } } — uses schema name + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: newViewerObj("id", "name"), + }, + }, + } + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + injected := l.tryRequestScopedInjection(&result{}, injectCfg, items) + assert.True(t, injected) + assert.Equal(t, `{"id":"a1","currentViewer":{"id":"v1","name":"Alice"}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("sub-field alias on inject re-applies alias from schema-name L1", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // L1 already has the schema-normalized value + l.requestScopedL1[l1Key] = mustParseArena(t, ar, `{"id":"v1","name":"Alice"}`) + + // Entity fetch: { articles { currentViewer { id displayName: name } } } + // ProvidesData tells the loader: cached field "name" should appear in + // the injected value as "displayName". + injectProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("displayName"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(injectProvides) + require.True(t, injectProvides.HasAliases) + + injectCfg := FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: injectProvides, + }, + }, + } + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + injected := l.tryRequestScopedInjection(&result{}, injectCfg, items) + assert.True(t, injected) + assert.Equal(t, `{"id":"a1","currentViewer":{"id":"v1","displayName":"Alice"}}`, string(items[0].MarshalTo(nil))) + }) +} + +// TestRequestScopedProvidesDataShapes covers Object-tree-based scenarios that the +// old flat RequiredFields / SubFieldAliases API could not express: nested aliases, +// arrays of objects with aliased item fields, arg-variant fields, mixed aliases at +// multiple depths, __typename preservation, and nested null sub-objects. +func TestRequestScopedProvidesDataShapes(t *testing.T) { + t.Parallel() + + const l1Key = "viewer.Personalized.currentViewer" + + t.Run("nested sub-field alias round-trip", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Query: { currentViewer { profile { displayName: name } } } + // profile is a nested object; its sub-field "name" is aliased to "displayName". + profileObj := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("displayName"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + provides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("profile"), Value: profileObj}, + }, + } + ComputeHasAliases(provides) + require.True(t, provides.HasAliases) + + // Export: the response has "displayName" under profile — must be + // normalized to "name" for cache storage. + rootData := mustParseArena(t, ar, `{"currentViewer":{"profile":{"displayName":"Alice"}}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + {FieldPath: []string{"currentViewer"}, L1Key: l1Key, ProvidesData: provides}, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"profile":{"name":"Alice"}}`, string(cached.MarshalTo(nil))) + + // Inject: same shape, alias must be re-applied on read. + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: provides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"profile":{"displayName":"Alice"}}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("array of objects with aliased item field", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Query: { currentViewer { posts { heading: title } } } + itemObj := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("heading"), OriginalName: []byte("title"), Value: &Scalar{}}, + }, + } + postsArr := &Array{Item: itemObj} + provides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("posts"), Value: postsArr}, + }, + } + ComputeHasAliases(provides) + require.True(t, provides.HasAliases) + + rootData := mustParseArena(t, ar, `{"currentViewer":{"posts":[{"heading":"First"},{"heading":"Second"}]}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + {FieldPath: []string{"currentViewer"}, L1Key: l1Key, ProvidesData: provides}, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"posts":[{"title":"First"},{"title":"Second"}]}`, string(cached.MarshalTo(nil))) + + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: provides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"posts":[{"heading":"First"},{"heading":"Second"}]}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("arg-variant sub-field uses hashed field name in cache", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.Variables = astjson.MustParseBytes([]byte(`{"a":"5"}`)) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + + l := &Loader{ + jsonArena: ar, + ctx: ctx, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Query: { currentViewer { posts(first: 5) { id } } } + // posts has CacheArgs — cache stores the field under "posts_". + postsItem := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + }, + } + postsField := &Field{ + Name: []byte("posts"), + Value: &Array{Item: postsItem}, + CacheArgs: []CacheFieldArg{{ArgName: "first", VariableName: "a"}}, + } + provides := &Object{ + Nullable: true, + Fields: []*Field{postsField}, + } + ComputeHasAliases(provides) + require.True(t, provides.HasAliases, "HasAliases must be set for CacheArgs fields") + + rootData := mustParseArena(t, ar, `{"currentViewer":{"posts":[{"id":"p1"},{"id":"p2"}]}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + {FieldPath: []string{"currentViewer"}, L1Key: l1Key, ProvidesData: provides}, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + suffix := l.computeArgSuffix(postsField.CacheArgs) + // Under the hood the cache key includes the arg hash suffix. + assert.Equal(t, `{"posts`+suffix+`":[{"id":"p1"},{"id":"p2"}]}`, string(cached.MarshalTo(nil))) + + // Inject: ProvidesData with the same CacheArgs re-reads the hashed key + // and writes the response-visible name "posts". + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: provides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"posts":[{"id":"p1"},{"id":"p2"}]}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("mixed aliases at multiple depths", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Query: + // { currentViewer { + // uid: id + // prof: profile { label: name } + // } } + profileObj := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("label"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + provides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("uid"), OriginalName: []byte("id"), Value: &Scalar{}}, + {Name: []byte("prof"), OriginalName: []byte("profile"), Value: profileObj}, + }, + } + ComputeHasAliases(provides) + require.True(t, provides.HasAliases) + + rootData := mustParseArena(t, ar, `{"currentViewer":{"uid":"v1","prof":{"label":"Alice"}}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + {FieldPath: []string{"currentViewer"}, L1Key: l1Key, ProvidesData: provides}, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"id":"v1","profile":{"name":"Alice"}}`, string(cached.MarshalTo(nil))) + + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: provides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"uid":"v1","prof":{"label":"Alice"}}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("__typename is preserved through export normalization", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Query has an alias sub-field so HasAliases is set, forcing the + // normalize path that must also preserve __typename. + provides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("displayName"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(provides) + require.True(t, provides.HasAliases) + + rootData := mustParseArena(t, ar, `{"currentViewer":{"__typename":"Viewer","displayName":"Alice"}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + {FieldPath: []string{"currentViewer"}, L1Key: l1Key, ProvidesData: provides}, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `"Viewer"`, string(cached.Get("__typename").MarshalTo(nil))) + assert.Equal(t, `"Alice"`, string(cached.Get("name").MarshalTo(nil))) + }) + + t.Run("nullable nested object stored as null survives validation", func(t *testing.T) { + t.Parallel() + + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + // Query: { currentViewer { profile { id } } } — profile is nullable. + profileObj := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + }, + } + provides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("profile"), Value: profileObj}, + }, + } + ComputeHasAliases(provides) + + // Response has profile: null — the nullable nested object must not + // block validation or cause a panic. + rootData := mustParseArena(t, ar, `{"currentViewer":{"profile":null}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + {FieldPath: []string{"currentViewer"}, L1Key: l1Key, ProvidesData: provides}, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"profile":null}`, string(cached.MarshalTo(nil))) + + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: provides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"profile":null}}`, string(items[0].MarshalTo(nil))) + }) +} + +func TestRequestScopedSyntheticAliasRoundTrip(t *testing.T) { + t.Parallel() + + const l1Key = "viewer.Personalized.currentViewer" + + t.Run("field conflict round-trip keeps synthetic alias mapping stable across export and injection", func(t *testing.T) { + t.Parallel() + + // Export under one alias layout, then inject under a conflicting layout. + // The cache entry must normalize to schema names and denormalize back into the + // consumer's alias layout without swapping the values. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + exportProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), Value: &Scalar{}}, + {Name: []byte("__request_scoped__name_0"), OriginalName: []byte("email"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(exportProvides) + require.True(t, exportProvides.HasAliases) + + // Export writes schema-name-normalized data into requestScoped L1. + rootData := mustParseArena(t, ar, `{"currentViewer":{"id":"v1","name":"Alice","__request_scoped__name_0":"alice@example.com"}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: exportProvides, + }, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"id":"v1","name":"Alice","email":"alice@example.com"}`, string(cached.MarshalTo(nil))) + + injectProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), OriginalName: []byte("email"), Value: &Scalar{}}, + {Name: []byte("__request_scoped__name_1"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(injectProvides) + require.True(t, injectProvides.HasAliases) + + // Injection must remap the schema-name entry into the consumer's synthetic aliases. + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: injectProvides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"id":"v1","name":"alice@example.com","__request_scoped__name_1":"Alice"}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("argument conflict round-trip keeps synthetic alias mapping and arg-hash normalization aligned", func(t *testing.T) { + t.Parallel() + + // Export and inject the same field under two argument variants. The L1 entry must + // normalize to schema-name-plus-arg-suffix keys so each variant survives widening. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + ctx := NewContext(t.Context()) + ctx.ExecutionOptions.Caching.EnableL1Cache = true + ctx.Variables = astjson.MustParseBytes([]byte(`{"a":"1","b":"2"}`)) + + l := &Loader{ + jsonArena: ar, + ctx: ctx, + requestScopedL1: map[string]*astjson.Value{}, + } + + exportNaturalPosts := &Field{ + Name: []byte("posts"), + Value: &Array{Item: &Object{Nullable: true, Fields: []*Field{{Name: []byte("id"), Value: &Scalar{}}}}}, + CacheArgs: []CacheFieldArg{{ArgName: "first", VariableName: "a"}}, + } + exportSyntheticPosts := &Field{ + Name: []byte("__request_scoped__posts_1"), + OriginalName: []byte("posts"), + Value: &Array{Item: &Object{Nullable: true, Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("title"), Value: &Scalar{}}, + }}}, + CacheArgs: []CacheFieldArg{{ArgName: "first", VariableName: "b"}}, + } + exportProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + exportNaturalPosts, + exportSyntheticPosts, + }, + } + ComputeHasAliases(exportProvides) + require.True(t, exportProvides.HasAliases) + + // Export writes both argument variants into requestScoped L1 under their normalized keys. + rootData := mustParseArena(t, ar, `{"currentViewer":{"id":"v1","posts":[{"id":"p1"}],"__request_scoped__posts_1":[{"id":"p2","title":"Second"}]}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: exportProvides, + }, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, + `{"id":"v1","posts`+l.computeArgSuffix(exportNaturalPosts.CacheArgs)+`":[{"id":"p1"}],"posts`+l.computeArgSuffix(exportSyntheticPosts.CacheArgs)+`":[{"id":"p2","title":"Second"}]}`, + string(cached.MarshalTo(nil)), + ) + + injectNaturalPosts := &Field{ + Name: []byte("posts"), + OriginalName: nil, + Value: &Array{Item: &Object{Nullable: true, Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("title"), Value: &Scalar{}}, + }}}, + CacheArgs: []CacheFieldArg{{ArgName: "first", VariableName: "b"}}, + } + injectSyntheticPosts := &Field{ + Name: []byte("__request_scoped__posts_0"), + OriginalName: []byte("posts"), + Value: &Array{Item: &Object{Nullable: true, Fields: []*Field{{Name: []byte("id"), Value: &Scalar{}}}}}, + CacheArgs: []CacheFieldArg{{ArgName: "first", VariableName: "a"}}, + } + injectProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + injectNaturalPosts, + injectSyntheticPosts, + }, + } + ComputeHasAliases(injectProvides) + require.True(t, injectProvides.HasAliases) + + // Injection must reconstruct the caller's argument layout from the normalized cache entry. + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: injectProvides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"id":"v1","posts":[{"id":"p2","title":"Second"}],"__request_scoped__posts_0":[{"id":"p1"}]}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("three conflicting field variants round-trip through schema-name storage and synthetic alias remapping", func(t *testing.T) { + t.Parallel() + + // Three participants map different schema fields into the same response position. + // Export must keep the schema fields distinct, and injection must rebuild the + // consumer-specific alias layout from that shared cache entry. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + exportProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), Value: &Scalar{}}, + {Name: []byte("__request_scoped__name_0"), OriginalName: []byte("email"), Value: &Scalar{}}, + {Name: []byte("__request_scoped__name_1"), OriginalName: []byte("handle"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(exportProvides) + require.True(t, exportProvides.HasAliases) + + // Export writes the shared schema-name view into requestScoped L1. + rootData := mustParseArena(t, ar, `{"currentViewer":{"id":"v1","name":"Alice","__request_scoped__name_0":"alice@example.com","__request_scoped__name_1":"alice-handle"}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: exportProvides, + }, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"id":"v1","name":"Alice","email":"alice@example.com","handle":"alice-handle"}`, string(cached.MarshalTo(nil))) + + injectProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("id"), Value: &Scalar{}}, + {Name: []byte("name"), OriginalName: []byte("handle"), Value: &Scalar{}}, + {Name: []byte("__request_scoped__name_0"), OriginalName: []byte("email"), Value: &Scalar{}}, + {Name: []byte("__request_scoped__name_2"), OriginalName: []byte("name"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(injectProvides) + require.True(t, injectProvides.HasAliases) + + // Injection remaps that shared entry into a different alias layout for the consumer. + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"r1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: injectProvides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"r1","currentViewer":{"id":"v1","name":"alice-handle","__request_scoped__name_0":"alice@example.com","__request_scoped__name_2":"Alice"}}`, string(items[0].MarshalTo(nil))) + }) + + t.Run("hidden requires dependency round-trips from an aliased root participant into the entity participant", func(t *testing.T) { + t.Parallel() + + const l1Key = "viewer.currentViewer" + + // The root participant exports name under a user alias, while the entity participant + // later needs the schema field name for a hidden @requires dependency. + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(1024)) + l := &Loader{ + jsonArena: ar, + requestScopedL1: map[string]*astjson.Value{}, + } + + exportProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("viewerName"), OriginalName: []byte("name"), Value: &Scalar{}}, + {Name: []byte("__typename"), Value: &Scalar{}}, + {Name: []byte("id"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(exportProvides) + require.True(t, exportProvides.HasAliases) + + // Export must normalize the aliased root field back to the schema field name. + rootData := mustParseArena(t, ar, `{"currentViewer":{"viewerName":"Alice","__typename":"Viewer","id":"v1"}}`) + l.exportRequestScopedFields(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: exportProvides, + }, + }, + }, []*astjson.Value{rootData}) + + cached, ok := l.requestScopedL1[l1Key] + require.True(t, ok) + assert.Equal(t, `{"name":"Alice","__typename":"Viewer","id":"v1"}`, string(cached.MarshalTo(nil))) + + injectProvides := &Object{ + Nullable: true, + Fields: []*Field{ + {Name: []byte("name"), Value: &Scalar{}}, + {Name: []byte("__typename"), Value: &Scalar{}}, + {Name: []byte("id"), Value: &Scalar{}}, + }, + } + ComputeHasAliases(injectProvides) + require.False(t, injectProvides.HasAliases) + + // Injection into the entity participant must supply the hidden dependency fields + // exactly as the downstream subgraph expects them. + items := []*astjson.Value{mustParseArena(t, ar, `{"id":"a1"}`)} + ok = l.tryRequestScopedInjection(&result{}, FetchCacheConfiguration{ + RequestScopedFields: []RequestScopedField{ + { + FieldName: "currentViewer", + FieldPath: []string{"currentViewer"}, + L1Key: l1Key, + ProvidesData: injectProvides, + }, + }, + }, items) + assert.True(t, ok) + assert.Equal(t, `{"id":"a1","currentViewer":{"name":"Alice","__typename":"Viewer","id":"v1"}}`, string(items[0].MarshalTo(nil))) + }) +} diff --git a/v2/pkg/engine/resolve/resolvable.go b/v2/pkg/engine/resolve/resolvable.go index 6eb3395327..9f7bfbb8e3 100644 --- a/v2/pkg/engine/resolve/resolvable.go +++ b/v2/pkg/engine/resolve/resolvable.go @@ -62,6 +62,17 @@ type Resolvable struct { currentFieldInfo *FieldInfo + // Entity analytics fields (set during walkObject, used during renderFieldValue) + currentEntityAnalytics *ObjectCacheAnalytics // resolved analytics for current entity (nil = not entity) + currentEntityTypeName string // resolved concrete entity type name + currentEntityKeyRaw string // raw key JSON (when HashKeys=false) + currentEntityKeyHash uint64 // xxhash of key JSON (when HashKeys=true) + currentEntitySource FieldSource // where the entity data came from + + // haltExecution is set to true when ErrorBehaviorHalt encounters an error. + // Once set, remaining fetches and resolution will be skipped. + haltExecution bool + // actualListSizes maps the JSON path to the list size in the final response. // Used to compute the actual cost of the operation. actualListSizes map[string]int @@ -104,6 +115,12 @@ func (r *Resolvable) Reset() { r.renameTypeNames = r.renameTypeNames[:0] r.authorizationError = nil r.astjsonArena = nil + r.haltExecution = false + r.currentEntityAnalytics = nil + r.currentEntityTypeName = "" + r.currentEntityKeyRaw = "" + r.currentEntityKeyHash = 0 + r.currentEntitySource = FieldSourceSubgraph r.xxh.Reset() for k := range r.authorizationAllow { delete(r.authorizationAllow, k) @@ -128,7 +145,7 @@ func (r *Resolvable) Init(ctx *Context, initialData []byte, operationType ast.Op if err != nil { return err } - r.data, _, err = astjson.MergeValues(r.astjsonArena, r.data, initialValue) + r.data, err = astjson.MergeValues(r.astjsonArena, r.data, initialValue) if err != nil { return err } @@ -148,14 +165,14 @@ func (r *Resolvable) InitSubscription(ctx *Context, initialData []byte, postProc return err } if postProcessing.SelectResponseDataPath == nil { - r.data, _, err = astjson.MergeValuesWithPath(r.astjsonArena, r.data, initialValue, postProcessing.MergePath...) + r.data, err = astjson.MergeValuesWithPath(r.astjsonArena, r.data, initialValue, postProcessing.MergePath...) if err != nil { return err } } else { selectedInitialValue := initialValue.Get(postProcessing.SelectResponseDataPath...) if selectedInitialValue != nil { - r.data, _, err = astjson.MergeValuesWithPath(r.astjsonArena, r.data, selectedInitialValue, postProcessing.MergePath...) + r.data, err = astjson.MergeValuesWithPath(r.astjsonArena, r.data, selectedInitialValue, postProcessing.MergePath...) if err != nil { return err } @@ -224,6 +241,12 @@ func (r *Resolvable) Resolve(ctx context.Context, rootData *Object, fetchTree *F if r.authorizationError != nil { return r.authorizationError } + + // In HALT mode, if we encountered any error, the entire data becomes null + if r.haltExecution { + hasErrors = true + } + r.printBytes(lBrace) if r.hasErrors() { r.printErrors() @@ -264,6 +287,33 @@ func (r *Resolvable) err() bool { return true } +// handleNonNullableError handles the error behavior for non-nullable field errors. +// Returns true if the error should propagate (bubble up), false if it should stop here. +func (r *Resolvable) handleNonNullableError() bool { + // If ctx is nil (e.g., during variable rendering), default to PROPAGATE behavior + if r.ctx == nil { + return true + } + + switch r.ctx.ExecutionOptions.ErrorBehavior { + case ErrorBehaviorNull: + // NULL mode: don't propagate, the field becomes null even if non-nullable + return false + case ErrorBehaviorHalt: + // HALT mode: stop execution entirely, propagate the error + r.haltExecution = true + return true + default: + // PROPAGATE mode (default): traditional null bubbling + return true + } +} + +// HaltExecution returns true if execution should be halted (HALT mode encountered an error). +func (r *Resolvable) HaltExecution() bool { + return r.haltExecution +} + func (r *Resolvable) printErrors() { r.printBytes(quote) r.printBytes(literalErrors) @@ -519,6 +569,43 @@ func (r *Resolvable) renderFieldValue(value *astjson.Value, valueBytes []byte, n } else { _, r.printErr = r.out.Write(valueBytes) } + + // Hash field value for cache analytics (two-tier check: plan-time fast path + runtime fallback) + if r.ctx != nil && r.ctx.cacheAnalytics != nil && r.currentEntityAnalytics != nil && r.currentFieldInfo != nil { + // Guard: only hash fields that belong to the current entity type. + // When a non-entity (Review) is nested inside an entity (User), + // currentEntityAnalytics is still User's — we must NOT hash Review.body. + isOnCurrentEntity := r.currentFieldInfo.ExactParentTypeName == r.currentEntityTypeName + if !isOnCurrentEntity { + // Check ParentTypeNames for polymorphic match (interface field on concrete entity) + for _, pt := range r.currentFieldInfo.ParentTypeNames { + if pt == r.currentEntityTypeName { + isOnCurrentEntity = true + break + } + } + } + + if isOnCurrentEntity { + shouldHash := false + if r.currentFieldInfo.CacheAnalyticsHash { + // Fast path: plan-time guarantee (concrete entity, non-key field) + shouldHash = true + } else if !r.currentEntityAnalytics.IsKeyField(r.currentFieldInfo.Name) { + // Runtime fallback: field is NOT a key field on the resolved entity + // Handles: (a) polymorphic parents where plan-time couldn't determine + // (b) correctly skips actual key fields (IsKeyField returns true) + shouldHash = true + } + if shouldHash { + r.ctx.cacheAnalytics.HashFieldValue( + r.currentEntityTypeName, r.currentFieldInfo.Name, valueBytes, + r.currentEntityKeyRaw, r.currentEntityKeyHash, + r.currentEntitySource, + ) + } + } + } } func (r *Resolvable) pushArrayPathElement(index int) { @@ -594,7 +681,10 @@ func (r *Resolvable) walkObject(obj *Object, parent *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(obj.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } r.pushNodePathElement(obj.Path) isRoot := r.depth < 2 @@ -637,6 +727,58 @@ func (r *Resolvable) walkObject(obj *Object, parent *astjson.Value) bool { if r.print && !isRoot { r.printBytes(lBrace) } + + // Entity analytics (only during print phase, O(1) check via plan-time annotation) + if r.print && r.ctx != nil && r.ctx.cacheAnalytics != nil && obj.CacheAnalytics != nil { + // Resolve concrete entity analytics (handles polymorphic types) + analytics := obj.CacheAnalytics + entityTypeName := obj.TypeName + if analytics.ByTypeName != nil { + // Polymorphic type: resolve __typename and look up concrete analytics + concreteType := string(value.GetStringBytes("__typename")) + analytics = analytics.ByTypeName[concreteType] // nil if non-entity member + entityTypeName = concreteType + } + + if analytics != nil { + // Save/restore entity context for nested entities + savedAnalytics := r.currentEntityAnalytics + savedTypeName := r.currentEntityTypeName + savedKeyRaw := r.currentEntityKeyRaw + savedKeyHash := r.currentEntityKeyHash + savedSource := r.currentEntitySource + defer func() { + r.currentEntityAnalytics = savedAnalytics + r.currentEntityTypeName = savedTypeName + r.currentEntityKeyRaw = savedKeyRaw + r.currentEntityKeyHash = savedKeyHash + r.currentEntitySource = savedSource + }() + + r.currentEntityAnalytics = analytics + r.currentEntityTypeName = entityTypeName + + // Extract key field values (uses plan-time KeyFields directly) + keyJSON := buildEntityKeyJSON(value, analytics.KeyFields) + + // Look up source from loading phase + r.currentEntitySource = r.ctx.cacheAnalytics.EntitySource(entityTypeName, string(keyJSON)) + + // Hash or raw key (uses plan-time HashKeys directly) + if analytics.HashKeys { + r.xxh.Reset() + _, _ = r.xxh.Write(keyJSON) + r.currentEntityKeyHash = r.xxh.Sum64() + r.currentEntityKeyRaw = "" + } else { + r.currentEntityKeyRaw = string(keyJSON) + r.currentEntityKeyHash = 0 + } + + r.ctx.cacheAnalytics.IncrementEntityCount(entityTypeName, string(keyJSON)) + } + } + addComma := false r.typeNames = append(r.typeNames, typeName) @@ -838,7 +980,10 @@ func (r *Resolvable) walkArray(arr *Array, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(arr.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } r.pushNodePathElement(arr.Path) defer r.popNodePathElement(arr.Path) @@ -929,7 +1074,10 @@ func (r *Resolvable) walkString(s *String, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(s.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } if value.Type() != astjson.TypeString { r.marshalBuf = value.MarshalTo(r.marshalBuf[:0]) @@ -975,7 +1123,10 @@ func (r *Resolvable) walkBoolean(b *Boolean, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(b.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } if value.Type() != astjson.TypeTrue && value.Type() != astjson.TypeFalse { r.marshalBuf = value.MarshalTo(r.marshalBuf[:0]) @@ -996,7 +1147,10 @@ func (r *Resolvable) walkInteger(i *Integer, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(i.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } if value.Type() != astjson.TypeNumber { r.marshalBuf = value.MarshalTo(r.marshalBuf[:0]) @@ -1017,7 +1171,10 @@ func (r *Resolvable) walkFloat(f *Float, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(f.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } if !r.print { if value.Type() != astjson.TypeNumber { @@ -1047,7 +1204,10 @@ func (r *Resolvable) walkBigInt(b *BigInt, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(b.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } if r.print { r.renderScalarFieldValue(value, b.Nullable) @@ -1063,7 +1223,10 @@ func (r *Resolvable) walkScalar(s *Scalar, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(s.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } if r.print { r.renderScalarFieldValue(value, s.Nullable) @@ -1095,7 +1258,10 @@ func (r *Resolvable) walkCustom(c *CustomNode, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(c.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } r.marshalBuf = value.MarshalTo(r.marshalBuf[:0]) resolved, err := c.Resolve(r.ctx, r.marshalBuf) @@ -1174,7 +1340,10 @@ func (r *Resolvable) walkEnum(e *Enum, value *astjson.Value) bool { return r.walkNull() } r.addNonNullableFieldError(e.Path, parent) - return r.err() + if r.handleNonNullableError() { + return r.err() + } + return r.walkNull() } if value.Type() != astjson.TypeString { r.marshalBuf = value.MarshalTo(r.marshalBuf[:0]) @@ -1302,7 +1471,13 @@ func (r *Resolvable) renderFieldCoordinates() string { case 1: return r.renderRootFieldCoordinates(r.path[0].Name) default: - return fmt.Sprintf("%s.%s", r.enclosingTypeName(), r.path[pathLength-1].Name) + typeName := r.enclosingTypeName() + fieldName := r.path[pathLength-1].Name + if typeName == "" { + // Fall back to full path if no type name is available + return r.renderFieldPath() + } + return fmt.Sprintf("%s.%s", typeName, fieldName) } } diff --git a/v2/pkg/engine/resolve/resolvable_test.go b/v2/pkg/engine/resolve/resolvable_test.go index aea4e78eff..d592298d9c 100644 --- a/v2/pkg/engine/resolve/resolvable_test.go +++ b/v2/pkg/engine/resolve/resolvable_test.go @@ -824,7 +824,7 @@ func BenchmarkResolvable_Resolve(b *testing.B) { b.SetBytes(int64(len(expected))) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { out.Reset() err = res.Resolve(context.Background(), object, nil, out) if err != nil { @@ -910,7 +910,7 @@ func BenchmarkResolvable_ResolveWithErrorBubbleUp(b *testing.B) { b.SetBytes(int64(len(expected))) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { out.Reset() err = res.Resolve(context.Background(), object, nil, out) if err != nil { diff --git a/v2/pkg/engine/resolve/resolve.go b/v2/pkg/engine/resolve/resolve.go index f735752ef9..5e7756f6a2 100644 --- a/v2/pkg/engine/resolve/resolve.go +++ b/v2/pkg/engine/resolve/resolve.go @@ -10,12 +10,15 @@ import ( "io" "net/http" "runtime" + "strconv" + "strings" "time" "github.com/buger/jsonparser" "github.com/pkg/errors" "go.uber.org/atomic" + "github.com/wundergraph/astjson" "github.com/wundergraph/go-arena" "github.com/wundergraph/graphql-go-tools/v2/pkg/internal/xcontext" @@ -94,6 +97,23 @@ func (r *Resolver) SetAsyncErrorWriter(w AsyncErrorWriter) { r.asyncErrorWriter = w } +// CacheCircuitBreakerOpen returns true if the circuit breaker for the named cache +// is currently open (blocking L2 operations). Returns false if the cache doesn't +// exist or has no circuit breaker configured. +func (r *Resolver) CacheCircuitBreakerOpen(cacheName string) bool { + if r.options.Caches == nil { + return false + } + cache, ok := r.options.Caches[cacheName] + if !ok { + return false + } + if cb, ok := cache.(*circuitBreakerCache); ok { + return cb.state.isOpen() + } + return false +} + type tools struct { resolvable *Resolvable loader *Loader @@ -187,6 +207,18 @@ type ResolverOptions struct { ValidateRequiredExternalFields bool + Caches map[string]LoaderCache + + // CacheCircuitBreakers configures per-cache circuit breakers. + // Map key must match a key in Caches. Entries for missing cache names are ignored. + // When a breaker trips (consecutive failures >= threshold), all L2 operations for + // that cache are skipped until the cooldown period elapses. + CacheCircuitBreakers map[string]CircuitBreakerConfig + + // EntityCacheConfigs maps subgraphName → entityTypeName → config. + // Used by extensions-based cache invalidation to look up cache settings at runtime. + EntityCacheConfigs map[string]map[string]*EntityCacheInvalidationConfig + // SubgraphRequestDeduplicationShardCount defines the number of shards to use for subgraph request deduplication SubgraphRequestDeduplicationShardCount int // InboundRequestDeduplicationShardCount defines the number of shards to use for inbound request deduplication @@ -195,6 +227,26 @@ type ResolverOptions struct { // and will override any values set for those options // using runtime.GOMAXPROCS(0) allows the deduplication to scale with the CPU resources available to the process SetDeduplicationShardCountToGOMAXPROCS bool + + // OnErrorEnabled enables the onError feature (request extension + __service introspection). + // When false (default), the feature is completely invisible: + // - onError request extensions are silently ignored + // - __service introspection is not available + // - The server behaves exactly as if the feature doesn't exist + OnErrorEnabled bool + + // DefaultErrorBehavior is the default error behavior when onError is not specified or invalid. + // Invalid values silently fall back to this default. + // Only effective when OnErrorEnabled is true. + DefaultErrorBehavior ErrorBehavior + + // OnSubscriptionCacheWrite is called when a subscription populates the L2 cache. + // Since subscriptions run outside per-request analytics, this callback allows + // the router to record cache write events for metrics/dashboards. + OnSubscriptionCacheWrite func(event CacheWriteEvent) + + // OnSubscriptionCacheInvalidate is called when a subscription invalidates L2 cache entries. + OnSubscriptionCacheInvalidate func(entityType string, keys []string) } // New returns a new Resolver. ctx.Done() is used to cancel all active subscriptions and streams. @@ -259,6 +311,9 @@ func New(ctx context.Context, options ResolverOptions) *Resolver { options.InboundRequestDeduplicationShardCount = n } + // Wrap caches with circuit breakers where configured + options.Caches = wrapCachesWithCircuitBreakers(options.Caches, options.CacheCircuitBreakers) + resolver := &Resolver{ ctx: ctx, options: options, @@ -308,6 +363,8 @@ func newTools(options ResolverOptions, allowedExtensionFields map[string]struct{ validateRequiredExternalFields: options.ValidateRequiredExternalFields, singleFlight: sf, jsonArena: a, + caches: options.Caches, + entityCacheConfigs: options.EntityCacheConfigs, }, } } @@ -321,41 +378,7 @@ type GraphQLResolveInfo struct { ResolveDeduplicated bool } -func (r *Resolver) ResolveGraphQLResponse(ctx *Context, response *GraphQLResponse, data []byte, writer io.Writer) (*GraphQLResolveInfo, error) { - resp := &GraphQLResolveInfo{} - - start := time.Now() - <-r.maxConcurrency - resp.ResolveAcquireWaitTime = time.Since(start) - defer func() { - r.maxConcurrency <- struct{}{} - }() - - t := newTools(r.options, r.allowedErrorExtensionFields, r.allowedErrorFields, r.subgraphRequestSingleFlight, nil) - - err := t.resolvable.Init(ctx, data, response.Info.OperationType) - if err != nil { - return nil, err - } - - if !ctx.ExecutionOptions.SkipLoader { - err = t.loader.LoadGraphQLResponseData(ctx, response, t.resolvable) - if err != nil { - return nil, err - } - } - - err = t.resolvable.Resolve(ctx.ctx, response.Data, response.Fetches, writer) - if err != nil { - return nil, err - } - - ctx.ActualListSizes = t.resolvable.actualListSizes - - return resp, err -} - -func (r *Resolver) ArenaResolveGraphQLResponse(ctx *Context, response *GraphQLResponse, writer io.Writer) (*GraphQLResolveInfo, error) { +func (r *Resolver) ResolveGraphQLResponse(ctx *Context, response *GraphQLResponse, writer io.Writer) (*GraphQLResolveInfo, error) { resp := &GraphQLResolveInfo{} inflight, err := r.inboundRequestSingleFlight.GetOrCreate(ctx, response) @@ -385,10 +408,16 @@ func (r *Resolver) ArenaResolveGraphQLResponse(ctx *Context, response *GraphQLRe // we're intentionally not using defer Release to have more control over the timing (see below) t := newTools(r.options, r.allowedErrorExtensionFields, r.allowedErrorFields, r.subgraphRequestSingleFlight, resolveArena.Arena) + releaseResolveArena := func() { + t.resolvable.Reset() + t.loader.Free() + r.resolveArenaPool.Release(resolveArena) + } + err = t.resolvable.Init(ctx, nil, response.Info.OperationType) if err != nil { r.inboundRequestSingleFlight.FinishErr(inflight, err) - r.resolveArenaPool.Release(resolveArena) + releaseResolveArena() return nil, err } @@ -396,7 +425,7 @@ func (r *Resolver) ArenaResolveGraphQLResponse(ctx *Context, response *GraphQLRe err = t.loader.LoadGraphQLResponseData(ctx, response, t.resolvable) if err != nil { r.inboundRequestSingleFlight.FinishErr(inflight, err) - r.resolveArenaPool.Release(resolveArena) + releaseResolveArena() return nil, err } } @@ -407,15 +436,20 @@ func (r *Resolver) ArenaResolveGraphQLResponse(ctx *Context, response *GraphQLRe err = t.resolvable.Resolve(ctx.ctx, response.Data, response.Fetches, buf) if err != nil { r.inboundRequestSingleFlight.FinishErr(inflight, err) - r.resolveArenaPool.Release(resolveArena) + releaseResolveArena() r.responseBufferPool.Release(responseArena) return nil, err } + // Transfer ownership of the actualListSizes map to the caller before + // releaseResolveArena() invokes Resolvable.Reset(), which deletes every + // entry from the map in place — it would otherwise empty the same map + // the caller now holds (Go maps are reference types). ctx.ActualListSizes = t.resolvable.actualListSizes + t.resolvable.actualListSizes = nil // first release resolverArena // all data is resolved and written into the response arena - r.resolveArenaPool.Release(resolveArena) + releaseResolveArena() // next we write back to the client // this includes flushing and syscalls // as such, it can take some time @@ -436,6 +470,12 @@ func (r *Resolver) ArenaResolveGraphQLResponse(ctx *Context, response *GraphQLRe return resp, err } +// Deprecated: use ResolveGraphQLResponse instead. This wrapper is kept for +// backwards compatibility and will be removed in a future release. +func (r *Resolver) ArenaResolveGraphQLResponse(ctx *Context, response *GraphQLResponse, writer io.Writer) (*GraphQLResolveInfo, error) { + return r.ResolveGraphQLResponse(ctx, response, writer) +} + type trigger struct { id uint64 cancel context.CancelFunc @@ -443,6 +483,12 @@ type trigger struct { // initialized is set to true when the trigger is started and initialized initialized bool updater *subscriptionUpdater + // cacheConfig is computed once at trigger creation from the first subscription. + // All subscriptions on a trigger share the same plan (and hence the same + // cache config) because the trigger ID is derived from hash(input + headers). + // Different plans produce different inputs, which produce different triggers. + // nil means no entity cache population is configured for this trigger. + cacheConfig *triggerEntityCacheConfig } func (t *trigger) subscriptionIds() map[context.Context]SubscriptionIdentifier { @@ -577,9 +623,13 @@ func (r *Resolver) executeSubscriptionUpdate(resolveCtx *Context, sub *sub, shar resolveArena := r.resolveArenaPool.Acquire(resolveCtx.Request.ID) t := newTools(r.options, r.allowedErrorExtensionFields, r.allowedErrorFields, r.subgraphRequestSingleFlight, resolveArena.Arena) + defer func() { + t.resolvable.Reset() + t.loader.Free() + r.resolveArenaPool.Release(resolveArena) + }() if err := t.resolvable.InitSubscription(resolveCtx, input, sub.resolve.Trigger.PostProcessing); err != nil { - r.resolveArenaPool.Release(resolveArena) r.asyncErrorWriter.WriteError(resolveCtx, err, sub.resolve.Response, sub.writer) if r.options.Debug { fmt.Printf("resolver:trigger:subscription:init:failed:%d\n", sub.id.SubscriptionID) @@ -591,7 +641,6 @@ func (r *Resolver) executeSubscriptionUpdate(resolveCtx *Context, sub *sub, shar } if err := t.loader.LoadGraphQLResponseData(resolveCtx, sub.resolve.Response, t.resolvable); err != nil { - r.resolveArenaPool.Release(resolveArena) r.asyncErrorWriter.WriteError(resolveCtx, err, sub.resolve.Response, sub.writer) if r.options.Debug { fmt.Printf("resolver:trigger:subscription:load:failed:%d\n", sub.id.SubscriptionID) @@ -603,7 +652,6 @@ func (r *Resolver) executeSubscriptionUpdate(resolveCtx *Context, sub *sub, shar } if err := t.resolvable.Resolve(resolveCtx.ctx, sub.resolve.Response.Data, sub.resolve.Response.Fetches, sub.writer); err != nil { - r.resolveArenaPool.Release(resolveArena) r.asyncErrorWriter.WriteError(resolveCtx, err, sub.resolve.Response, sub.writer) if r.options.Debug { fmt.Printf("resolver:trigger:subscription:resolve:failed:%d\n", sub.id.SubscriptionID) @@ -614,8 +662,6 @@ func (r *Resolver) executeSubscriptionUpdate(resolveCtx *Context, sub *sub, shar return } - r.resolveArenaPool.Release(resolveArena) - if err := sub.writer.Flush(); err != nil { // If flush fails (e.g. client disconnected), remove the subscription. _ = r.AsyncUnsubscribeSubscription(sub.id) @@ -634,6 +680,244 @@ func (r *Resolver) executeSubscriptionUpdate(resolveCtx *Context, sub *sub, shar } } +// triggerEntityCacheConfig holds the minimal config needed for the +// async trigger-level entity cache goroutine. +type triggerEntityCacheConfig struct { + pop *SubscriptionEntityCachePopulation + resolveCtx *Context + postProcess PostProcessingConfiguration +} + +// buildTriggerCacheConfig checks a subscription's cache configuration and returns +// a triggerEntityCacheConfig if entity cache population is configured and all +// preconditions are met. Called once at trigger creation time. +func (r *Resolver) buildTriggerCacheConfig(c *Context, s *sub) *triggerEntityCacheConfig { + pop := s.resolve.EntityCachePopulation + if pop == nil || pop.CacheKeyTemplate == nil { + return nil + } + if !c.ExecutionOptions.Caching.EnableL2Cache { + return nil + } + if _, ok := r.options.Caches[pop.CacheName]; !ok { + return nil + } + return &triggerEntityCacheConfig{ + pop: pop, + resolveCtx: c, + postProcess: s.resolve.Trigger.PostProcessing, + } +} + +// handleTriggerEntityCache performs the L2 cache operation (set or delete) for +// root entities received via a subscription event. This is the trigger-level +// version that runs once per trigger event instead of once per subscription. +// +// THREADING: This method runs in a dedicated goroutine (via performTriggerEntityCacheAsync). +// It reads config.resolveCtx which was captured at subscription creation time. This is safe +// because the accessed fields (Request.ID, SubgraphHeadersBuilder, ExecutionOptions, Variables, +// RemapVariables) are not mutated after subscription creation. Do NOT write to resolveCtx from here. +func (r *Resolver) handleTriggerEntityCache(config *triggerEntityCacheConfig, data []byte) { + cache, ok := r.options.Caches[config.pop.CacheName] + if !ok { + return + } + + // Get the global prefix and subgraph header prefix for cache key isolation. + // Mirrors prepareCacheKeys(): global prefix → header hash prefix → interceptor. + var prefix string + globalPrefix := config.resolveCtx.ExecutionOptions.Caching.GlobalCacheKeyPrefix + if config.pop.IncludeSubgraphHeaderPrefix && config.resolveCtx.SubgraphHeadersBuilder != nil { + _, hash := config.resolveCtx.SubgraphHeadersBuilder.HeadersForSubgraph(config.pop.DataSourceName) + var buf [20]byte + b := strconv.AppendUint(buf[:0], hash, 10) + if globalPrefix != "" { + prefix = globalPrefix + ":" + string(b) + } else { + prefix = string(b) + } + } else if globalPrefix != "" { + prefix = globalPrefix + } + + // We need a temporary resolvable to parse the subscription data and extract entity items. + resolveArena := r.resolveArenaPool.Acquire(config.resolveCtx.Request.ID) + t := newTools(r.options, r.allowedErrorExtensionFields, r.allowedErrorFields, r.subgraphRequestSingleFlight, resolveArena.Arena) + defer func() { + t.resolvable.Reset() + t.loader.Free() + r.resolveArenaPool.Release(resolveArena) + }() + if err := t.resolvable.InitSubscription(config.resolveCtx, data, config.postProcess); err != nil { + return + } + + entityData := t.resolvable.data + if entityData == nil { + return + } + if config.pop.SubscriptionFieldName != "" { + entityData = entityData.Get(config.pop.SubscriptionFieldName) + } + if entityData == nil { + return + } + + // Collect entity items (single entity or array of entities) + var items []*astjson.Value + if entityData.Type() == astjson.TypeArray { + items = entityData.GetArray() + } else if entityData.Type() == astjson.TypeObject { + items = []*astjson.Value{entityData} + } + if len(items) == 0 { + return + } + + // Inject __typename for cache key rendering and filter by entity type. + // Two cases: + // 1. No __typename present: inject the configured EntityTypeName so + // RenderCacheKeys can produce proper keys. + // 2. __typename present but doesn't match (union/interface return types): + // skip the item — only the configured entity type should be cached. + if config.pop.EntityTypeName != "" { + // Allocate a new slice — do NOT use items[:0] because items shares the + // backing array with entityData.GetArray(). Overwriting it would corrupt + // the parsed JSON structure. + filtered := make([]*astjson.Value, 0, len(items)) + for _, item := range items { + existing := item.Get("__typename") + if existing == nil { + item.Set(resolveArena.Arena, "__typename", astjson.StringValue(resolveArena.Arena, config.pop.EntityTypeName)) + filtered = append(filtered, item) + } else { + if string(existing.GetStringBytes()) == config.pop.EntityTypeName { + filtered = append(filtered, item) + } + } + } + items = filtered + if len(items) == 0 { + return + } + } + + // Render cache keys + cacheKeys, err := config.pop.CacheKeyTemplate.RenderCacheKeys(resolveArena.Arena, config.resolveCtx, items, prefix) + if err != nil || len(cacheKeys) == 0 { + return + } + + // Apply L2CacheKeyInterceptor to match the full key construction pipeline + // used by prepareCacheKeys() and processExtensionsCacheInvalidation(). + // Without this, custom key transforms (e.g., tenant prefix) would be missing + // from subscription cache operations, causing cache key mismatches. + if interceptor := config.resolveCtx.ExecutionOptions.Caching.L2CacheKeyInterceptor; interceptor != nil { + interceptorInfo := L2CacheKeyInterceptorInfo{ + SubgraphName: config.pop.DataSourceName, + CacheName: config.pop.CacheName, + } + for _, ck := range cacheKeys { + for i, key := range ck.Keys { + ck.Keys[i] = interceptor(config.resolveCtx.ctx, key, interceptorInfo) + } + } + } + + // Use the resolver context (not client context) since this is a trigger-level operation + ctx := r.ctx + + // Copy cache key strings off the arena before releasing it. + // RenderCacheKeys allocates keys on the arena; we must copy them + // so they remain valid after the arena is released. + switch config.pop.Mode { + case SubscriptionCacheModePopulate: + entries := make([]*CacheEntry, 0, len(cacheKeys)) + for _, ck := range cacheKeys { + if len(ck.Keys) == 0 || ck.Item == nil { + continue + } + value := ck.Item.MarshalTo(nil) + entries = append(entries, &CacheEntry{ + Key: strings.Clone(ck.Keys[0]), + Value: value, + TTL: config.pop.TTL, + }) + } + // Cache errors are intentionally ignored: subscription delivery must + // not be blocked by cache failures. + if len(entries) > 0 { + _ = cache.Set(ctx, entries) + if r.options.OnSubscriptionCacheWrite != nil { + for _, entry := range entries { + r.options.OnSubscriptionCacheWrite(CacheWriteEvent{ + CacheKey: entry.Key, + EntityType: config.pop.EntityTypeName, + ByteSize: len(entry.Value), + DataSource: config.pop.DataSourceName, + CacheLevel: CacheLevelL2, + TTL: config.pop.TTL, + Source: CacheSourceSubscription, + }) + } + } + } + case SubscriptionCacheModeInvalidate: + keys := make([]string, 0, len(cacheKeys)) + for _, ck := range cacheKeys { + if len(ck.Keys) > 0 { + keys = append(keys, strings.Clone(ck.Keys[0])) + } + } + if len(keys) > 0 { + _ = cache.Delete(ctx, keys) + if r.options.OnSubscriptionCacheInvalidate != nil { + r.options.OnSubscriptionCacheInvalidate(config.pop.EntityTypeName, keys) + } + } + } +} + +// performTriggerEntityCacheAsync is the goroutine entry point: runs the cache +// operation, then posts a TriggerCacheDone event back to the event loop. +func (r *Resolver) performTriggerEntityCacheAsync(triggerID uint64, id *SubscriptionIdentifier, config *triggerEntityCacheConfig, data []byte) { + r.handleTriggerEntityCache(config, data) + select { + case <-r.ctx.Done(): + return + case r.events <- subscriptionEvent{ + triggerID: triggerID, + kind: subscriptionEventKindTriggerCacheDone, + data: data, + id: id, + }: + } +} + +// handleTriggerCacheDone fans out the subscription update after the trigger-level +// cache operation has completed. +func (r *Resolver) handleTriggerCacheDone(event subscriptionEvent) { + trig, ok := r.triggers[event.triggerID] + if !ok { + return + } + if event.id != nil { + // Targeted update for a single subscription + for c, s := range trig.subscriptions { + if s.id != *event.id { + continue + } + r.sendUpdateToSubscription(event.data, c, s) + break + } + } else { + // Broadcast to all subscriptions + for c, s := range trig.subscriptions { + r.sendUpdateToSubscription(event.data, c, s) + } + } +} + // processEvents maintains the single threaded event loop that processes all events func (r *Resolver) processEvents() { done := r.ctx.Done() @@ -662,13 +946,13 @@ func (r *Resolver) handleEvent(event subscriptionEvent) { case subscriptionEventKindAddSubscription: r.handleAddSubscription(event.triggerID, event.addSubscription) case subscriptionEventKindRemoveSubscription: - r.handleRemoveSubscription(event.id) + r.handleRemoveSubscription(*event.id) case subscriptionEventKindCompleteSubscription: - r.handleCompleteSubscription(event.id) + r.handleCompleteSubscription(*event.id) case subscriptionEventKindRemoveClient: r.handleRemoveClient(event.id.ConnectionID) case subscriptionEventKindUpdateSubscription: - r.handleUpdateSubscription(event.triggerID, event.data, event.id) + r.handleUpdateSubscription(event.triggerID, event.data, *event.id) case subscriptionEventKindTriggerUpdate: r.handleTriggerUpdate(event.triggerID, event.data) case subscriptionEventKindTriggerComplete: @@ -677,6 +961,8 @@ func (r *Resolver) handleEvent(event subscriptionEvent) { r.handleTriggerInitialized(event.triggerID) case subscriptionEventKindTriggerClose: r.handleTriggerClose(event) + case subscriptionEventKindTriggerCacheDone: + r.handleTriggerCacheDone(event) case subscriptionEventKindUnknown: panic("unknown event") } @@ -717,7 +1003,7 @@ func (r *Resolver) handleHeartbeat(sub *sub) { func (r *Resolver) handleTriggerClose(s subscriptionEvent) { if r.options.Debug { - fmt.Printf("resolver:trigger:shutdown:%d:%d\n", s.triggerID, s.id.SubscriptionID) + fmt.Printf("resolver:trigger:shutdown:%d\n", s.triggerID) } r.closeTrigger(s.triggerID, s.closeKind) @@ -836,6 +1122,7 @@ func (r *Resolver) handleAddSubscription(triggerID uint64, add *addSubscription) subscriptions: make(map[*Context]*sub), cancel: cancel, updater: updater, + cacheConfig: r.buildTriggerCacheConfig(add.ctx, s), } r.triggers[triggerID] = trig trig.subscriptions[add.ctx] = s @@ -989,9 +1276,20 @@ func (r *Resolver) handleTriggerUpdate(id uint64, data []byte) { fmt.Printf("resolver:trigger:update:%d\n", id) } - for c, s := range trig.subscriptions { - r.sendUpdateToSubscription(data, c, s) + // Fast path: no entity cache config → fan out directly + if trig.cacheConfig == nil { + for c, s := range trig.subscriptions { + r.sendUpdateToSubscription(data, c, s) + } + return } + + // Slow path: populate L2 cache BEFORE fanning out to subscriptions. + // The cache must be populated first because child entity fetches check + // L2 cache. If we fanned out immediately, those fetches would miss. + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + go r.performTriggerEntityCacheAsync(id, nil, trig.cacheConfig, dataCopy) } func (r *Resolver) handleUpdateSubscription(id uint64, data []byte, subIdentifier SubscriptionIdentifier) { @@ -1004,13 +1302,22 @@ func (r *Resolver) handleUpdateSubscription(id uint64, data []byte, subIdentifie fmt.Printf("resolver:trigger:subscription:update:%d:%d,%d\n", id, subIdentifier.ConnectionID, subIdentifier.SubscriptionID) } - for c, s := range trig.subscriptions { - if s.id != subIdentifier { - continue + // Fast path: no entity cache config → fan out directly + if trig.cacheConfig == nil { + for c, s := range trig.subscriptions { + if s.id != subIdentifier { + continue + } + r.sendUpdateToSubscription(data, c, s) + break } - r.sendUpdateToSubscription(data, c, s) - break + return } + + // Slow path: populate L2 cache BEFORE fanning out (see handleTriggerUpdate). + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + go r.performTriggerEntityCacheAsync(id, &subIdentifier, trig.cacheConfig, dataCopy) } func (r *Resolver) sendUpdateToSubscription(data []byte, c *Context, s *sub) { @@ -1175,7 +1482,7 @@ func (r *Resolver) AsyncCompleteSubscription(id SubscriptionIdentifier) error { case <-r.ctx.Done(): return r.ctx.Err() case r.events <- subscriptionEvent{ - id: id, + id: &id, kind: subscriptionEventKindCompleteSubscription, }: } @@ -1187,7 +1494,7 @@ func (r *Resolver) AsyncUnsubscribeSubscription(id SubscriptionIdentifier) error case <-r.ctx.Done(): return r.ctx.Err() case r.events <- subscriptionEvent{ - id: id, + id: &id, kind: subscriptionEventKindRemoveSubscription, }: default: @@ -1197,7 +1504,7 @@ func (r *Resolver) AsyncUnsubscribeSubscription(id SubscriptionIdentifier) error case <-r.ctx.Done(): return case r.events <- subscriptionEvent{ - id: id, + id: &id, kind: subscriptionEventKindRemoveSubscription, }: } @@ -1211,7 +1518,7 @@ func (r *Resolver) AsyncUnsubscribeClient(connectionID int64) error { case <-r.ctx.Done(): return r.ctx.Err() case r.events <- subscriptionEvent{ - id: SubscriptionIdentifier{ + id: &SubscriptionIdentifier{ ConnectionID: connectionID, }, kind: subscriptionEventKindRemoveClient, @@ -1223,7 +1530,7 @@ func (r *Resolver) AsyncUnsubscribeClient(connectionID int64) error { case <-r.ctx.Done(): return case r.events <- subscriptionEvent{ - id: SubscriptionIdentifier{ + id: &SubscriptionIdentifier{ ConnectionID: connectionID, }, kind: subscriptionEventKindRemoveClient, @@ -1352,7 +1659,7 @@ func (r *Resolver) ResolveGraphQLSubscription(ctx *Context, subscription *GraphQ r.events <- subscriptionEvent{ triggerID: triggerID, kind: subscriptionEventKindRemoveSubscription, - id: id, + id: &id, } return nil @@ -1482,7 +1789,7 @@ func (s *subscriptionUpdater) UpdateSubscription(id SubscriptionIdentifier, data triggerID: s.triggerID, kind: subscriptionEventKindUpdateSubscription, data: data, - id: id, + id: &id, }: } } @@ -1552,7 +1859,7 @@ func (s *subscriptionUpdater) CloseSubscription(kind SubscriptionCloseKind, id S triggerID: s.triggerID, kind: subscriptionEventKindRemoveSubscription, closeKind: kind, - id: id, + id: &id, }: if s.debug { fmt.Printf("resolver:subscription_updater:close:sent_event:%d\n", s.triggerID) @@ -1561,8 +1868,11 @@ func (s *subscriptionUpdater) CloseSubscription(kind SubscriptionCloseKind, id S } type subscriptionEvent struct { - triggerID uint64 - id SubscriptionIdentifier + triggerID uint64 + // id identifies the target subscription. nil means "all subscriptions on the trigger" + // (used by TriggerCacheDone: the cache operation runs once per trigger, so the + // resulting data update is broadcast to all subscriptions sharing that trigger). + id *SubscriptionIdentifier kind subscriptionEventKind data []byte addSubscription *addSubscription @@ -1593,6 +1903,7 @@ const ( subscriptionEventKindTriggerInitialized subscriptionEventKindTriggerClose subscriptionEventKindUpdateSubscription + subscriptionEventKindTriggerCacheDone ) type SubscriptionUpdater interface { diff --git a/v2/pkg/engine/resolve/resolve_arena_gc_test.go b/v2/pkg/engine/resolve/resolve_arena_gc_test.go index e2e1534587..83318a2d10 100644 --- a/v2/pkg/engine/resolve/resolve_arena_gc_test.go +++ b/v2/pkg/engine/resolve/resolve_arena_gc_test.go @@ -21,7 +21,7 @@ import ( // keeping an object alive, the GC will collect it and subsequent access will // SIGSEGV or return corrupted data. func forceGC() { - for i := 0; i < 3; i++ { + for range 3 { runtime.GC() } } @@ -48,7 +48,7 @@ func newTestResolver(t *testing.T, opts ResolverOptions) *Resolver { func resolveWithGCPressure(t *testing.T, resolver *Resolver, setupCtx func() *Context, setupResp func() *GraphQLResponse) string { t.Helper() var lastOutput string - for i := 0; i < gcIterations; i++ { + for i := range gcIterations { response := setupResp() resolveCtx := setupCtx() forceGC() @@ -72,8 +72,7 @@ func TestArenaGCSafety_FetchError(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `"data"`) + assert.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'testService' at Path 'query'."}],"data":{"field":null}}`, output) } func TestArenaGCSafety_EmptyResponse(t *testing.T) { @@ -85,7 +84,7 @@ func TestArenaGCSafety_EmptyResponse(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'testService' at Path 'query', Reason: empty response."}],"data":{"field":null}}`, output) } func TestArenaGCSafety_InvalidJSON(t *testing.T) { @@ -97,7 +96,7 @@ func TestArenaGCSafety_InvalidJSON(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'testService' at Path 'query', Reason: invalid JSON."}],"data":{"field":null}}`, output) } func TestArenaGCSafety_InvalidShape(t *testing.T) { @@ -109,7 +108,7 @@ func TestArenaGCSafety_InvalidShape(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'testService' at Path 'query', Reason: no data or errors in response."}],"data":{"field":null}}`, output) } func TestArenaGCSafety_SubgraphErrorsWrapMode(t *testing.T) { @@ -121,7 +120,7 @@ func TestArenaGCSafety_SubgraphErrorsWrapMode(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'testService' at Path 'query'.","extensions":{"errors":[{"message":"downstream error"}]}}],"data":{"field":null}}`, output) } func TestArenaGCSafety_SubgraphErrorsPassthrough(t *testing.T) { @@ -135,8 +134,7 @@ func TestArenaGCSafety_SubgraphErrorsPassthrough(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `downstream error`) + assert.Equal(t, `{"errors":[{"message":"downstream error"}],"data":{"field":null}}`, output) } func TestArenaGCSafety_SubgraphErrorsWithExtensionCode(t *testing.T) { @@ -152,8 +150,7 @@ func TestArenaGCSafety_SubgraphErrorsWithExtensionCode(t *testing.T) { }, ) // The extension code is set on errors; verify the output is valid - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `downstream error`) + assert.Equal(t, `{"errors":[{"message":"downstream error"}],"data":{"field":null}}`, output) } func TestArenaGCSafety_SubgraphErrorsWithServiceName(t *testing.T) { @@ -168,7 +165,7 @@ func TestArenaGCSafety_SubgraphErrorsWithServiceName(t *testing.T) { return resp }, ) - assert.Contains(t, output, `testService`) + assert.Equal(t, `{"errors":[{"message":"downstream error","extensions":{"serviceName":"testService"}}],"data":{"field":null}}`, output) } func TestArenaGCSafety_SubgraphErrorsWithExtensionCodeAndServiceName(t *testing.T) { @@ -184,8 +181,7 @@ func TestArenaGCSafety_SubgraphErrorsWithExtensionCodeAndServiceName(t *testing. return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `testService`) + assert.Equal(t, `{"errors":[{"message":"downstream error","extensions":{"serviceName":"testService"}}],"data":{"field":null}}`, output) } func TestArenaGCSafety_AuthorizationRejected(t *testing.T) { @@ -209,8 +205,7 @@ func TestArenaGCSafety_AuthorizationRejected(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `Unauthorized`) + assert.Equal(t, `{"errors":[{"message":"Unauthorized request to Subgraph 'testService' at Path 'query', Reason: not allowed.","extensions":{"code":"UNAUTHORIZED_FIELD_OR_TYPE"}}],"data":{"field":null}}`, output) } func TestArenaGCSafety_RateLimitRejected(t *testing.T) { @@ -231,8 +226,7 @@ func TestArenaGCSafety_RateLimitRejected(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `Rate limit`) + assert.Equal(t, `{"errors":[{"message":"Rate limit exceeded for Subgraph 'testService' at Path 'query', Reason: rate limit exceeded."}],"data":{"field":null}}`, output) } func TestArenaGCSafety_RateLimitWithExtensionCode(t *testing.T) { @@ -256,7 +250,7 @@ func TestArenaGCSafety_RateLimitWithExtensionCode(t *testing.T) { return resp }, ) - assert.Contains(t, output, `RATE_LIMIT_EXCEEDED`) + assert.Equal(t, `{"errors":[{"message":"Rate limit exceeded for Subgraph 'testService' at Path 'query', Reason: rate limit exceeded.","extensions":{"code":"RATE_LIMIT_EXCEEDED"}}],"data":{"field":null}}`, output) } // --- Successful data merge tests --- @@ -270,8 +264,7 @@ func TestArenaGCSafety_MergeResult(t *testing.T) { return resp }, ) - assert.Contains(t, output, `hello world`) - assert.NotContains(t, output, `"errors"`) + assert.Equal(t, `{"data":{"field":"hello world"}}`, output) } // --- Resolvable SetNull path tests --- @@ -322,7 +315,7 @@ func TestArenaGCSafety_NullableFieldNull(t *testing.T) { } }, ) - assert.Contains(t, output, `"obj":null`) + assert.Equal(t, `{"data":{"obj":null}}`, output) } func TestArenaGCSafety_NonNullableFieldNull(t *testing.T) { @@ -375,8 +368,7 @@ func TestArenaGCSafety_NonNullableFieldNull(t *testing.T) { }, ) // The non-nullable field being null should bubble up to null the wrapper object - assert.Contains(t, output, `"wrapper":null`) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.wrapper.name'.","path":["wrapper","name"]}],"data":{"wrapper":null}}`, output) } // --- Authorization skip errors (TrueValue) test --- @@ -447,8 +439,7 @@ func TestArenaGCSafety_AuthRejectionNullableField(t *testing.T) { } }, ) - assert.Contains(t, output, `"name"`) - assert.Contains(t, output, `"data"`) + assert.Equal(t, `{"data":{"user":{"name":"Alice","secret":"classified"}}}`, output) } // --- Nested fetch tree tests --- @@ -515,8 +506,7 @@ func TestArenaGCSafety_SequenceWithErrorThenSuccess(t *testing.T) { } }, ) - assert.Contains(t, output, `first fetch failed`) - assert.Contains(t, output, `ok`) + assert.Equal(t, `{"errors":[{"message":"first fetch failed"}],"data":{"field":null,"other":"ok"}}`, output) } func TestArenaGCSafety_ParallelFetches(t *testing.T) { @@ -590,8 +580,7 @@ func TestArenaGCSafety_ParallelFetches(t *testing.T) { } }, ) - assert.Contains(t, output, `Bob`) - assert.Contains(t, output, `Widget`) + assert.Equal(t, `{"data":{"user":{"name":"Bob"},"product":{"title":"Widget"}}}`, output) } // --- Array nullability tests (SetNull for arrays) --- @@ -641,7 +630,7 @@ func TestArenaGCSafety_NullableArrayWithNullItem(t *testing.T) { }, ) // Non-nullable item being null should propagate to null the nullable array - assert.Contains(t, output, `"items":null`) + assert.Equal(t, `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.items'.","path":["items",0]}],"data":{"items":null}}`, output) } // --- Mixed success and error responses --- @@ -660,8 +649,7 @@ func TestArenaGCSafety_PartialDataWithErrors(t *testing.T) { return resp }, ) - assert.Contains(t, output, `partial value`) - assert.Contains(t, output, `partial failure`) + assert.Equal(t, `{"errors":[{"message":"partial failure","path":["field"]}],"data":{"field":"partial value"}}`, output) } // --- Large/stress tests --- @@ -674,7 +662,7 @@ func TestArenaGCSafety_ManyErrors(t *testing.T) { // Build a response with 20 errors var errMsgs []string - for i := 0; i < 20; i++ { + for range 20 { errMsgs = append(errMsgs, `{"message":"error `+strings.Repeat("x", 100)+`"}`) } errorsJSON := "[" + strings.Join(errMsgs, ",") + "]" @@ -686,7 +674,7 @@ func TestArenaGCSafety_ManyErrors(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},{"message":"error xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}],"data":{"field":null}}`, output) } // --- Verify JSON validity --- @@ -722,14 +710,14 @@ func TestArenaGCSafety_OutputIsValidJSON(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { resolver := newTestResolver(t, tc.opts) - for i := 0; i < gcIterations; i++ { + for i := range gcIterations { resp, _ := gcTestResponse(FakeDataSource(tc.data)) ctx := NewContext(context.Background()) forceGC() buf := &bytes.Buffer{} _, err := resolver.ArenaResolveGraphQLResponse(ctx, resp, buf) require.NoError(t, err) - var parsed map[string]interface{} + var parsed map[string]any require.NoError(t, json.Unmarshal(buf.Bytes(), &parsed), "invalid JSON on iteration %d: %s", i, buf.String()) } }) @@ -785,8 +773,7 @@ func TestArenaGCSafety_StatusCodeFallback(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `503`) + assert.Equal(t, `{"errors":[{"message":"503: Service Unavailable","extensions":{"statusCode":503}}],"data":{"field":null}}`, output) } func TestArenaGCSafety_ApolloRouterCompatError(t *testing.T) { @@ -805,7 +792,7 @@ func TestArenaGCSafety_ApolloRouterCompatError(t *testing.T) { return resp }, ) - assert.Contains(t, output, `SUBREQUEST_HTTP_ERROR`) + assert.Equal(t, `{"errors":[{"message":"HTTP fetch failed from 'testService': 500: Internal Server Error","path":[],"extensions":{"code":"SUBREQUEST_HTTP_ERROR","service":"testService","reason":"500: Internal Server Error","http":{"status":500}}},{"message":"bad","extensions":{"statusCode":500}}],"data":{"field":null}}`, output) } func TestArenaGCSafety_SubgraphStatusCodeInExtensions(t *testing.T) { @@ -823,8 +810,7 @@ func TestArenaGCSafety_SubgraphStatusCodeInExtensions(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"statusCode"`) - assert.Contains(t, output, `502`) + assert.Equal(t, `{"errors":[{"message":"fail","extensions":{"statusCode":502}}],"data":{"field":null}}`, output) } // --- Group B: Loader error filtering codepaths --- @@ -842,8 +828,7 @@ func TestArenaGCSafety_OmitErrorExtensions(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.NotContains(t, output, `"extensions"`) + assert.Equal(t, `{"errors":[{"message":"err"}],"data":{"field":null}}`, output) } func TestArenaGCSafety_OmitErrorLocations(t *testing.T) { @@ -858,7 +843,7 @@ func TestArenaGCSafety_OmitErrorLocations(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"locations"`) + assert.Equal(t, `{"errors":[{"message":"err","locations":[{"line":1,"column":2}]}],"data":{"field":null}}`, output) } func TestArenaGCSafety_OmitAllErrorLocations(t *testing.T) { @@ -874,7 +859,7 @@ func TestArenaGCSafety_OmitAllErrorLocations(t *testing.T) { return resp }, ) - assert.NotContains(t, output, `"locations"`) + assert.Equal(t, `{"errors":[{"message":"err"}],"data":{"field":null}}`, output) } func TestArenaGCSafety_AllowedExtensionFields(t *testing.T) { @@ -890,8 +875,7 @@ func TestArenaGCSafety_AllowedExtensionFields(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"code"`) - assert.NotContains(t, output, `"secret"`) + assert.Equal(t, `{"errors":[{"message":"err","extensions":{"code":"X"}}],"data":{"field":null}}`, output) } func TestArenaGCSafety_WrapModeWithPropagation(t *testing.T) { @@ -907,8 +891,7 @@ func TestArenaGCSafety_WrapModeWithPropagation(t *testing.T) { return resp }, ) - assert.Contains(t, output, `"errors"`) - assert.Contains(t, output, `inner`) + assert.Equal(t, `{"errors":[{"message":"Failed to fetch from Subgraph 'testService' at Path 'query'.","extensions":{"errors":[{"message":"inner"}]}}],"data":{"field":null}}`, output) } // --- Group C: Resolvable scalar walk functions --- @@ -925,8 +908,7 @@ func TestArenaGCSafety_BooleanField(t *testing.T) { ) }, ) - assert.Contains(t, output, `true`) - assert.NotContains(t, output, `"errors"`) + assert.Equal(t, `{"data":{"active":true}}`, output) } func TestArenaGCSafety_IntegerField(t *testing.T) { @@ -941,7 +923,7 @@ func TestArenaGCSafety_IntegerField(t *testing.T) { ) }, ) - assert.Contains(t, output, `42`) + assert.Equal(t, `{"data":{"count":42}}`, output) } func TestArenaGCSafety_FloatField(t *testing.T) { @@ -956,7 +938,7 @@ func TestArenaGCSafety_FloatField(t *testing.T) { ) }, ) - assert.Contains(t, output, `9.99`) + assert.Equal(t, `{"data":{"price":9.99}}`, output) } func TestArenaGCSafety_FloatTruncation(t *testing.T) { @@ -975,7 +957,7 @@ func TestArenaGCSafety_FloatTruncation(t *testing.T) { }, ) // Whole-number float should be truncated to int representation - assert.Contains(t, output, `"price":10`) + assert.Equal(t, `{"data":{"price":10}}`, output) } func TestArenaGCSafety_BigIntField(t *testing.T) { @@ -990,7 +972,7 @@ func TestArenaGCSafety_BigIntField(t *testing.T) { ) }, ) - assert.Contains(t, output, `9007199254740993`) + assert.Equal(t, `{"data":{"id":9007199254740993}}`, output) } func TestArenaGCSafety_ScalarField(t *testing.T) { @@ -1005,7 +987,7 @@ func TestArenaGCSafety_ScalarField(t *testing.T) { ) }, ) - assert.Contains(t, output, `"key"`) + assert.Equal(t, `{"data":{"meta":{"key":"value"}}}`, output) } func TestArenaGCSafety_EnumValid(t *testing.T) { @@ -1020,7 +1002,7 @@ func TestArenaGCSafety_EnumValid(t *testing.T) { ) }, ) - assert.Contains(t, output, `"ACTIVE"`) + assert.Equal(t, `{"data":{"status":"ACTIVE"}}`, output) } func TestArenaGCSafety_EnumInvalid(t *testing.T) { @@ -1036,7 +1018,7 @@ func TestArenaGCSafety_EnumInvalid(t *testing.T) { ) }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Enum \"Status\" cannot represent value: \"UNKNOWN\"","path":["status"],"extensions":{"code":"INTERNAL_SERVER_ERROR"}}],"data":{"status":null}}`, output) } func TestArenaGCSafety_StringUnescapeResponseJson(t *testing.T) { @@ -1052,7 +1034,7 @@ func TestArenaGCSafety_StringUnescapeResponseJson(t *testing.T) { ) }, ) - assert.Contains(t, output, `nested`) + assert.Equal(t, `{"data":{"payload":{"nested":"value"}}}`, output) } func TestArenaGCSafety_CustomNode(t *testing.T) { @@ -1068,7 +1050,7 @@ func TestArenaGCSafety_CustomNode(t *testing.T) { ) }, ) - assert.Contains(t, output, `"hello"`) + assert.Equal(t, `{"data":{"custom":"hello"}}`, output) } func TestArenaGCSafety_ArrayObjectItemWalkFail(t *testing.T) { @@ -1121,8 +1103,7 @@ func TestArenaGCSafety_ArrayObjectItemWalkFail(t *testing.T) { } }, ) - assert.Contains(t, output, `"ok"`) - assert.Contains(t, output, `null`) + assert.Equal(t, `{"errors":[{"message":"Cannot return null for non-nullable field 'Query.items.name'.","path":["items",1,"name"]}],"data":{"items":[{"name":"ok"},null]}}`, output) } func TestArenaGCSafety_ValueCompletion(t *testing.T) { @@ -1174,8 +1155,7 @@ func TestArenaGCSafety_ValueCompletion(t *testing.T) { } }, ) - assert.Contains(t, output, `"extensions"`) - assert.Contains(t, output, `"valueCompletion"`) + assert.Equal(t, `{"data":{"wrapper":null},"extensions":{"valueCompletion":[{"message":"Cannot return null for non-nullable field Query.wrapper.required.","path":["wrapper","required"],"extensions":{"code":"INVALID_GRAPHQL"}}]}}`, output) } // --- Group D: Type-mismatch error paths --- @@ -1193,7 +1173,7 @@ func TestArenaGCSafety_BooleanTypeMismatch(t *testing.T) { ) }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Bool cannot represent non-boolean value: \"\"not_a_bool\"\"","path":["active"]}],"data":null}`, output) } func TestArenaGCSafety_IntegerTypeMismatch(t *testing.T) { @@ -1208,7 +1188,7 @@ func TestArenaGCSafety_IntegerTypeMismatch(t *testing.T) { ) }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Int cannot represent non-integer value: \"\"not_a_number\"\"","path":["count"]}],"data":null}`, output) } func TestArenaGCSafety_FloatTypeMismatch(t *testing.T) { @@ -1223,7 +1203,7 @@ func TestArenaGCSafety_FloatTypeMismatch(t *testing.T) { ) }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"Float cannot represent non-float value: \"\"not_a_float\"\"","path":["price"]}],"data":null}`, output) } func TestArenaGCSafety_StringTypeMismatch(t *testing.T) { @@ -1238,5 +1218,5 @@ func TestArenaGCSafety_StringTypeMismatch(t *testing.T) { ) }, ) - assert.Contains(t, output, `"errors"`) + assert.Equal(t, `{"errors":[{"message":"String cannot represent non-string value: \"123\"","path":["name"]}],"data":null}`, output) } diff --git a/v2/pkg/engine/resolve/resolve_caching_test.go b/v2/pkg/engine/resolve/resolve_caching_test.go new file mode 100644 index 0000000000..f648ddef7d --- /dev/null +++ b/v2/pkg/engine/resolve/resolve_caching_test.go @@ -0,0 +1,148 @@ +package resolve + +import ( + "context" + "testing" + + "github.com/golang/mock/gomock" +) + +// TestResolver_CachingRoundTrip verifies end-to-end resolution of a nested query with +// batch entity fetches, ensuring the full fetch tree (root + entity) produces correct JSON. +func TestResolver_CachingRoundTrip(t *testing.T) { + t.Run("nested batching single root result", testFn(func(t *testing.T, ctrl *gomock.Controller) (node *GraphQLResponse, ctx Context, expectedOutput string) { + + listingRoot := mockedDS(t, ctrl, + `{"method":"POST","url":"http://listing","body":{"query":"query{listing{__typename id name}}"}}`, + `{"data":{"listing":{"__typename":"Listing","id":1,"name":"L1"}}}`) + + nested := mockedDS(t, ctrl, + `{"method":"POST","url":"http://nested","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Listing { nested { id price listing { __typename id }} }}}","variables":{"representations":[{"__typename":"Listing","id":1}]}}}`, + `{"data":{"_entities":[{"__typename":"Listing","nested":{"id":1.1,"price":123,"listing":{"__typename":"Listing","id":1}}}]}}`) + + return &GraphQLResponse{ + Fetches: Sequence( + SingleWithPath(&SingleFetch{ + InputTemplate: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://listing","body":{"query":"query{listing{__typename id name}}"}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + FetchConfiguration: FetchConfiguration{ + DataSource: listingRoot, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + }, + }, "query"), + SingleWithPath(&BatchEntityFetch{ + Input: BatchInput{ + Header: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`{"method":"POST","url":"http://nested","body":{"query":"query($representations: [_Any!]!){_entities(representations: $representations){__typename ... on Listing { nested { id price listing { __typename id }} }}}","variables":{"representations":[`), + SegmentType: StaticSegmentType, + }, + }, + }, + Items: []InputTemplate{ + { + Segments: []TemplateSegment{ + { + SegmentType: VariableSegmentType, + VariableKind: ResolvableObjectVariableKind, + Renderer: NewGraphQLVariableResolveRenderer(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &Integer{ + Path: []string{"id"}, + }, + }, + }, + }), + }, + }, + }, + }, + Separator: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`,`), + SegmentType: StaticSegmentType, + }, + }, + }, + Footer: InputTemplate{ + Segments: []TemplateSegment{ + { + Data: []byte(`]}}}`), + SegmentType: StaticSegmentType, + }, + }, + }, + }, + DataSource: nested, + PostProcessing: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data", "_entities"}, + }, + }, "query.listing", ObjectPath("listing")), + ), + Data: &Object{ + Fields: []*Field{ + { + Name: []byte("listing"), + Value: &Object{ + Path: []string{"listing"}, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Integer{ + Path: []string{"id"}, + Nullable: false, + }, + }, + { + Name: []byte("name"), + Value: &String{ + Path: []string{"name"}, + Nullable: false, + }, + }, + { + Name: []byte("nested"), + Value: &Object{ + Path: []string{"nested"}, + Fields: []*Field{ + { + Name: []byte("id"), + Value: &Float{ + Path: []string{"id"}, + }, + }, + { + Name: []byte("price"), + Value: &Integer{ + Path: []string{"price"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, Context{ctx: context.Background(), Variables: nil}, `{"data":{"listing":{"id":1,"name":"L1","nested":{"id":1.1,"price":123}}}}` + })) +} diff --git a/v2/pkg/engine/resolve/resolve_federation_test.go b/v2/pkg/engine/resolve/resolve_federation_test.go index 2f894fdfc1..edc2877495 100644 --- a/v2/pkg/engine/resolve/resolve_federation_test.go +++ b/v2/pkg/engine/resolve/resolve_federation_test.go @@ -11,7 +11,7 @@ import ( ) type TestingTB interface { - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) Helper() FailNow() } diff --git a/v2/pkg/engine/resolve/resolve_mock_test.go b/v2/pkg/engine/resolve/resolve_mock_test.go index a64b7dd831..cce330fce0 100644 --- a/v2/pkg/engine/resolve/resolve_mock_test.go +++ b/v2/pkg/engine/resolve/resolve_mock_test.go @@ -46,7 +46,7 @@ func (m *MockDataSource) Load(arg0 context.Context, arg1 http.Header, arg2 []byt } // Load indicates an expected call of Load. -func (mr *MockDataSourceMockRecorder) Load(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockDataSourceMockRecorder) Load(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockDataSource)(nil).Load), arg0, arg1, arg2) } @@ -61,7 +61,7 @@ func (m *MockDataSource) LoadWithFiles(arg0 context.Context, arg1 http.Header, a } // LoadWithFiles indicates an expected call of LoadWithFiles. -func (mr *MockDataSourceMockRecorder) LoadWithFiles(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockDataSourceMockRecorder) LoadWithFiles(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadWithFiles", reflect.TypeOf((*MockDataSource)(nil).LoadWithFiles), arg0, arg1, arg2, arg3) } diff --git a/v2/pkg/engine/resolve/resolve_test.go b/v2/pkg/engine/resolve/resolve_test.go index 98568556ab..5d41bd6e68 100644 --- a/v2/pkg/engine/resolve/resolve_test.go +++ b/v2/pkg/engine/resolve/resolve_test.go @@ -241,7 +241,7 @@ func TestResolver_ResolveNode(t *testing.T) { return func(t *testing.T) { buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, response, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, response, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -266,7 +266,7 @@ func TestResolver_ResolveNode(t *testing.T) { return func(t *testing.T) { t.Helper() buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, response, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, response, buf) assert.NoError(t, err) assert.Equal(t, expectedErr, buf.String()) ctrl.Finish() @@ -1285,7 +1285,7 @@ func testFn(fn func(t *testing.T, ctrl *gomock.Controller) (node *GraphQLRespons } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1334,7 +1334,7 @@ func testFnApolloCompatibility(fn func(t *testing.T, ctrl *gomock.Controller) (n } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1368,7 +1368,7 @@ func testFnSubgraphErrorsPassthrough(fn func(t *testing.T, ctrl *gomock.Controll } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1403,7 +1403,7 @@ func testFnSubgraphErrorsWithExtensionFieldCode(fn func(t *testing.T, ctrl *gomo } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1438,7 +1438,7 @@ func testFnSubgraphErrorsWithAllowAllExtensionFields(fn func(t *testing.T, ctrl } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1475,7 +1475,7 @@ func testFnSubgraphErrorsWithExtensionFieldServiceName(fn func(t *testing.T, ctr } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1511,7 +1511,7 @@ func testFnSubgraphErrorsWithExtensionDefaultCode(fn func(t *testing.T, ctrl *go } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1544,7 +1544,7 @@ func testFnNoSubgraphErrorForwarding(fn func(t *testing.T, ctrl *gomock.Controll } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1566,7 +1566,7 @@ func testFnWithPostEvaluation(fn func(t *testing.T, ctrl *gomock.Controller) (no } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1589,7 +1589,7 @@ func testFnWithError(fn func(t *testing.T, ctrl *gomock.Controller) (node *Graph } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.Error(t, err, expectedOutput) ctrl.Finish() } @@ -1617,7 +1617,7 @@ func testFnSubgraphErrorsPassthroughAndOmitCustomFields(fn func(t *testing.T, ct } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(&ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(&ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -1642,7 +1642,7 @@ func testFnWithPostEvaluationAndOptions(opts ResolverOptions, fn func(t *testing } buf := &bytes.Buffer{} - _, err := r.ResolveGraphQLResponse(ctx, node, nil, buf) + _, err := r.ResolveGraphQLResponse(ctx, node, buf) assert.NoError(t, err) assert.Equal(t, expectedOutput, buf.String()) ctrl.Finish() @@ -5010,8 +5010,7 @@ func TestResolver_ArenaResolveGraphQLResponse(t *testing.T) { } func TestResolver_ArenaResolveGraphQLResponse_RequestDeduplication(t *testing.T) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := t.Context() r := newResolver(rCtx) ds := newBlockingDataSource([]byte(`{"value":"slow"}`)) @@ -5115,8 +5114,7 @@ func TestResolver_ArenaResolveGraphQLResponse_RequestDeduplication(t *testing.T) } func TestResolver_ArenaResolveGraphQLResponse_RequestDeduplication_SharedData(t *testing.T) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := t.Context() r := newResolver(rCtx) ds := newBlockingDataSource([]byte(`{"value":"slow"}`)) @@ -5526,8 +5524,7 @@ func TestResolver_WithHeader(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := t.Context() resolver := newResolver(rCtx) header := make(http.Header) @@ -5579,7 +5576,7 @@ func TestResolver_WithHeader(t *testing.T) { }, }, } - _, err := resolver.ResolveGraphQLResponse(ctx, res, nil, out) + _, err := resolver.ResolveGraphQLResponse(ctx, res, out) assert.NoError(t, err) assert.Equal(t, `{"data":{"bar":"baz"}}`, out.String()) }) @@ -5600,8 +5597,7 @@ func TestResolver_WithVariableRemapping(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := t.Context() resolver := newResolver(rCtx) ctx := &Context{ @@ -5651,7 +5647,7 @@ func TestResolver_WithVariableRemapping(t *testing.T) { }, }, } - _, err := resolver.ResolveGraphQLResponse(ctx, res, nil, out) + _, err := resolver.ResolveGraphQLResponse(ctx, res, out) assert.NoError(t, err) assert.Equal(t, `{"data":{"bar":"baz"}}`, out.String()) }) @@ -6036,8 +6032,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { } t.Run("should return errors if the upstream data has errors", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return `{"errors":[{"message":"Validation error occurred","locations":[{"line":1,"column":1}],"extensions":{"code":"GRAPHQL_VALIDATION_FAILED"}}],"data":null}`, true @@ -6060,8 +6055,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("should return an error if the data source has not been defined", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() resolver, plan, recorder, id := setup(c, nil) @@ -6074,8 +6068,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("should successfully get result from upstream", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), counter == 2 @@ -6098,17 +6091,19 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { recorder.AwaitComplete(t, defaultTimeout) messages := recorder.Messages() - assert.Greater(t, len(messages), 2) time.Sleep(resolver.heartbeatInterval) - // Validate that despite the time, we don't see any heartbeats sent - assert.Contains(t, messages, `{"data":{"counter":0}}`) - assert.Contains(t, messages, `{"data":{"counter":1}}`) - assert.Contains(t, messages, `{"data":{"counter":2}}`) + // Validate that despite the time, we don't see any heartbeats sent — + // the stream should contain exactly the three counter messages produced + // by the fake stream, with no additional heartbeat payloads interleaved. + assert.Equal(t, []string{ + `{"data":{"counter":0}}`, + `{"data":{"counter":1}}`, + `{"data":{"counter":2}}`, + }, messages) }) t.Run("should successfully delete multiple finished subscriptions", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), counter == 1 @@ -6165,8 +6160,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("should propagate extensions to stream", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), counter == 2 @@ -6186,15 +6180,15 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { recorder.AwaitComplete(t, defaultTimeout) messages := recorder.Messages() - assert.Len(t, messages, 3) - assert.Contains(t, messages, `{"data":{"counter":0}}`) - assert.Contains(t, messages, `{"data":{"counter":1}}`) - assert.Contains(t, messages, `{"data":{"counter":2}}`) + assert.Equal(t, []string{ + `{"data":{"counter":0}}`, + `{"data":{"counter":1}}`, + `{"data":{"counter":2}}`, + }, messages) }) t.Run("should propagate initial payload to stream", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), counter == 2 @@ -6214,15 +6208,15 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { recorder.AwaitComplete(t, defaultTimeout) messages := recorder.Messages() - assert.Len(t, messages, 3) - assert.Contains(t, messages, `{"data":{"counter":0}}`) - assert.Contains(t, messages, `{"data":{"counter":1}}`) - assert.Contains(t, messages, `{"data":{"counter":2}}`) + assert.Equal(t, []string{ + `{"data":{"counter":0}}`, + `{"data":{"counter":1}}`, + `{"data":{"counter":2}}`, + }, messages) }) t.Run("should stop stream on unsubscribe subscription", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), false @@ -6246,8 +6240,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("should stop stream on unsubscribe client", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), false @@ -6271,8 +6264,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("renders query plan with trigger", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), counter == 0 @@ -6300,8 +6292,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("renders query plan with trigger and additional data", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), counter == 0 @@ -6350,7 +6341,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { const numSubscriptions = 2 var resolverCompleted atomic.Uint32 var recorderCompleted atomic.Uint32 - for i := 0; i < numSubscriptions; i++ { + for range numSubscriptions { recorder := &SubscriptionRecorder{ buf: &bytes.Buffer{}, messages: []string{}, @@ -6383,8 +6374,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("should wait for all in flight operations to be completed", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), true @@ -6415,8 +6405,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("should call SubscriptionOnStart hook", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() called := make(chan bool, 1) @@ -6449,8 +6438,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("SubscriptionOnStart ctx has a working subscription updater", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() fakeStream := createFakeStream(func(counter int) (message string, done bool) { return fmt.Sprintf(`{"data":{"counter":%d}}`, counter), counter == 0 @@ -6480,8 +6468,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("SubscriptionOnStart ctx updater only updates the right subscription", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() executed := atomic.Bool{} @@ -6587,8 +6574,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("SubscriptionOnStart ctx updater on multiple subscriptions with same trigger works", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() id2 := SubscriptionIdentifier{ ConnectionID: 1, @@ -6663,8 +6649,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("SubscriptionOnStart can send a lot of updates without blocking", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() workChanBufferSize := 10000 fakeStream := createFakeStream(func(counter int) (message string, done bool) { @@ -6672,8 +6657,8 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }, 1*time.Millisecond, func(input []byte) { assert.Equal(t, `{"method":"POST","url":"http://localhost:4000","body":{"query":"subscription { counter }"}}`, string(input)) }, func(ctx StartupHookContext, input []byte) (err error) { - for i := 0; i < workChanBufferSize+1; i++ { - ctx.Updater([]byte(fmt.Sprintf(`{"data":{"counter":%d}}`, i+100))) + for i := range workChanBufferSize + 1 { + ctx.Updater(fmt.Appendf(nil, `{"data":{"counter":%d}}`, i+100)) } return nil }) @@ -6692,15 +6677,14 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { recorder.AwaitComplete(t, defaultTimeout) assert.Equal(t, workChanBufferSize+2, len(recorder.Messages())) - for i := 0; i < workChanBufferSize; i++ { + for i := range workChanBufferSize { assert.Equal(t, fmt.Sprintf(`{"data":{"counter":%d}}`, i+100), recorder.Messages()[i]) } assert.Equal(t, `{"data":{"counter":0}}`, recorder.Messages()[workChanBufferSize+1]) }) t.Run("SubscriptionOnStart can send a lot of updates in a go routine while updates are coming from other sources", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() messagesToSendFromHook := int32(100) messagesToSendFromOtherSources := int32(100) @@ -6724,7 +6708,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { assert.Equal(t, `{"method":"POST","url":"http://localhost:4000","body":{"query":"subscription { counter }"}}`, string(input)) }, func(ctx StartupHookContext, input []byte) (err error) { // send the first update immediately - ctx.Updater([]byte(fmt.Sprintf(`{"data":{"counter":%d}}`, 0+20000))) + ctx.Updater(fmt.Appendf(nil, `{"data":{"counter":%d}}`, 0+20000)) // start a go routine to send the updates after the source started emitting messages go func() { @@ -6733,7 +6717,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { select { case <-firstMessageArrived: for i := 1; i < int(messagesToSendFromHook); i++ { - ctx.Updater([]byte(fmt.Sprintf(`{"data":{"counter":%d}}`, i+20000))) + ctx.Updater(fmt.Appendf(nil, `{"data":{"counter":%d}}`, i+20000)) } case <-time.After(defaultTimeout): // if the first message did not arrive, do not send any updates @@ -6781,8 +6765,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("it is possible to have two subscriptions to the same trigger", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() // sub2Ready gates the data source goroutine so that it doesn't start // emitting before sub2 has been registered on the trigger. Without this, @@ -6835,8 +6818,7 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { }) t.Run("should propagate errors from SubscriptionOnStart hook", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() expectedErr := errors.New("startup hook failed") fakeStream := createFakeStream(func(counter int) (message string, done bool) { @@ -6858,11 +6840,11 @@ func TestResolver_ResolveGraphQLSubscription(t *testing.T) { recorder.AwaitAnyMessageCount(t, defaultTimeout) messages := recorder.Messages() - require.Greater(t, len(messages), 0, "Expected error message to be written to recorder") + require.Equal(t, 1, len(messages), "startup hook failure should emit exactly one GraphQL error message") errorMessage := messages[0] - assert.Contains(t, errorMessage, "errors", "Expected error message in GraphQL format") - assert.Contains(t, errorMessage, expectedErr.Error(), "Expected actual error message to be included") + assert.Equal(t, `{"errors":[{"message":"startup hook failed"}],"data":null}`, errorMessage, + "startup hook error must be rendered as a GraphQL error payload carrying the original error message") }) } @@ -6923,8 +6905,7 @@ func Test_ResolveGraphQLSubscriptionWithFilter(t *testing.T) { */ t.Run("matching entity should be included", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() count := 0 @@ -7019,8 +7000,7 @@ func Test_ResolveGraphQLSubscriptionWithFilter(t *testing.T) { }) t.Run("non-matching entity should remain", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() count := 0 @@ -7113,8 +7093,7 @@ func Test_ResolveGraphQLSubscriptionWithFilter(t *testing.T) { }) t.Run("matching array values should be included", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() count := 0 @@ -7208,8 +7187,7 @@ func Test_ResolveGraphQLSubscriptionWithFilter(t *testing.T) { }) t.Run("matching array values with prefix should be included", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() count := 0 @@ -7307,8 +7285,7 @@ func Test_ResolveGraphQLSubscriptionWithFilter(t *testing.T) { }) t.Run("should err when subscription filter has multiple templates", func(t *testing.T) { - c, cancel := context.WithCancel(context.Background()) - defer cancel() + c := t.Context() count := 0 @@ -7419,8 +7396,7 @@ func Test_ResolveGraphQLSubscriptionWithFilter(t *testing.T) { } func Benchmark_NestedBatching(b *testing.B) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := b.Context() resolver := newResolver(rCtx) @@ -7696,13 +7672,13 @@ func Benchmark_NestedBatching(b *testing.B) { expected := []byte(`{"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":{"name":"user-1"}},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]}}`) pool := sync.Pool{ - New: func() interface{} { + New: func() any { return bytes.NewBuffer(make([]byte, 0, 1024)) }, } ctxPool := sync.Pool{ - New: func() interface{} { + New: func() any { return NewContext(context.Background()) }, } @@ -7716,7 +7692,7 @@ func Benchmark_NestedBatching(b *testing.B) { ctx := ctxPool.Get().(*Context) buf := pool.Get().(*bytes.Buffer) ctx.ctx = context.Background() - _, err := resolver.ResolveGraphQLResponse(ctx, plan, nil, buf) + _, err := resolver.ResolveGraphQLResponse(ctx, plan, buf) if err != nil { b.Fatal(err) } @@ -7734,8 +7710,7 @@ func Benchmark_NestedBatching(b *testing.B) { } func Benchmark_NestedBatchingArena(b *testing.B) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := b.Context() resolver := newResolver(rCtx) @@ -8011,13 +7986,13 @@ func Benchmark_NestedBatchingArena(b *testing.B) { expected := []byte(`{"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":{"name":"user-1"}},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]}}`) pool := sync.Pool{ - New: func() interface{} { + New: func() any { return bytes.NewBuffer(make([]byte, 0, 1024)) }, } ctxPool := sync.Pool{ - New: func() interface{} { + New: func() any { return NewContext(context.Background()) }, } @@ -8049,8 +8024,7 @@ func Benchmark_NestedBatchingArena(b *testing.B) { } func Benchmark_NoCheckNestedBatching(b *testing.B) { - rCtx, cancel := context.WithCancel(context.Background()) - defer cancel() + rCtx := b.Context() resolver := newResolver(rCtx) @@ -8321,13 +8295,13 @@ func Benchmark_NoCheckNestedBatching(b *testing.B) { expected := []byte(`{"data":{"topProducts":[{"name":"Table","stock":8,"reviews":[{"body":"Love Table!","author":{"name":"user-1"}},{"body":"Prefer other Table.","author":{"name":"user-2"}}]},{"name":"Couch","stock":2,"reviews":[{"body":"Couch Too expensive.","author":{"name":"user-1"}}]},{"name":"Chair","stock":5,"reviews":[{"body":"Chair Could be better.","author":{"name":"user-2"}}]}]}}`) pool := sync.Pool{ - New: func() interface{} { + New: func() any { return bytes.NewBuffer(make([]byte, 0, 1024)) }, } ctxPool := sync.Pool{ - New: func() interface{} { + New: func() any { return NewContext(context.Background()) }, } @@ -8341,7 +8315,7 @@ func Benchmark_NoCheckNestedBatching(b *testing.B) { ctx := ctxPool.Get().(*Context) buf := pool.Get().(*bytes.Buffer) ctx.ctx = context.Background() - _, err := resolver.ResolveGraphQLResponse(ctx, plan, nil, buf) + _, err := resolver.ResolveGraphQLResponse(ctx, plan, buf) if err != nil { b.Fatal(err) } diff --git a/v2/pkg/engine/resolve/response.go b/v2/pkg/engine/resolve/response.go index d8af8d017b..31e3f15548 100644 --- a/v2/pkg/engine/resolve/response.go +++ b/v2/pkg/engine/resolve/response.go @@ -2,17 +2,50 @@ package resolve import ( "io" + "time" "github.com/gobwas/ws" "github.com/wundergraph/graphql-go-tools/v2/pkg/ast" - "github.com/wundergraph/graphql-go-tools/v2/pkg/lexer/literal" ) +// SubscriptionCacheMode determines how subscription events manage L2 cache entries. +type SubscriptionCacheMode int + +const ( + // SubscriptionCacheModePopulate writes entity data to L2 on each subscription event. + SubscriptionCacheModePopulate SubscriptionCacheMode = iota + // SubscriptionCacheModeInvalidate deletes the L2 cache entry on each subscription event. + SubscriptionCacheModeInvalidate +) + +// SubscriptionEntityCachePopulation configures how the resolver manages L2 cache +// entries for root entities received via subscription events. +type SubscriptionEntityCachePopulation struct { + // Mode determines whether to populate or invalidate L2 cache entries. + Mode SubscriptionCacheMode + // CacheKeyTemplate generates cache keys from entity @key fields. + CacheKeyTemplate *EntityQueryCacheKeyTemplate + // CacheName identifies which LoaderCache instance to use. + CacheName string + // TTL is the time-to-live for populated cache entries (only used in Populate mode). + TTL time.Duration + // IncludeSubgraphHeaderPrefix controls whether forwarded headers affect cache keys. + IncludeSubgraphHeaderPrefix bool + // DataSourceName is the subgraph name for SubgraphHeadersBuilder lookup. + DataSourceName string + // SubscriptionFieldName is the name of the subscription root field (e.g., "updateProductPrice"). + // Used to navigate from the subscription data root to the entity data. + SubscriptionFieldName string + // EntityTypeName is the entity type name (e.g., "Product") used to set __typename in cache keys. + EntityTypeName string +} + type GraphQLSubscription struct { - Trigger GraphQLSubscriptionTrigger - Response *GraphQLResponse - Filter *SubscriptionFilter + Trigger GraphQLSubscriptionTrigger + Response *GraphQLResponse + Filter *SubscriptionFilter + EntityCachePopulation *SubscriptionEntityCachePopulation } type GraphQLSubscriptionTrigger struct { @@ -87,46 +120,6 @@ type SubscriptionResponseWriter interface { Close(kind SubscriptionCloseKind) } -func writeGraphqlResponse(buf *BufPair, writer io.Writer, ignoreData bool) (err error) { - hasErrors := buf.Errors.Len() != 0 - hasData := buf.Data.Len() != 0 && !ignoreData - - err = writeSafe(err, writer, lBrace) - - if hasErrors { - err = writeSafe(err, writer, quote) - err = writeSafe(err, writer, literalErrors) - err = writeSafe(err, writer, quote) - err = writeSafe(err, writer, colon) - err = writeSafe(err, writer, lBrack) - err = writeSafe(err, writer, buf.Errors.Bytes()) - err = writeSafe(err, writer, rBrack) - err = writeSafe(err, writer, comma) - } - - err = writeSafe(err, writer, quote) - err = writeSafe(err, writer, literalData) - err = writeSafe(err, writer, quote) - err = writeSafe(err, writer, colon) - - if hasData { - _, err = writer.Write(buf.Data.Bytes()) - } else { - err = writeSafe(err, writer, literal.NULL) - } - err = writeSafe(err, writer, rBrace) - - return err -} - -func writeSafe(err error, writer io.Writer, data []byte) error { - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - func writeFlushComplete(writer SubscriptionResponseWriter, msg []byte) error { _, err := writer.Write(msg) if err != nil { diff --git a/v2/pkg/engine/resolve/structural_copy_bench_test.go b/v2/pkg/engine/resolve/structural_copy_bench_test.go new file mode 100644 index 0000000000..ea49512e17 --- /dev/null +++ b/v2/pkg/engine/resolve/structural_copy_bench_test.go @@ -0,0 +1,260 @@ +package resolve + +// Benchmarks for the L1/L2 cache copy primitives. +// +// These target the four StructuralCopy helpers in loader_cache_transform.go +// plus the L2 wire-format MarshalTo path, both with and without an alias +// Transform, to isolate the overhead of alias/arg-suffix normalization +// from the plain structural copy. +// +// Mapping to production call sites (loader_cache.go): +// L1Write -> structuralCopyNormalizedPassthrough (populateL1Cache) +// L1Read -> structuralCopyDenormalizedPassthrough (tryL1CacheLoad) +// L2Read -> ParseBytesWithArena + structuralCopyDenormalized (applyEntityFetchL2Results) +// L2Write -> MarshalTo (cacheKeysToEntriesBatch) — no transform in prod, since the +// L1-stored value is already schema-shape. The "WithTransform" variant models +// the hypothetical "normalize-and-serialize" cost of writing an aliased +// response value directly to L2. + +import ( + "testing" + + "github.com/wundergraph/astjson" + "github.com/wundergraph/go-arena" +) + +// Representative entity payload: 10 fields, 4 aliased, mix of scalars + small nested array. +// Response shape (with aliases) — what the subgraph returned verbatim. +const benchEntityResponseShape = `{` + + `"__typename":"Product",` + + `"id":"p-00000001",` + + `"n":"Wireless Headphones Pro X",` + + `"p":249.99,` + + `"in_stock":true,` + + `"category":"electronics",` + + `"desc":"Premium noise-cancelling wireless headphones with 40h battery life.",` + + `"created_at":"2024-01-15T10:30:00Z",` + + `"updated_at":"2024-03-22T14:05:12Z",` + + `"tag_list":["audio","wireless","premium","bestseller"]` + + `}` + +// Schema shape — what's stored in L1/L2 after normalization. +const benchEntitySchemaShape = `{` + + `"__typename":"Product",` + + `"id":"p-00000001",` + + `"name":"Wireless Headphones Pro X",` + + `"price":249.99,` + + `"in_stock":true,` + + `"category":"electronics",` + + `"description":"Premium noise-cancelling wireless headphones with 40h battery life.",` + + `"created_at":"2024-01-15T10:30:00Z",` + + `"updated_at":"2024-03-22T14:05:12Z",` + + `"tags":["audio","wireless","premium","bestseller"]` + + `}` + +// benchAliasedObject describes the entity for the Transform builder. +// 4 of the 10 fields are aliased: n/p/desc/tag_list. +func benchAliasedObject() *Object { + return &Object{ + HasAliases: true, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{}}, + {Name: []byte("id"), Value: &String{}}, + {Name: []byte("n"), OriginalName: []byte("name"), Value: &String{}}, + {Name: []byte("p"), OriginalName: []byte("price"), Value: &Float{}}, + {Name: []byte("in_stock"), Value: &Boolean{}}, + {Name: []byte("category"), Value: &String{}}, + {Name: []byte("desc"), OriginalName: []byte("description"), Value: &String{}}, + {Name: []byte("created_at"), Value: &String{}}, + {Name: []byte("updated_at"), Value: &String{}}, + {Name: []byte("tag_list"), OriginalName: []byte("tags"), Value: &Array{Item: &String{}}}, + }, + } +} + +// benchNoAliasObject — same fields with no aliases. HasAliases=false routes +// all helpers to plain StructuralCopy (no Transform built). +func benchNoAliasObject() *Object { + return &Object{ + HasAliases: false, + Fields: []*Field{ + {Name: []byte("__typename"), Value: &String{}}, + {Name: []byte("id"), Value: &String{}}, + {Name: []byte("name"), Value: &String{}}, + {Name: []byte("price"), Value: &Float{}}, + {Name: []byte("in_stock"), Value: &Boolean{}}, + {Name: []byte("category"), Value: &String{}}, + {Name: []byte("description"), Value: &String{}}, + {Name: []byte("created_at"), Value: &String{}}, + {Name: []byte("updated_at"), Value: &String{}}, + {Name: []byte("tags"), Value: &Array{Item: &String{}}}, + }, + } +} + +// newBenchLoader builds a Loader with a fresh target arena. The parser's +// scratch slabs and transform slabs amortize across iterations, mirroring prod. +func newBenchLoader() (*Loader, arena.Arena) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + return &Loader{jsonArena: ar}, ar +} + +// parseOnto parses src onto a's arena using a fresh parser (one-shot). +func parseOnto(a arena.Arena, src []byte) *astjson.Value { + v, err := astjson.ParseBytesWithArena(a, src) + if err != nil { + panic(err) + } + return v +} + +// ---------- L1 Write ---------- + +// BenchmarkStructuralCopy_L1Write_NoTransform: +// populateL1Cache path when the response has no aliases — +// structuralCopyNormalizedPassthrough degenerates to plain StructuralCopy. +func BenchmarkStructuralCopy_L1Write_NoTransform(b *testing.B) { + sourceAr := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + src := parseOnto(sourceAr, []byte(benchEntitySchemaShape)) + obj := benchNoAliasObject() + + l, ar := newBenchLoader() + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + _ = l.structuralCopyNormalizedPassthrough(src, obj) + ar.Reset() + } +} + +// BenchmarkStructuralCopy_L1Write_WithTransform: +// populateL1Cache path with alias normalization — the hot path for any +// query that aliases entity fields. +func BenchmarkStructuralCopy_L1Write_WithTransform(b *testing.B) { + sourceAr := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + src := parseOnto(sourceAr, []byte(benchEntityResponseShape)) + obj := benchAliasedObject() + + l, ar := newBenchLoader() + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + _ = l.structuralCopyNormalizedPassthrough(src, obj) + ar.Reset() + } +} + +// ---------- L1 Read ---------- + +// BenchmarkStructuralCopy_L1Read_NoTransform: +// tryL1CacheLoad path with no aliases — plain StructuralCopy. +func BenchmarkStructuralCopy_L1Read_NoTransform(b *testing.B) { + sourceAr := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + src := parseOnto(sourceAr, []byte(benchEntitySchemaShape)) + obj := benchNoAliasObject() + + l, ar := newBenchLoader() + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + _ = l.structuralCopyDenormalizedPassthrough(src, obj) + ar.Reset() + } +} + +// BenchmarkStructuralCopy_L1Read_WithTransform: +// tryL1CacheLoad path with alias denormalization — re-applies the request's +// aliases to the schema-shape stored value. +func BenchmarkStructuralCopy_L1Read_WithTransform(b *testing.B) { + sourceAr := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + src := parseOnto(sourceAr, []byte(benchEntitySchemaShape)) + obj := benchAliasedObject() + + l, ar := newBenchLoader() + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + _ = l.structuralCopyDenormalizedPassthrough(src, obj) + ar.Reset() + } +} + +// ---------- L2 Read (parse + denormalize) ---------- + +// BenchmarkStructuralCopy_L2Read_NoTransform: +// applyEntityFetchL2Results path with no aliases — parse the wire bytes onto +// l.jsonArena then plain StructuralCopy to produce an isolated materialized value. +func BenchmarkStructuralCopy_L2Read_NoTransform(b *testing.B) { + wire := []byte(benchEntitySchemaShape) + obj := benchNoAliasObject() + + l, ar := newBenchLoader() + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + parsed, err := l.parser.ParseBytesWithArena(l.jsonArena, wire) + if err != nil { + b.Fatal(err) + } + _ = l.structuralCopyDenormalized(parsed, obj) + ar.Reset() + } +} + +// BenchmarkStructuralCopy_L2Read_WithTransform: +// applyEntityFetchL2Results path with alias denormalization — parse + Transform. +func BenchmarkStructuralCopy_L2Read_WithTransform(b *testing.B) { + wire := []byte(benchEntitySchemaShape) + obj := benchAliasedObject() + + l, ar := newBenchLoader() + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + parsed, err := l.parser.ParseBytesWithArena(l.jsonArena, wire) + if err != nil { + b.Fatal(err) + } + _ = l.structuralCopyDenormalized(parsed, obj) + ar.Reset() + } +} + +// ---------- L2 Write (serialize) ---------- + +// BenchmarkStructuralCopy_L2Write_NoTransform: +// cacheKeysToEntriesBatch path — MarshalTo on the already-normalized L1 entry. +// This is the ONLY path prod currently takes: the transform cost was paid on L1 write. +func BenchmarkStructuralCopy_L2Write_NoTransform(b *testing.B) { + ar := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + v := parseOnto(ar, []byte(benchEntitySchemaShape)) + + var buf []byte + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + buf = v.MarshalTo(buf[:0]) + } + _ = buf +} + +// BenchmarkStructuralCopy_L2Write_WithTransform: +// Hypothetical "normalize + serialize" cost — models writing a still-aliased +// response value to L2 without an intermediate L1 entry. Not a live prod path, +// but measures the combined Transform + MarshalTo cost for comparison. +func BenchmarkStructuralCopy_L2Write_WithTransform(b *testing.B) { + sourceAr := arena.NewMonotonicArena(arena.WithMinBufferSize(4096)) + src := parseOnto(sourceAr, []byte(benchEntityResponseShape)) + obj := benchAliasedObject() + + l, ar := newBenchLoader() + var buf []byte + b.ReportAllocs() + b.ResetTimer() + for b.Loop() { + normalized := l.structuralCopyNormalized(src, obj) + buf = normalized.MarshalTo(buf[:0]) + ar.Reset() + } + _ = buf +} diff --git a/v2/pkg/engine/resolve/subgraph_request_singleflight_test.go b/v2/pkg/engine/resolve/subgraph_request_singleflight_test.go index aaf07f5af7..ad06e3aff9 100644 --- a/v2/pkg/engine/resolve/subgraph_request_singleflight_test.go +++ b/v2/pkg/engine/resolve/subgraph_request_singleflight_test.go @@ -175,8 +175,8 @@ func TestSubgraphRequestSingleFlight_SizeHintRollingWindow(t *testing.T) { fetchItem := newFetchItem(fetchInfo) var fetchKey uint64 - for i := 0; i < 50; i++ { - item, shared := flight.GetOrCreateItem(fetchItem, []byte(fmt.Sprintf("body-%d", i)), 0) + for i := range 50 { + item, shared := flight.GetOrCreateItem(fetchItem, fmt.Appendf(nil, "body-%d", i), 0) if shared { t.Fatalf("expected leader for iteration %d", i) } diff --git a/v2/pkg/engine/resolve/tainted_objects_test.go b/v2/pkg/engine/resolve/tainted_objects_test.go index b8205dc724..a3948a7713 100644 --- a/v2/pkg/engine/resolve/tainted_objects_test.go +++ b/v2/pkg/engine/resolve/tainted_objects_test.go @@ -13,56 +13,56 @@ func TestSelectObjectAndIndex(t *testing.T) { tests := []struct { name string responseJSON string - pathElements []interface{} // Can be strings or numbers - expectedEntity string // JSON string of expected entity, or "nil" for nil + pathElements []any // Can be strings or numbers + expectedEntity string // JSON string of expected entity, or "nil" for nil expectedIndex int }{ { name: "complex federation-like structure", responseJSON: `[{"__typename": "User", "id": "1", "name": "John"}, {"__typename": "User", "id": "2", "name": null}]`, - pathElements: []interface{}{1}, + pathElements: []any{1}, expectedEntity: `{"__typename": "User", "id": "2", "name": null}`, expectedIndex: 1, }, { name: "mixed path with number then string", responseJSON: `[{"user": {"name": "John"}}, {"user": {"name": "Jane"}}]`, - pathElements: []interface{}{1, "user"}, + pathElements: []any{1, "user"}, expectedEntity: `{"name": "Jane"}`, expectedIndex: 1, }, { name: "multiple numbers in path", responseJSON: `[[{"name": "A"}, {"name": "B"}], [{"name": "C"}, {"name": "D"}]]`, - pathElements: []interface{}{1, 0}, + pathElements: []any{1, 0}, expectedEntity: `{"name": "C"}`, expectedIndex: 1, }, { name: "path leads to non-existent key", responseJSON: `[{"user": {"name": "John"}}]`, - pathElements: []interface{}{0, "user", "nonexistent"}, + pathElements: []any{0, "user", "nonexistent"}, expectedEntity: "nil", expectedIndex: -1, }, { name: "negative index is an error", responseJSON: `[{"name": "A"}, {"name": "negative"}]`, - pathElements: []interface{}{-2}, + pathElements: []any{-2}, expectedEntity: "nil", expectedIndex: -1, }, { name: "out of bound index is an error", responseJSON: `[{"name": "A"}, {"name": "negative"}]`, - pathElements: []interface{}{9}, + pathElements: []any{9}, expectedEntity: "nil", expectedIndex: -1, }, { name: "empty path is an error", responseJSON: `[{"name": "A"}, {"name": "negative"}]`, - pathElements: []interface{}{}, + pathElements: []any{}, expectedEntity: "nil", expectedIndex: -1, }, @@ -97,10 +97,15 @@ func TestSelectObjectAndIndex(t *testing.T) { expectedEntity, err := astjson.ParseBytes([]byte(tt.expectedEntity)) assert.NoError(t, err, "Failed to parse expected entity JSON") - // Compare JSON representations + // Compare the full entity shape with canonical JSON so object key order + // differences do not hide value regressions. actualJSON := entity.MarshalTo(nil) expectedJSON := expectedEntity.MarshalTo(nil) - assert.JSONEq(t, string(expectedJSON), string(actualJSON), "Entity content mismatch") + assert.Equal(t, + compactJSONForAssert(t, string(expectedJSON)), + compactJSONForAssert(t, string(actualJSON)), + "Entity content mismatch", + ) } }) } diff --git a/v2/pkg/engine/resolve/trace.go b/v2/pkg/engine/resolve/trace.go index ea04e73ec4..f2dc146b3b 100644 --- a/v2/pkg/engine/resolve/trace.go +++ b/v2/pkg/engine/resolve/trace.go @@ -25,6 +25,8 @@ type TraceOptions struct { ExcludeOutput bool // ExcludeLoadStats excludes the load timing information from the trace output ExcludeLoadStats bool + // ExcludeCacheStats excludes cache information from the trace output + ExcludeCacheStats bool // EnablePredictableDebugTimings makes the timings in the trace output predictable for debugging purposes EnablePredictableDebugTimings bool // IncludeTraceOutputInResponseExtensions includes the trace output in the response extensions @@ -43,6 +45,7 @@ func (r *TraceOptions) EnableAll() { r.ExcludeInput = false r.ExcludeOutput = false r.ExcludeLoadStats = false + r.ExcludeCacheStats = false r.EnablePredictableDebugTimings = false r.IncludeTraceOutputInResponseExtensions = true } @@ -57,6 +60,7 @@ func (r *TraceOptions) DisableAll() { r.ExcludeInput = true r.ExcludeOutput = true r.ExcludeLoadStats = true + r.ExcludeCacheStats = true r.EnablePredictableDebugTimings = false r.IncludeTraceOutputInResponseExtensions = false } @@ -81,6 +85,73 @@ type TraceData struct { Request *RequestData `json:"request,omitempty"` } +// CacheTrace captures per-fetch caching behavior for trace output. +// Built AFTER mergeResult + populateCachesAfterFetch, when final cache state is known. +type CacheTrace struct { + // Overall cache timing (aligned with DataSourceLoadTrace) + DurationSinceStartNano int64 `json:"duration_since_start_nanoseconds,omitempty"` + DurationSinceStartPretty string `json:"duration_since_start_pretty,omitempty"` + DurationNano int64 `json:"duration_nanoseconds,omitempty"` + DurationPretty string `json:"duration_pretty,omitempty"` + + // Runtime state (global switches AND per-fetch config combined) + L1Enabled bool `json:"l1_enabled"` + L2Enabled bool `json:"l2_enabled"` + CacheName string `json:"cache_name,omitempty"` + TTLSeconds int64 `json:"ttl_seconds,omitempty"` + + // Entity count — total number of entities involved in this fetch + EntityCount int `json:"entity_count"` + + // L1 cache results + L1Hit int `json:"l1_hit"` + L1Miss int `json:"l1_miss"` + + // L2 cache results + L2Hit int `json:"l2_hit"` + L2Miss int `json:"l2_miss"` + + // Negative caching + NegativeCacheHits int `json:"negative_cache_hits,omitempty"` + + // L2 operation timing (Get) + L2GetDurationNano int64 `json:"l2_get_duration_nanoseconds,omitempty"` + L2GetDurationPretty string `json:"l2_get_duration_pretty,omitempty"` + + // L2 operation timing (Set — regular entries) + L2SetDurationNano int64 `json:"l2_set_duration_nanoseconds,omitempty"` + L2SetDurationPretty string `json:"l2_set_duration_pretty,omitempty"` + + // L2 operation timing (Set — negative entries, separate TTL) + L2SetNegativeDurationNano int64 `json:"l2_set_negative_duration_nanoseconds,omitempty"` + L2SetNegativeDurationPretty string `json:"l2_set_negative_duration_pretty,omitempty"` + + // Configuration flags that affected behavior + PartialCacheLoad bool `json:"partial_cache_load,omitempty"` + ShadowMode bool `json:"shadow_mode,omitempty"` + ShadowHit bool `json:"shadow_hit,omitempty"` // L2 had data but shadow mode forced fetch + IncludeSubgraphHeaderPrefix bool `json:"include_subgraph_header_prefix,omitempty"` + + // Entity-level detail (only for entity/batch fetches with multiple items) + Entities []CacheTraceEntity `json:"entities,omitempty"` + + // Cache keys (when not excluded) + Keys []string `json:"keys,omitempty"` + + // Errors + L2GetError string `json:"l2_get_error,omitempty"` + L2SetError string `json:"l2_set_error,omitempty"` + L2SetNegativeError string `json:"l2_set_negative_error,omitempty"` +} + +// CacheTraceEntity records cache outcome for a single entity in batch fetches. +type CacheTraceEntity struct { + Key string `json:"key"` // Cache key (or hash) + Source string `json:"source"` // "l1", "l2", "subgraph", "negative_cache" + ByteSize int `json:"byte_size,omitempty"` // Size of cached/fetched data + RemainingTTLSeconds float64 `json:"remaining_ttl_seconds,omitempty"` // Remaining TTL in seconds (L2 hits only, 0 = unknown) +} + func GetTrace(ctx context.Context, fetchTree *FetchTreeNode) TraceData { trace := TraceData{ Version: "1", diff --git a/v2/pkg/engine/resolve/trigger_cache_test.go b/v2/pkg/engine/resolve/trigger_cache_test.go new file mode 100644 index 0000000000..d069c74422 --- /dev/null +++ b/v2/pkg/engine/resolve/trigger_cache_test.go @@ -0,0 +1,305 @@ +package resolve + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/wundergraph/go-arena" +) + +// newTestResolverWithCaches constructs a minimal Resolver for handleTriggerEntityCache tests. +// It avoids New() which spawns the event-loop goroutine. +func newTestResolverWithCaches(caches map[string]LoaderCache) *Resolver { + return &Resolver{ + ctx: context.Background(), + options: ResolverOptions{ + Caches: caches, + }, + resolveArenaPool: arena.NewArenaPool(), + subgraphRequestSingleFlight: NewSingleFlight(1), + } +} + +// productCacheKeyTemplate builds an EntityQueryCacheKeyTemplate that uses +// __typename + id as the cache key, matching the standard Product entity. +func productCacheKeyTemplate() *EntityQueryCacheKeyTemplate { + return &EntityQueryCacheKeyTemplate{ + Keys: NewResolvableObjectVariable(&Object{ + Fields: []*Field{ + { + Name: []byte("__typename"), + Value: &String{ + Path: []string{"__typename"}, + }, + }, + { + Name: []byte("id"), + Value: &String{ + Path: []string{"id"}, + }, + }, + }, + }), + } +} + +// TestHandleTriggerEntityCache verifies subscription-driven entity cache operations: +// populate (set), invalidate (delete), typename injection, and filtering. +// Without this, subscription events could corrupt or fail to update the L2 cache. +func TestHandleTriggerEntityCache(t *testing.T) { + t.Run("populate single entity", func(t *testing.T) { + cache := NewFakeLoaderCache() + r := newTestResolverWithCaches(map[string]LoaderCache{"default": cache}) + + resolveCtx := NewContext(context.Background()) + resolveCtx.ExecutionOptions.Caching.EnableL2Cache = true + + config := &triggerEntityCacheConfig{ + pop: &SubscriptionEntityCachePopulation{ + Mode: SubscriptionCacheModePopulate, + CacheKeyTemplate: productCacheKeyTemplate(), + CacheName: "default", + TTL: 30 * time.Second, + SubscriptionFieldName: "updateProduct", + EntityTypeName: "Product", + }, + resolveCtx: resolveCtx, + postProcess: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + } + + data := []byte(`{"data":{"updateProduct":{"id":"prod-1","name":"Widget","price":9.99}}}`) + + r.handleTriggerEntityCache(config, data) + + log := cache.GetLog() + // Expect exactly 1 set with 1 key + // Verify single set with correct key and TTL + require.Equal(t, 1, len(log)) + assert.Equal(t, CacheLogEntry{ + Operation: "set", + Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, TTL: 30 * time.Second}}, + }, log[0]) + + // Verify stored data includes injected __typename + entries, err := cache.Get(context.Background(), []string{`{"__typename":"Product","key":{"id":"prod-1"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0]) + assert.Equal(t, `{"id":"prod-1","name":"Widget","price":9.99,"__typename":"Product"}`, string(entries[0].Value)) + }) + + t.Run("populate array of entities", func(t *testing.T) { + cache := NewFakeLoaderCache() + r := newTestResolverWithCaches(map[string]LoaderCache{"default": cache}) + + resolveCtx := NewContext(context.Background()) + resolveCtx.ExecutionOptions.Caching.EnableL2Cache = true + + config := &triggerEntityCacheConfig{ + pop: &SubscriptionEntityCachePopulation{ + Mode: SubscriptionCacheModePopulate, + CacheKeyTemplate: productCacheKeyTemplate(), + CacheName: "default", + TTL: 30 * time.Second, + SubscriptionFieldName: "updateProducts", + EntityTypeName: "Product", + }, + resolveCtx: resolveCtx, + postProcess: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + } + + data := []byte(`{"data":{"updateProducts":[{"id":"prod-1","name":"Widget"},{"id":"prod-2","name":"Gadget"}]}}`) + + r.handleTriggerEntityCache(config, data) + + log := cache.GetLog() + // Verify single set with both entity keys + require.Equal(t, 1, len(log)) + assert.Equal(t, "set", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"id":"prod-2"}}`, TTL: 30 * time.Second}, + }, log[0].Items) + }) + + t.Run("typename filtering skips non-matching entities", func(t *testing.T) { + // Regression test for the items[:0] backing array reuse bug (fixed in cc9b20aa). + // Before the fix, using items[:0] to filter in-place corrupted the parsed JSON + // array because GetArray() returns a slice over the parser's internal buffer. + cache := NewFakeLoaderCache() + r := newTestResolverWithCaches(map[string]LoaderCache{"default": cache}) + + resolveCtx := NewContext(context.Background()) + resolveCtx.ExecutionOptions.Caching.EnableL2Cache = true + + config := &triggerEntityCacheConfig{ + pop: &SubscriptionEntityCachePopulation{ + Mode: SubscriptionCacheModePopulate, + CacheKeyTemplate: productCacheKeyTemplate(), + CacheName: "default", + TTL: 30 * time.Second, + SubscriptionFieldName: "entityUpdates", + EntityTypeName: "Product", + }, + resolveCtx: resolveCtx, + postProcess: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + } + + // Mixed types: Product, Review, Product — only Products should be cached + data := []byte(`{"data":{"entityUpdates":[{"__typename":"Product","id":"prod-1","name":"Widget"},{"__typename":"Review","id":"rev-1","body":"Great"},{"__typename":"Product","id":"prod-2","name":"Gadget"}]}}`) + + r.handleTriggerEntityCache(config, data) + + log := cache.GetLog() + // Only Products cached, not the Review + require.Equal(t, 1, len(log)) + assert.Equal(t, "set", log[0].Operation) + assert.Equal(t, []CacheLogItem{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, TTL: 30 * time.Second}, + {Key: `{"__typename":"Product","key":{"id":"prod-2"}}`, TTL: 30 * time.Second}, + }, log[0].Items) + + // Verify stored data integrity (the items[:0] bug would corrupt values) + entries, err := cache.Get(context.Background(), []string{ + `{"__typename":"Product","key":{"id":"prod-1"}}`, + `{"__typename":"Product","key":{"id":"prod-2"}}`, + }) + require.NoError(t, err) + require.Equal(t, 2, len(entries)) + require.NotNil(t, entries[0]) + require.NotNil(t, entries[1]) + assert.Equal(t, `{"__typename":"Product","id":"prod-1","name":"Widget"}`, string(entries[0].Value)) + assert.Equal(t, `{"__typename":"Product","id":"prod-2","name":"Gadget"}`, string(entries[1].Value)) + }) + + t.Run("missing typename gets injected", func(t *testing.T) { + cache := NewFakeLoaderCache() + r := newTestResolverWithCaches(map[string]LoaderCache{"default": cache}) + + resolveCtx := NewContext(context.Background()) + resolveCtx.ExecutionOptions.Caching.EnableL2Cache = true + + config := &triggerEntityCacheConfig{ + pop: &SubscriptionEntityCachePopulation{ + Mode: SubscriptionCacheModePopulate, + CacheKeyTemplate: productCacheKeyTemplate(), + CacheName: "default", + TTL: 30 * time.Second, + SubscriptionFieldName: "updateProduct", + EntityTypeName: "Product", + }, + resolveCtx: resolveCtx, + postProcess: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + } + + // Entity without __typename — should be injected from EntityTypeName + data := []byte(`{"data":{"updateProduct":{"id":"prod-1","name":"Widget"}}}`) + + r.handleTriggerEntityCache(config, data) + + log := cache.GetLog() + // Cache key should include injected "Product" typename + require.Equal(t, 1, len(log)) + assert.Equal(t, "set", log[0].Operation) + assert.Equal(t, []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, TTL: 30 * time.Second}}, log[0].Items) + + // Verify stored data includes injected __typename + entries, err := cache.Get(context.Background(), []string{`{"__typename":"Product","key":{"id":"prod-1"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + require.NotNil(t, entries[0]) + assert.Equal(t, `{"id":"prod-1","name":"Widget","__typename":"Product"}`, string(entries[0].Value)) + }) + + t.Run("invalidate mode deletes cache entry", func(t *testing.T) { + cache := NewFakeLoaderCache() + r := newTestResolverWithCaches(map[string]LoaderCache{"default": cache}) + + // Pre-populate cache with an entity + err := cache.Set(context.Background(), withCacheEntryTTL([]*CacheEntry{ + {Key: `{"__typename":"Product","key":{"id":"prod-1"}}`, Value: []byte(`{"__typename":"Product","id":"prod-1","name":"Old"}`)}, + }, 30*time.Second)) + require.NoError(t, err) + cache.ClearLog() + + resolveCtx := NewContext(context.Background()) + resolveCtx.ExecutionOptions.Caching.EnableL2Cache = true + + config := &triggerEntityCacheConfig{ + pop: &SubscriptionEntityCachePopulation{ + Mode: SubscriptionCacheModeInvalidate, + CacheKeyTemplate: productCacheKeyTemplate(), + CacheName: "default", + TTL: 30 * time.Second, + SubscriptionFieldName: "deleteProduct", + EntityTypeName: "Product", + }, + resolveCtx: resolveCtx, + postProcess: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + } + + data := []byte(`{"data":{"deleteProduct":{"id":"prod-1"}}}`) + + r.handleTriggerEntityCache(config, data) + + log := cache.GetLog() + // Verify delete operation + require.Equal(t, 1, len(log)) + assert.Equal(t, CacheLogEntry{ + Operation: "delete", + Items: []CacheLogItem{{Key: `{"__typename":"Product","key":{"id":"prod-1"}}`}}, + }, log[0]) + + // Verify the entry is gone + entries, err := cache.Get(context.Background(), []string{`{"__typename":"Product","key":{"id":"prod-1"}}`}) + require.NoError(t, err) + require.Equal(t, 1, len(entries)) + assert.Nil(t, entries[0]) + }) + + t.Run("missing cache name returns early", func(t *testing.T) { + cache := NewFakeLoaderCache() + // Resolver has "default" cache, but config references "nonexistent" + r := newTestResolverWithCaches(map[string]LoaderCache{"default": cache}) + + resolveCtx := NewContext(context.Background()) + resolveCtx.ExecutionOptions.Caching.EnableL2Cache = true + + config := &triggerEntityCacheConfig{ + pop: &SubscriptionEntityCachePopulation{ + Mode: SubscriptionCacheModePopulate, + CacheKeyTemplate: productCacheKeyTemplate(), + CacheName: "nonexistent", + TTL: 30 * time.Second, + SubscriptionFieldName: "updateProduct", + EntityTypeName: "Product", + }, + resolveCtx: resolveCtx, + postProcess: PostProcessingConfiguration{ + SelectResponseDataPath: []string{"data"}, + }, + } + + data := []byte(`{"data":{"updateProduct":{"id":"prod-1","name":"Widget"}}}`) + + // Should not panic and should not perform any cache operations + r.handleTriggerEntityCache(config, data) + + log := cache.GetLog() + assert.Equal(t, 0, len(log)) + }) +} diff --git a/v2/pkg/engine/resolve/variables.go b/v2/pkg/engine/resolve/variables.go index afc00459ad..3f54993d93 100644 --- a/v2/pkg/engine/resolve/variables.go +++ b/v2/pkg/engine/resolve/variables.go @@ -11,7 +11,6 @@ const ( ObjectVariableKind HeaderVariableKind ResolvableObjectVariableKind - ListVariableKind ) const ( diff --git a/v2/pkg/engine/resolve/variables_renderer.go b/v2/pkg/engine/resolve/variables_renderer.go index 0fa1d3ee14..7a16844b98 100644 --- a/v2/pkg/engine/resolve/variables_renderer.go +++ b/v2/pkg/engine/resolve/variables_renderer.go @@ -277,6 +277,77 @@ func (g *GraphQLVariableRenderer) renderGraphQLValue(data *astjson.Value, out io return } +func NewCacheKeyVariableRenderer() *CacheKeyVariableRenderer { + return &CacheKeyVariableRenderer{} +} + +type CacheKeyVariableRenderer struct { +} + +func (g *CacheKeyVariableRenderer) GetKind() string { + return "cacheKey" +} + +func (g *CacheKeyVariableRenderer) RenderVariable(ctx context.Context, data *astjson.Value, out io.Writer) error { + return g.renderGraphQLValue(data, out) +} + +func (g *CacheKeyVariableRenderer) renderGraphQLValue(data *astjson.Value, out io.Writer) (err error) { + if data == nil { + _, _ = out.Write(literal.NULL) + return + } + switch data.Type() { + case astjson.TypeString: + b := data.GetStringBytes() + _, _ = out.Write(b) + case astjson.TypeObject: + _, _ = out.Write(literal.LBRACE) + o := data.GetObject() + first := true + o.Visit(func(k []byte, v *astjson.Value) { + if err != nil { + return + } + if !first { + _, _ = out.Write(literal.COMMA) + } else { + first = false + } + _, _ = out.Write(k) + _, _ = out.Write(literal.COLON) + err = g.renderGraphQLValue(v, out) + }) + if err != nil { + return err + } + _, _ = out.Write(literal.RBRACE) + case astjson.TypeNull: + _, _ = out.Write(literal.NULL) + case astjson.TypeTrue: + _, _ = out.Write(literal.TRUE) + case astjson.TypeFalse: + _, _ = out.Write(literal.FALSE) + case astjson.TypeArray: + _, _ = out.Write(literal.LBRACK) + arr := data.GetArray() + for i, value := range arr { + if i > 0 { + _, _ = out.Write(literal.COMMA) + } + err = g.renderGraphQLValue(value, out) + if err != nil { + return err + } + } + _, _ = out.Write(literal.RBRACK) + case astjson.TypeNumber: + b := data.MarshalTo(nil) + _, _ = out.Write(b) + } + return +} + func NewCSVVariableRenderer(arrayValueType JsonRootType) *CSVVariableRenderer { return &CSVVariableRenderer{ Kind: VariableRendererKindCsv,