diff --git a/src/HotChocolate/Fusion/benchmarks/k6/deep-recursion.js b/src/HotChocolate/Fusion/benchmarks/k6/deep-recursion.js
index bc804d5a1e5..accba207fc0 100644
--- a/src/HotChocolate/Fusion/benchmarks/k6/deep-recursion.js
+++ b/src/HotChocolate/Fusion/benchmarks/k6/deep-recursion.js
@@ -3,7 +3,7 @@ import { check } from "k6";
import { Rate } from "k6/metrics";
import { textSummary } from "https://jslib.k6.io/k6-summary/0.0.1/index.js";
-const GRAPHQL_URL = 'http://localhost:5000/graphql';
+const GRAPHQL_URL = 'http://localhost:5220/graphql';
const endpoint = __ENV.GATEWAY_ENDPOINT || GRAPHQL_URL;
const mode = __ENV.MODE || "constant";
const isConstant = mode === "constant";
diff --git a/src/HotChocolate/Fusion/benchmarks/k6/eShop.Gateway/Properties/launchSettings.json b/src/HotChocolate/Fusion/benchmarks/k6/eShop.Gateway/Properties/launchSettings.json
index 7981999c10a..fd34c7bfd5c 100644
--- a/src/HotChocolate/Fusion/benchmarks/k6/eShop.Gateway/Properties/launchSettings.json
+++ b/src/HotChocolate/Fusion/benchmarks/k6/eShop.Gateway/Properties/launchSettings.json
@@ -4,8 +4,8 @@
"commandName": "Project",
"dotnetRunMessages": true,
"launchBrowser": true,
- "launchUrl": "http://localhost:5000/graphql",
- "applicationUrl": "http://localhost:5000",
+ "launchUrl": "http://localhost:5220/graphql",
+ "applicationUrl": "http://localhost:5220",
"environmentVariables": {
"ASPNETCORE_ENVIRONMENT": "Production"
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Listeners/FusionActivityExecutionDiagnosticEventListener.cs b/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Listeners/FusionActivityExecutionDiagnosticEventListener.cs
index 816544718d6..71224d39a04 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Listeners/FusionActivityExecutionDiagnosticEventListener.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Listeners/FusionActivityExecutionDiagnosticEventListener.cs
@@ -10,7 +10,8 @@ namespace HotChocolate.Fusion.Diagnostics.Listeners;
internal sealed class FusionActivityExecutionDiagnosticEventListener(
FusionActivityEnricher enricher,
- InstrumentationOptions options) : FusionExecutionDiagnosticEventListener
+ InstrumentationOptions options)
+ : FusionExecutionDiagnosticEventListener
{
public override IDisposable ExecuteRequest(RequestContext context)
{
diff --git a/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Spans/ExecutePlanNodeSpan.cs b/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Spans/ExecutePlanNodeSpan.cs
index c3f7b26f4fe..447daa90536 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Spans/ExecutePlanNodeSpan.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Diagnostics/Spans/ExecutePlanNodeSpan.cs
@@ -52,10 +52,6 @@ internal sealed class ExecutePlanNodeSpan(
{
SetSourceSchemaTags(activity, operationExecutionNode.Operation, schemaName);
}
- else if (node is OperationBatchExecutionNode batchExecutionNode)
- {
- SetSourceSchemaTags(activity, batchExecutionNode.Operation, schemaName);
- }
return new ExecutePlanNodeSpan(activity, context, node, schemaName, enricher);
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/BatchStreamResult.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/BatchStreamResult.cs
new file mode 100644
index 00000000000..64dda2da55b
--- /dev/null
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/BatchStreamResult.cs
@@ -0,0 +1,7 @@
+namespace HotChocolate.Fusion.Execution.Clients;
+
+///
+/// Represents a single result from a batch stream, tagged with the index of the
+/// request it belongs to.
+///
+public readonly record struct BatchStreamResult(int RequestIndex, SourceSchemaResult Result);
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaClient.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaClient.cs
index b126206fb50..17caded927d 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaClient.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaClient.cs
@@ -27,16 +27,18 @@ ValueTask ExecuteAsync(
CancellationToken cancellationToken);
///
- /// Executes multiple GraphQL operations as a single batched transport request.
+ /// Executes multiple GraphQL operations as a single batched transport request and
+ /// streams results back as they arrive. Each result is tagged with its request index
+ /// so the caller can route it to the correct operation.
///
/// The current operation plan execution context.
/// The requests to include in the batch.
/// A token to cancel the operation.
///
- /// A dictionary mapping each request's ID
- /// to its corresponding response.
+ /// An async stream of where each item contains
+ /// the request index and the corresponding .
///
- ValueTask> ExecuteBatchAsync(
+ IAsyncEnumerable ExecuteBatchStreamAsync(
OperationPlanContext context,
ImmutableArray requests,
CancellationToken cancellationToken);
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaDispatcher.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaDispatcher.cs
deleted file mode 100644
index 0a145c41d2f..00000000000
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaDispatcher.cs
+++ /dev/null
@@ -1,43 +0,0 @@
-namespace HotChocolate.Fusion.Execution.Clients;
-
-///
-/// Controls the lifecycle of batching groups during plan execution.
-/// The executor uses this interface to register groups discovered in the plan,
-/// notify the dispatcher when nodes are skipped, and abort all pending work
-/// on cancellation or failure.
-///
-public interface ISourceSchemaDispatcher
-{
- ///
- /// Registers a batching group. The dispatcher will hold requests for the specified
- /// node IDs until all members have submitted or been skipped.
- ///
- /// The batching group identifier assigned at planning time.
- /// The execution node IDs that belong to this group.
- void RegisterGroup(int groupId, IReadOnlyList nodeIds);
-
- ///
- /// Marks a node as skipped, removing it from its group's outstanding member count.
- /// If this was the last outstanding member, the group is dispatched with
- /// whatever requests have been submitted so far.
- ///
- /// The ID of the execution node to skip.
- void SkipNode(int nodeId);
-
- ///
- /// Aborts all pending batching groups, faulting any waiting callers with the
- /// specified error. Subsequent calls to
- /// and become no-ops.
- ///
- ///
- /// The exception to propagate to pending callers, or null to use a
- /// default .
- ///
- void Abort(Exception? error = null);
-
- ///
- /// Resets the dispatcher to its initial state, clearing all groups and the aborted flag.
- /// Must be called between subscription events so that groups can be re-registered.
- ///
- void Reset();
-}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaScheduler.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaScheduler.cs
deleted file mode 100644
index 9ee2b5b3cc0..00000000000
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/ISourceSchemaScheduler.cs
+++ /dev/null
@@ -1,22 +0,0 @@
-namespace HotChocolate.Fusion.Execution.Clients;
-
-///
-/// Schedules the execution of source schema requests.
-/// Execution nodes call this interface instead of directly,
-/// allowing the scheduler to hold requests that belong to the same batching group until all
-/// group members have submitted or been skipped, and then dispatch them as a single batch.
-///
-public interface ISourceSchemaScheduler
-{
- ///
- /// Submits a request for execution. If the request belongs to a batching group,
- /// the returned task may not complete until all other members of the group have
- /// submitted or been skipped.
- ///
- /// The request to execute.
- /// A token to cancel the operation.
- /// The response from the source schema.
- ValueTask ExecuteAsync(
- SourceSchemaClientRequest request,
- CancellationToken cancellationToken);
-}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaClientRequest.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaClientRequest.cs
index 54035737ac8..6c9bb83a778 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaClientRequest.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaClientRequest.cs
@@ -19,14 +19,6 @@ public sealed class SourceSchemaClientRequest
///
public required string SchemaName { get; init; }
- ///
- /// Gets the optional batching group identifier assigned at planning time.
- /// When set, the holds this request until
- /// all nodes in the same group have submitted or been skipped, then dispatches
- /// them together via .
- ///
- public int? BatchingGroupId { get; init; }
-
///
/// Gets the GraphQL operation type (query, mutation, or subscription).
///
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaHttpClient.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaHttpClient.cs
index 1c1a4a972bb..bb54b018b85 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaHttpClient.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaHttpClient.cs
@@ -2,7 +2,6 @@
using System.Collections.Immutable;
using System.Diagnostics;
using System.Runtime.CompilerServices;
-using System.Runtime.ExceptionServices;
using System.Text.Json;
using HotChocolate.Fusion.Execution.Nodes;
using HotChocolate.Fusion.Properties;
@@ -88,20 +87,18 @@ public async ValueTask ExecuteAsync(
}
///
- public async ValueTask> ExecuteBatchAsync(
+ public async IAsyncEnumerable ExecuteBatchStreamAsync(
OperationPlanContext context,
ImmutableArray requests,
- CancellationToken cancellationToken)
+ [EnumeratorCancellation] CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(context);
if (requests.Length == 0)
{
- return [];
+ yield break;
}
- Debug.WriteLine(requests[0].SchemaName);
-
if (ContainsSubscriptionRequest(requests))
{
throw new InvalidOperationException(
@@ -113,28 +110,95 @@ public async ValueTask> ExecuteBatchA
var httpResponse = await _client.SendAsync(httpRequest, cancellationToken).ConfigureAwait(false);
- var uri = httpRequest.Uri ?? new Uri("http://unknown");
- var contentType = httpResponse.RawContentType ?? "unknown";
- var isSuccessful = httpResponse.IsSuccessStatusCode;
+ try
+ {
+ await foreach (var result in httpResponse.ReadAsResultStreamAsync().WithCancellation(cancellationToken))
+ {
+ var requestIndex = ResolveRequestIndex(requests, result);
- var nodeResponses = new NodeResponse[requests.Length];
- var builder = ImmutableArray.CreateBuilder(requests.Length);
+ // When the subgraph returns a blanket error without a
+ // requestIndex, the error applies to every request in the
+ // batch. We yield the same result for each request so
+ // downstream error handling sees the error on every path.
+ if (requestIndex == -1)
+ {
+ for (var i = 0; i < requests.Length; i++)
+ {
+ var req = requests[i];
- for (var i = 0; i < requests.Length; i++)
- {
- var nodeResponse = new NodeResponse(uri, contentType, isSuccessful);
- nodeResponses[i] = nodeResponse;
- builder.Add(nodeResponse);
- }
+ if (!TryGetResultPath(req, variableIndex: 0, out var p, out var ap))
+ {
+ continue;
+ }
- _ = ReadBatchStreamInBackgroundAsync(
- context,
- requests,
- nodeResponses,
- httpResponse,
- cancellationToken);
+ var ssr = ap.IsDefaultOrEmpty
+ ? new SourceSchemaResult(p, result)
+ : new SourceSchemaResult(p, result, additionalPaths: ap);
- return builder.MoveToImmutable();
+ _configuration.OnSourceSchemaResult?.Invoke(context, req.Node, ssr);
+ yield return new BatchStreamResult(i, ssr);
+ }
+
+ continue;
+ }
+
+ var request = requests[requestIndex];
+ var variableIndex = ResolveVariableIndex(request, result);
+
+ // When the response lacks a variableIndex, the error
+ // applies to every variable set in this request.
+ if (variableIndex == -1)
+ {
+ for (var vi = 0; vi < request.Variables.Length; vi++)
+ {
+ if (!TryGetResultPath(request, vi, out var vp, out var vap))
+ {
+ continue;
+ }
+
+ var vssr = vap.IsDefaultOrEmpty
+ ? new SourceSchemaResult(vp, result)
+ : new SourceSchemaResult(vp, result, additionalPaths: vap);
+
+ _configuration.OnSourceSchemaResult?.Invoke(context, request.Node, vssr);
+ yield return new BatchStreamResult(requestIndex, vssr);
+ }
+
+ continue;
+ }
+
+ if (!TryGetResultPath(request, variableIndex, out var path, out var additionalPaths))
+ {
+ result.Dispose();
+ throw new InvalidOperationException(
+ string.Format(
+ FusionExecutionResources.SourceSchemaHttpClient_InvalidVariableIndex,
+ variableIndex,
+ request.Node.Id));
+ }
+
+ var sourceSchemaResult = additionalPaths.IsDefaultOrEmpty
+ ? new SourceSchemaResult(path, result)
+ : new SourceSchemaResult(path, result, additionalPaths: additionalPaths);
+
+ var onSourceSchemaResult = _configuration.OnSourceSchemaResult;
+ onSourceSchemaResult?.Invoke(context, request.Node, sourceSchemaResult);
+
+ if (onSourceSchemaResult is not null && !additionalPaths.IsDefaultOrEmpty)
+ {
+ foreach (var additionalPath in additionalPaths)
+ {
+ onSourceSchemaResult(context, request.Node, sourceSchemaResult.WithPath(additionalPath));
+ }
+ }
+
+ yield return new BatchStreamResult(requestIndex, sourceSchemaResult);
+ }
+ }
+ finally
+ {
+ httpResponse.Dispose();
+ }
}
///
@@ -287,78 +351,28 @@ private static VariableBatchRequest CreateVariableBatchRequest(
extensions: null);
}
- private async Task ReadBatchStreamInBackgroundAsync(
- OperationPlanContext context,
+ private static int ResolveRequestIndex(
ImmutableArray requests,
- NodeResponse[] nodeResponses,
- GraphQLHttpResponse httpResponse,
- CancellationToken cancellationToken)
+ SourceResultDocument result)
{
- try
+ if (requests.Length == 1)
{
- await foreach (var result in httpResponse.ReadAsResultStreamAsync()
- .WithCancellation(cancellationToken))
- {
- var requestIndex = result.Root.GetProperty(RequestIndex).GetInt32();
-
- if ((uint)requestIndex >= (uint)requests.Length)
- {
- result.Dispose();
- throw new InvalidOperationException(
- string.Format(
- FusionExecutionResources.SourceSchemaHttpClient_InvalidRequestIndex,
- requestIndex));
- }
-
- var request = requests[requestIndex];
- var nodeResponse = nodeResponses[requestIndex];
-
- var variableIndex = ResolveVariableIndex(request, result);
-
- if (!TryGetResultPath(request, variableIndex, out var path, out var additionalPaths))
- {
- result.Dispose();
- throw new InvalidOperationException(
- string.Format(
- FusionExecutionResources.SourceSchemaHttpClient_InvalidVariableIndex,
- variableIndex,
- request.Node.Id));
- }
-
- WriteResultToChannel(context, request.Node, nodeResponse, path, additionalPaths, result);
- }
-
- // Stream completed successfully. Complete all channels, failing any
- // that never received results (fail-loud).
- for (var i = 0; i < nodeResponses.Length; i++)
- {
- var nodeResponse = nodeResponses[i];
-
- if (!nodeResponse.HasReceivedResults)
- {
- nodeResponse.Complete(
- new InvalidOperationException(
- string.Format(
- FusionExecutionResources.SourceSchemaHttpClient_NoResultForNode,
- requests[i].Node.Id)));
- }
- else
- {
- nodeResponse.Complete();
- }
- }
+ return 0;
}
- catch (Exception ex)
+
+ if (!result.Root.TryGetProperty(RequestIndex, out var requestIndexElement))
{
- for (var i = 0; i < nodeResponses.Length; i++)
- {
- nodeResponses[i].Complete(ex);
- }
+ return -1;
}
- finally
+
+ var requestIndex = requestIndexElement.GetInt32();
+
+ if ((uint)requestIndex < (uint)requests.Length)
{
- httpResponse.Dispose();
+ return requestIndex;
}
+
+ throw ThrowHelper.RequestIndexOutOfRange(requestIndex);
}
private static int ResolveVariableIndex(
@@ -372,15 +386,19 @@ private static int ResolveVariableIndex(
return 0;
}
- var variableIndex = result.Root.GetProperty(VariableIndex).GetInt32();
+ if (!result.Root.TryGetProperty(VariableIndex, out var variableIndexElement))
+ {
+ return -1;
+ }
+
+ var variableIndex = variableIndexElement.GetInt32();
if ((uint)variableIndex < (uint)variableCount)
{
return variableIndex;
}
- throw new InvalidOperationException(
- $"The batch response contains an out-of-range variableIndex '{variableIndex}'.");
+ throw ThrowHelper.VariableIndexOutOfRange(variableIndex);
}
private static bool TryGetResultPath(
@@ -485,41 +503,6 @@ private void ConfigureBatchCallbacks(
};
}
- private void WriteResultToChannel(
- OperationPlanContext context,
- ExecutionNode node,
- NodeResponse nodeResponse,
- CompactPath path,
- ImmutableArray additionalPaths,
- SourceResultDocument document)
- {
- var sourceSchemaResult = additionalPaths.IsDefaultOrEmpty
- ? new SourceSchemaResult(path, document)
- : new SourceSchemaResult(path, document, additionalPaths: additionalPaths);
- var onSourceSchemaResult = _configuration.OnSourceSchemaResult;
-
- onSourceSchemaResult?.Invoke(context, node, sourceSchemaResult);
-
- if (!nodeResponse.TryWrite(sourceSchemaResult))
- {
- sourceSchemaResult.Dispose();
- return;
- }
-
- nodeResponse.HasReceivedResults = true;
-
- if (onSourceSchemaResult is null || additionalPaths.IsDefaultOrEmpty)
- {
- return;
- }
-
- // Preserve callback behavior for all logical result paths without enqueueing aliases.
- foreach (var additionalPath in additionalPaths)
- {
- onSourceSchemaResult(context, node, sourceSchemaResult.WithPath(additionalPath));
- }
- }
-
private static bool ContainsSubscriptionRequest(
IReadOnlyList requests)
{
@@ -711,156 +694,4 @@ public override async IAsyncEnumerable ReadAsResultStreamAsy
public override void Dispose() => response.Dispose();
}
-
- ///
- /// A streaming response for a single execution node within a batched HTTP request.
- /// Results are pushed into a under lock by the background stream
- /// reader and signalled via a lightweight .
- /// The execution node reads lazily via .
- ///
- private sealed class NodeResponse(Uri uri, string contentType, bool isSuccessful) : SourceSchemaClientResponse
- {
-#if NET9_0_OR_GREATER
- private readonly Lock _sync = new();
-#else
- private readonly object _sync = new();
-#endif
- private const int InitialCapacity = 32;
- private static readonly ArrayPool s_pool = ArrayPool.Shared;
- private readonly AsyncAutoResetEvent _signal = new();
- private SourceSchemaResult[] _results = s_pool.Rent(InitialCapacity);
- private int _resultsCount;
- private SourceSchemaResult[] _drain = s_pool.Rent(InitialCapacity);
- private int _drainCount;
- private volatile bool _completed;
- private Exception? _error;
- private bool _disposed;
-
- public override Uri Uri { get; } = uri;
-
- public override string ContentType { get; } = contentType;
-
- public override bool IsSuccessful { get; } = isSuccessful;
-
- ///
- /// Gets whether at least one result has been written to this response.
- /// Used to detect nodes that received no results from the batch stream.
- ///
- internal bool HasReceivedResults { get; set; }
-
- internal bool TryWrite(SourceSchemaResult result)
- {
- if (_disposed)
- {
- return false;
- }
-
- lock (_sync)
- {
- if (_resultsCount == _results.Length)
- {
- var newArray = s_pool.Rent(_results.Length * 2);
- _results.AsSpan(0, _resultsCount).CopyTo(newArray);
- s_pool.Return(_results, clearArray: true);
- _results = newArray;
- }
-
- _results[_resultsCount++] = result;
- }
-
- _signal.Set();
- return true;
- }
-
- internal void Complete(Exception? error = null)
- {
- _error = error;
- _completed = true;
- _signal.Set();
- }
-
- public override async IAsyncEnumerable ReadAsResultStreamAsync(
- [EnumeratorCancellation] CancellationToken cancellationToken = default)
- {
- while (true)
- {
- cancellationToken.ThrowIfCancellationRequested();
-
- var (buffer, count) = Drain();
- for (var i = 0; i < count; i++)
- {
- yield return buffer[i];
- }
-
- if (_completed)
- {
- // Final drain, writer may have enqueued between our last
- // drain and the completion flag becoming visible.
- (buffer, count) = Drain();
- for (var i = 0; i < count; i++)
- {
- yield return buffer[i];
- }
-
- if (_error is not null)
- {
- ExceptionDispatchInfo.Throw(_error);
- }
-
- yield break;
- }
-
- await _signal;
- }
- }
-
- private (SourceSchemaResult[] Buffer, int Count) Drain()
- {
- lock (_sync)
- {
- if (_resultsCount == 0)
- {
- return (Array.Empty(), 0);
- }
-
- // Clear the previous drain buffer so it's ready
- // to become the next write target.
- _drain.AsSpan(0, _drainCount).Clear();
- _drainCount = 0;
-
- // Swap the buffers so the writer can keep adding
- // while we drain outside the lock.
- (_results, _drain) = (_drain, _results);
- (_resultsCount, _drainCount) = (0, _resultsCount);
- }
-
- return (_drain, _drainCount);
- }
-
- public override void Dispose()
- {
- if (_disposed)
- {
- return;
- }
-
- _disposed = true;
-
- Complete();
-
- var (buffer, count) = Drain();
- for (var i = 0; i < count; i++)
- {
- buffer[i].Dispose();
- }
-
- lock (_sync)
- {
- s_pool.Return(_results, clearArray: true);
- s_pool.Return(_drain, clearArray: true);
- _results = [];
- _drain = [];
- }
- }
- }
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaRequestDispatcher.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaRequestDispatcher.cs
deleted file mode 100644
index 10a17d1247d..00000000000
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Clients/SourceSchemaRequestDispatcher.cs
+++ /dev/null
@@ -1,566 +0,0 @@
-using System.Collections.Immutable;
-using System.Runtime.InteropServices;
-using HotChocolate.Language;
-using static HotChocolate.Fusion.Execution.Clients.SourceSchemaClientCapabilities;
-using static HotChocolate.Fusion.Properties.FusionExecutionResources;
-
-namespace HotChocolate.Fusion.Execution.Clients;
-
-///
-/// Coordinates the dispatch of source schema requests, implementing both
-/// and .
-///
-/// Requests that do not belong to a batching group (or are subscriptions) are forwarded
-/// directly to the underlying . Grouped requests are
-/// held until every node in the group has submitted or been skipped, at which point they
-/// are dispatched together via .
-///
-///
-internal sealed class SourceSchemaRequestDispatcher
- : ISourceSchemaScheduler
- , ISourceSchemaDispatcher
-{
- private const int NodeStateUnregistered = -1;
- private const int NodeStatePending = 0;
- private const int NodeStateSubmitted = 1;
- private const int NodeStateSkipped = 2;
-
-#if NET9_0_OR_GREATER
- private readonly Lock _sync = new();
-#else
- private readonly object _sync = new();
-#endif
- private readonly OperationPlanContext _context;
- private readonly ISourceSchemaClientScope _clientScope;
- private readonly CancellationToken _requestAborted;
- private readonly Dictionary _groups = [];
- private readonly List _trackedNodeIdSlots = [];
- private int[] _groupByNodeIdSlots = [];
- private int[] _nodeStateSlots = [];
- private Exception? _abortError;
- private bool _aborted;
-
- ///
- /// Initializes a new instance of
- /// using the given to obtain the client scope and
- /// cancellation token for all downstream requests.
- ///
- ///
- /// The operation plan context that owns this dispatcher. The dispatcher uses
- /// to resolve clients and
- /// to propagate cancellation.
- ///
- public SourceSchemaRequestDispatcher(OperationPlanContext context)
- {
- ArgumentNullException.ThrowIfNull(context);
-
- _context = context;
- _clientScope = context.ClientScope;
- _requestAborted = context.RequestContext.RequestAborted;
- }
-
- ///
- /// Executes a source schema request. If the request belongs to a batching group,
- /// it is held until all nodes in that group have submitted or been skipped, then
- /// dispatched as a batch. Otherwise, it is forwarded immediately.
- ///
- /// The source schema request to execute.
- /// A token to cancel the operation.
- /// The response from the source schema.
- ///
- /// The request's node was not registered in the expected batching group.
- ///
- public ValueTask ExecuteAsync(
- SourceSchemaClientRequest request,
- CancellationToken cancellationToken)
- {
- ArgumentNullException.ThrowIfNull(request);
-
- var client = _clientScope.GetClient(request.SchemaName, request.OperationType);
-
- // if the request is not part of a batch group,
- // if it is a mutation or subscription,
- // or if the source schema does not support request batching,
- // we will dispatch it right away without waiting for other requests.
- if ((client.Capabilities & RequestBatching) != RequestBatching
- || request.BatchingGroupId is not { } groupId
- || request.OperationType is OperationType.Mutation or OperationType.Subscription)
- {
- return client.ExecuteAsync(_context, request, cancellationToken);
- }
-
- PendingRequest? pendingRequest = null;
- ImmutableArray pendingRequests = [];
- var needsDispatch = false;
- Exception? abortError = null;
-
- lock (_sync)
- {
- // the execution was aborted by the operation plan executor.
- if (_aborted)
- {
- abortError = CreateAbortException();
- }
- // we register the node to be dispatched.
- else if (_groups.TryGetValue(groupId, out var group)
- && group.TrySubmit(request, _nodeStateSlots, out pendingRequest))
- {
- if (group.TryCreateDispatch(out pendingRequests))
- {
- needsDispatch = true;
- RemoveGroup(group);
- }
- }
- // we are in an invalid state where the executor did not announce all groups or nodes.
- else
- {
- abortError = new InvalidOperationException(
- string.Format(
- SourceSchemaRequestDispatcher_NodeNotRegisteredInGroup,
- request.Node.Id,
- groupId));
- }
- }
-
- // now we handle the decisions we made in the lock.
- if (abortError is not null)
- {
- return ValueTask.FromException(abortError);
- }
-
- if (needsDispatch)
- {
- BeginDispatchGroup(pendingRequests);
- }
-
- return new ValueTask(pendingRequest!.Completion.Task);
- }
-
- ///
- /// Registers a batching group with the given node IDs. All registered nodes must
- /// either submit a request via or be skipped via
- /// before the group is dispatched.
- ///
- /// The batching group identifier.
- /// The execution node IDs that belong to this group.
- public void RegisterGroup(int groupId, IReadOnlyList nodeIds)
- {
- ArgumentNullException.ThrowIfNull(nodeIds);
-
- if (nodeIds.Count == 0)
- {
- throw new ArgumentException(
- SourceSchemaRequestDispatcher_RegisterGroupEmptyNodeIds,
- nameof(nodeIds));
- }
-
- lock (_sync)
- {
- if (_aborted)
- {
- return;
- }
-
- if (!_groups.TryGetValue(groupId, out var group))
- {
- group = new GroupState(groupId, nodeIds.Count);
- _groups.Add(groupId, group);
- }
-
- foreach (var nodeId in nodeIds)
- {
- EnsureNodeIdSlotCapacity(nodeId + 1);
- var existingGroupId = _groupByNodeIdSlots[nodeId];
-
- if (existingGroupId < 0)
- {
- _trackedNodeIdSlots.Add(nodeId);
- group.RegisterNode(nodeId);
- }
- else if (existingGroupId != groupId)
- {
- group.RegisterNode(nodeId);
- }
-
- _groupByNodeIdSlots[nodeId] = groupId;
- _nodeStateSlots[nodeId] = NodeStatePending;
- }
- }
- }
-
- ///
- /// Marks a node as skipped so it no longer blocks dispatch of its batching group.
- /// If this was the last remaining node in the group, the group is dispatched.
- ///
- /// The execution node ID to skip.
- public void SkipNode(int nodeId)
- {
- ImmutableArray pendingRequests;
- var needsDispatch = false;
-
- lock (_sync)
- {
- if (_aborted)
- {
- return;
- }
-
- if ((uint)nodeId >= (uint)_groupByNodeIdSlots.Length)
- {
- return;
- }
-
- var groupId = _groupByNodeIdSlots[nodeId];
-
- if (groupId < 0 || !_groups.TryGetValue(groupId, out var group))
- {
- return;
- }
-
- group.Skip(nodeId, _nodeStateSlots);
-
- if (group.TryCreateDispatch(out pendingRequests))
- {
- needsDispatch = true;
- RemoveGroup(group);
- }
- }
-
- if (needsDispatch)
- {
- BeginDispatchGroup(pendingRequests);
- }
- }
-
- ///
- /// Aborts the dispatcher, failing all pending requests with the given error.
- /// Subsequent calls to , ,
- /// and become no-ops.
- ///
- ///
- /// The error to propagate to pending requests. If null, an
- /// is used.
- ///
- public void Abort(Exception? error = null)
- {
- PendingRequest[] pendingRequests;
- Exception abortError;
-
- lock (_sync)
- {
- if (_aborted)
- {
- return;
- }
-
- _aborted = true;
- _abortError = error ?? new OperationCanceledException(SourceSchemaRequestDispatcher_OperationAborted);
- abortError = _abortError;
- pendingRequests = [.. _groups.Values.SelectMany(static t => t.PendingRequests)];
-
- _groups.Clear();
- ClearNodeIdSlots();
- }
-
- foreach (var pendingRequest in pendingRequests)
- {
- pendingRequest.Completion.TrySetException(abortError);
- }
- }
-
- ///
- /// Resets the dispatcher to its initial state, clearing all groups and the aborted flag.
- /// Any pending requests from a prior event are abandoned (they should have been
- /// completed or aborted before calling this).
- ///
- public void Reset()
- {
- lock (_sync)
- {
- _aborted = false;
- _abortError = null;
- _groups.Clear();
- ClearNodeIdSlots();
- }
- }
-
- private void BeginDispatchGroup(ImmutableArray pendingRequests)
- {
- // if pending requests is 0 it means the whole group was skipped and we do not need to do anything.
- if (pendingRequests.Length == 0)
- {
- return;
- }
-
- // in all other cases we dispatch the group asynchronously.
- _ = DispatchGroupAsync(pendingRequests);
- }
-
- private async Task DispatchGroupAsync(ImmutableArray pendingRequests)
- {
- try
- {
- if (pendingRequests.Length == 1)
- {
- var pendingRequest = pendingRequests[0];
-
- var client = _clientScope.GetClient(
- pendingRequest.Request.SchemaName,
- pendingRequest.Request.OperationType);
-
- await DispatchSingleAsync(client, pendingRequest).ConfigureAwait(false);
- }
- else
- {
- var client = _clientScope.GetClient(
- pendingRequests[0].Request.SchemaName,
- pendingRequests[0].Request.OperationType);
-
- await DispatchBatchAsync(client, pendingRequests).ConfigureAwait(false);
- }
- }
- catch (Exception ex)
- {
- foreach (var pendingRequest in pendingRequests)
- {
- pendingRequest.Completion.TrySetException(ex);
- }
- }
- }
-
- private async ValueTask DispatchSingleAsync(
- ISourceSchemaClient client,
- PendingRequest pendingRequest)
- {
- try
- {
- var response = await client.ExecuteAsync(
- _context,
- pendingRequest.Request,
- _requestAborted)
- .ConfigureAwait(false);
-
- if (!pendingRequest.Completion.TrySetResult(response))
- {
- response.Dispose();
- }
- }
- catch (OperationCanceledException)
- {
- pendingRequest.Completion.TrySetCanceled();
- }
- catch (Exception ex)
- {
- pendingRequest.Completion.TrySetException(ex);
- }
- }
-
- private async ValueTask DispatchBatchAsync(
- ISourceSchemaClient client,
- ImmutableArray pendingRequests)
- {
- try
- {
- var requests = new SourceSchemaClientRequest[pendingRequests.Length];
-
- for (var i = 0; i < pendingRequests.Length; i++)
- {
- requests[i] = pendingRequests[i].Request;
- }
-
- var responses = await client.ExecuteBatchAsync(
- _context,
- ImmutableCollectionsMarshal.AsImmutableArray(requests),
- _requestAborted)
- .ConfigureAwait(false);
-
- if (responses.Length != pendingRequests.Length)
- {
- throw new InvalidOperationException(
- SourceSchemaRequestDispatcher_BatchResponseCountMismatch);
- }
-
- for (var i = 0; i < pendingRequests.Length; i++)
- {
- var pendingRequest = pendingRequests[i];
- var response = responses[i];
-
- if (!pendingRequest.Completion.TrySetResult(response))
- {
- response.Dispose();
- }
- }
- }
- catch (OperationCanceledException)
- {
- foreach (var pendingRequest in pendingRequests)
- {
- pendingRequest.Completion.TrySetCanceled();
- }
- }
- catch (Exception ex)
- {
- foreach (var pendingRequest in pendingRequests)
- {
- pendingRequest.Completion.TrySetException(ex);
- }
- }
- }
-
- private Exception CreateAbortException()
- => _abortError ?? new OperationCanceledException(SourceSchemaRequestDispatcher_OperationAborted);
-
- private void RemoveGroup(GroupState group)
- {
- _groups.Remove(group.Id);
-
- foreach (var nodeId in group.NodeIds)
- {
- if ((uint)nodeId < (uint)_groupByNodeIdSlots.Length)
- {
- _groupByNodeIdSlots[nodeId] = -1;
- _nodeStateSlots[nodeId] = NodeStateUnregistered;
- }
- }
- }
-
- private void ClearNodeIdSlots()
- {
- if (_trackedNodeIdSlots.Count == 0)
- {
- return;
- }
-
- foreach (var nodeId in _trackedNodeIdSlots)
- {
- if ((uint)nodeId < (uint)_groupByNodeIdSlots.Length)
- {
- _groupByNodeIdSlots[nodeId] = -1;
- _nodeStateSlots[nodeId] = NodeStateUnregistered;
- }
- }
-
- _trackedNodeIdSlots.Clear();
- }
-
- private void EnsureNodeIdSlotCapacity(int minCapacity)
- {
- if (_groupByNodeIdSlots.Length >= minCapacity)
- {
- return;
- }
-
- var newCapacity = _groupByNodeIdSlots.Length == 0 ? 8 : _groupByNodeIdSlots.Length;
-
- while (newCapacity < minCapacity)
- {
- newCapacity *= 2;
- }
-
- var groupByNodeIdSlots = new int[newCapacity];
- var nodeStateSlots = new int[newCapacity];
- Array.Fill(groupByNodeIdSlots, -1);
- Array.Fill(nodeStateSlots, NodeStateUnregistered);
-
- if (_groupByNodeIdSlots.Length > 0)
- {
- Array.Copy(_groupByNodeIdSlots, groupByNodeIdSlots, _groupByNodeIdSlots.Length);
- Array.Copy(_nodeStateSlots, nodeStateSlots, _nodeStateSlots.Length);
- }
-
- _groupByNodeIdSlots = groupByNodeIdSlots;
- _nodeStateSlots = nodeStateSlots;
- }
-
- private sealed class GroupState(int id, int initialCapacity)
- {
- private readonly List _nodeIds = new(initialCapacity);
- private readonly List _pendingRequests = new(initialCapacity);
- private int _remainingNodes;
- private bool _dispatchCreated;
-
- public int Id { get; } = id;
-
- public IEnumerable NodeIds => _nodeIds;
-
- public IEnumerable PendingRequests => _pendingRequests;
-
- public void RegisterNode(int nodeId)
- {
- _nodeIds.Add(nodeId);
- _remainingNodes++;
- }
-
- public bool TrySubmit(
- SourceSchemaClientRequest request,
- int[] nodeStateSlots,
- out PendingRequest? pendingRequest)
- {
- var nodeId = request.Node.Id;
- var nodeState =
- (uint)nodeId < (uint)nodeStateSlots.Length
- ? nodeStateSlots[nodeId]
- : NodeStateUnregistered;
-
- if (nodeState == NodeStateSubmitted)
- {
- throw new InvalidOperationException(
- string.Format(
- SourceSchemaRequestDispatcher_DuplicateNodeSubmission,
- nodeId));
- }
-
- if (nodeState != NodeStatePending)
- {
- pendingRequest = null;
- return false;
- }
-
- nodeStateSlots[nodeId] = NodeStateSubmitted;
- _remainingNodes--;
-
- pendingRequest = new PendingRequest(request);
- _pendingRequests.Add(pendingRequest);
-
- return true;
- }
-
- public void Skip(int nodeId, int[] nodeStateSlots)
- {
- if ((uint)nodeId < (uint)nodeStateSlots.Length
- && nodeStateSlots[nodeId] == NodeStatePending)
- {
- nodeStateSlots[nodeId] = NodeStateSkipped;
- _remainingNodes--;
- }
- }
-
- public bool TryCreateDispatch(out ImmutableArray pendingRequests)
- {
- if (_dispatchCreated || _remainingNodes > 0)
- {
- pendingRequests = [];
- return false;
- }
-
- _dispatchCreated = true;
-
- if (_pendingRequests.Count == 0)
- {
- pendingRequests = [];
- return true;
- }
-
- pendingRequests = [.. _pendingRequests];
- return true;
- }
- }
-
- private sealed class PendingRequest(SourceSchemaClientRequest request)
- {
- public SourceSchemaClientRequest Request { get; } = request;
-
- public TaskCompletionSource Completion { get; } =
- new(TaskCreationOptions.RunContinuationsAsynchronously);
- }
-}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs
index e1018c02b9f..b930f55616f 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs
@@ -18,6 +18,7 @@ internal sealed class ExecutionState(bool collectTelemetry, CancellationTokenSou
private readonly List _trackedNodeStateSlots = [];
private readonly List _trackedDependencySlots = [];
private readonly ConcurrentQueue _completedResults = new();
+ private readonly HashSet _failedOrSkippedNodes = [];
private byte[] _nodeStates = [];
private int[] _remainingDependencies = [];
@@ -27,10 +28,15 @@ internal sealed class ExecutionState(bool collectTelemetry, CancellationTokenSou
public readonly OrderedDictionary Traces = [];
public readonly AsyncAutoResetEvent Signal = new();
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public bool IsNodeSkipped(int nodeId)
+ => _failedOrSkippedNodes.Contains(nodeId);
+
public void FillBacklog(OperationPlan plan)
{
_ready.Clear();
_backlogCount = 0;
+ _failedOrSkippedNodes.Clear();
ResetNodeStates();
ResetRemainingDependencies();
@@ -84,6 +90,7 @@ public void Reset()
_stack.Clear();
_ready.Clear();
_backlogCount = 0;
+ _failedOrSkippedNodes.Clear();
ResetNodeStates();
ResetRemainingDependencies();
@@ -106,7 +113,7 @@ public void StartNode(OperationPlanContext context, ExecutionNode node, Cancella
{
Interlocked.Increment(ref _activeNodes);
- if ((uint)node.Id < (uint)_remainingDependencies.Length)
+ if (node.Id < _remainingDependencies.Length)
{
_remainingDependencies[node.Id] = -1;
}
@@ -133,7 +140,7 @@ public void CancelProcessing()
}
public void CompleteNode(
- OperationPlanContext context,
+ OperationPlan plan,
ExecutionNode node,
ExecutionNodeResult result)
{
@@ -161,30 +168,54 @@ public void CompleteNode(
});
}
+ // When a batch node executes it may skip some of its individual operations
+ // because their specific dependencies failed. We record those operation
+ // definition identifiers here so that downstream nodes that depend on a
+ // particular operation inside the batch can see that it was skipped.
+ if (!result.SkippedDefinitions.IsDefaultOrEmpty)
+ {
+ foreach (var def in result.SkippedDefinitions)
+ {
+ _failedOrSkippedNodes.Add(def.Id);
+ }
+ }
+
if (result.Status is ExecutionStatus.Success or ExecutionStatus.PartialSuccess)
{
+ // a node can explicitly choose which of its dependents should run
+ // by calling EnqueueDependentForExecution during execution.
+ // if it did, any dependent not in that list is skipped.
if (result.DependentsToExecute.Length > 0)
{
var dependentsToExecute = result.DependentsToExecute;
foreach (var dependent in node.Dependents)
{
- if (!ContainsDependent(dependentsToExecute, dependent))
+ var executionDependent = plan.GetExecutionNode(dependent);
+
+ if (!ContainsDependent(dependentsToExecute, executionDependent))
{
- SkipNode(context, dependent);
+ SkipNode(plan, executionDependent);
}
}
}
+ // decrement the remaining dependency count for each dependent.
+ // when a dependent's count reaches 0 all its dependencies are
+ // fulfilled and it is ready to execute.
foreach (var dependent in node.Dependents)
{
- if ((uint)dependent.Id >= (uint)_remainingDependencies.Length)
+ // When the dependent is an operation definition inside a batch,
+ // there is no backlog entry to update. The batch node's own
+ // execution-level dependencies handle its scheduling. We just
+ // need to track the remaining dependency count for execution nodes.
+ var executionNode = plan.GetExecutionNode(dependent);
+ if (executionNode.Id >= _remainingDependencies.Length)
{
continue;
}
- var remainingDependencies = _remainingDependencies[dependent.Id];
-
+ var remainingDependencies = _remainingDependencies[executionNode.Id];
if (remainingDependencies <= 0)
{
continue;
@@ -192,32 +223,44 @@ public void CompleteNode(
if (remainingDependencies == 1)
{
- _remainingDependencies[dependent.Id] = 0;
- _ready.Add(dependent);
+ _remainingDependencies[executionNode.Id] = 0;
+ _ready.Add(executionNode);
}
else if (remainingDependencies > 1)
{
- _remainingDependencies[dependent.Id] = remainingDependencies - 1;
+ _remainingDependencies[executionNode.Id] = remainingDependencies - 1;
}
}
}
if (result.Status is ExecutionStatus.Skipped or ExecutionStatus.Failed)
{
- SkipNode(context, node);
+ SkipNode(plan, node);
}
}
- public void SkipNode(OperationPlanContext context, ExecutionNode node)
+ public void SkipNode(OperationPlan plan, ExecutionNode node)
{
_stack.Clear();
_stack.Push(node);
while (_stack.TryPop(out var current))
{
- context.SourceSchemaDispatcher.SkipNode(current.Id);
+ _failedOrSkippedNodes.Add(current.Id);
+
+ // When a batch node is skipped without executing, every operation
+ // definition inside it is also skipped. We mark each of their
+ // identifiers so that downstream nodes that depend on a specific
+ // operation inside the batch will see it as skipped.
+ if (current is OperationBatchExecutionNode batchNode)
+ {
+ foreach (var op in batchNode.Operations)
+ {
+ _failedOrSkippedNodes.Add(op.Id);
+ }
+ }
- if ((uint)current.Id < (uint)_remainingDependencies.Length)
+ if (current.Id < _remainingDependencies.Length)
{
_remainingDependencies[current.Id] = -1;
}
@@ -239,20 +282,106 @@ public void SkipNode(OperationPlanContext context, ExecutionNode node)
foreach (var dependent in current.Dependents)
{
- if ((uint)dependent.Id >= (uint)_remainingDependencies.Length
- || _remainingDependencies[dependent.Id] < 0)
+ // When the dependent is an operation definition inside a batch,
+ // we mark it as skipped so that the batch node can check each
+ // operation's dependencies during execution and skip the ones
+ // whose dependencies failed.
+ if (dependent is not ExecutionNode)
{
+ _failedOrSkippedNodes.Add(dependent.Id);
continue;
}
- if (IsInBacklog(dependent.Id))
+ var dependentNode = plan.GetExecutionNode(dependent);
+
+ if (dependentNode.Id >= _remainingDependencies.Length
+ || _remainingDependencies[dependentNode.Id] < 0)
+ {
+ continue;
+ }
+
+ if (!IsInBacklog(dependentNode.Id))
+ {
+ continue;
+ }
+
+ // Fast path: no optional dependencies, use existing behavior.
+ if (dependentNode.OptionalDependencies.Length == 0)
{
- _stack.Push(dependent);
+ _stack.Push(dependentNode);
+ continue;
+ }
+
+ // Check if the failed node is an optional dependency of the dependent.
+ if (IsOptionalDependency(dependentNode, current))
+ {
+ // Optional dependency failed: decrement counter but don't cascade skip.
+ var remaining = _remainingDependencies[dependentNode.Id];
+
+ if (remaining == 1)
+ {
+ _remainingDependencies[dependentNode.Id] = 0;
+
+ // All deps resolved. If the node has no required deps and all
+ // optional deps failed, skip it (nothing useful to execute).
+ if (ShouldSkipDueToAllOptionalDepsFailed(dependentNode))
+ {
+ _stack.Push(dependentNode);
+ }
+ else
+ {
+ _ready.Add(dependentNode);
+ }
+ }
+ else if (remaining > 1)
+ {
+ _remainingDependencies[dependentNode.Id] = remaining - 1;
+ }
+ }
+ else
+ {
+ // Required dependency failed: cascade skip (existing behavior).
+ _stack.Push(dependentNode);
}
}
}
}
+ private bool ShouldSkipDueToAllOptionalDepsFailed(ExecutionNode node)
+ {
+ // If the node has any required dependencies, it should not be skipped here.
+ // Required deps that failed would have already cascaded a skip; if we reach
+ // this point the required deps must have succeeded.
+ if (node.Dependencies.Length > 0)
+ {
+ return false;
+ }
+
+ // All dependencies are optional. Check if every one of them failed or was skipped.
+ foreach (var optDep in node.OptionalDependencies)
+ {
+ if (!_failedOrSkippedNodes.Contains(optDep.Id))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ private static bool IsOptionalDependency(ExecutionNode dependent, ExecutionNode dependency)
+ {
+ foreach (var optDep in dependent.OptionalDependencies)
+ {
+ if (ReferenceEquals(optDep, dependency))
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
public bool EnqueueNextNodes(OperationPlanContext context, CancellationToken cancellationToken)
{
if (_ready.Count == 0)
@@ -266,7 +395,7 @@ public bool EnqueueNextNodes(OperationPlanContext context, CancellationToken can
foreach (var node in _ready)
{
- if ((uint)node.Id < (uint)_remainingDependencies.Length
+ if (node.Id < _remainingDependencies.Length
&& _remainingDependencies[node.Id] == 0)
{
if (node.Id < previousId)
@@ -286,7 +415,7 @@ public bool EnqueueNextNodes(OperationPlanContext context, CancellationToken can
{
var node = _ready[i];
- if ((uint)node.Id < (uint)_remainingDependencies.Length
+ if (node.Id < _remainingDependencies.Length
&& _remainingDependencies[node.Id] == 0)
{
StartNode(context, node, cancellationToken);
@@ -304,7 +433,7 @@ public bool EnqueueNextNodes(OperationPlanContext context, CancellationToken can
{
var node = _ready[i];
- if ((uint)node.Id < (uint)_remainingDependencies.Length
+ if (node.Id < _remainingDependencies.Length
&& _remainingDependencies[node.Id] == 0)
{
_stack.Push(node);
@@ -358,7 +487,7 @@ private void EnsureDependencyCapacity(int minCapacity)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private bool IsInBacklog(int nodeId)
- => (uint)nodeId < (uint)_nodeStates.Length
+ => nodeId < _nodeStates.Length
&& _nodeStates[nodeId] == NodeStateBacklog;
[MethodImpl(MethodImplOptions.AggressiveInlining)]
@@ -382,7 +511,7 @@ private void AddToBacklog(ExecutionNode node)
{
var nodeId = node.Id;
- if ((uint)nodeId >= (uint)_nodeStates.Length)
+ if (nodeId >= _nodeStates.Length)
{
EnsureNodeStateCapacity(nodeId + 1);
}
@@ -400,7 +529,7 @@ private void AddToBacklog(ExecutionNode node)
_nodeStates[nodeId] = NodeStateBacklog;
_backlogCount++;
- var remainingDependencies = node.Dependencies.Length;
+ var remainingDependencies = node.Dependencies.Length + node.OptionalDependencies.Length;
EnsureDependencyCapacity(nodeId + 1);
_remainingDependencies[nodeId] = remainingDependencies;
_trackedDependencySlots.Add(nodeId);
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Extensions/OperationPlanContextExtensions.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Extensions/OperationPlanContextExtensions.cs
new file mode 100644
index 00000000000..c6db6287df7
--- /dev/null
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Extensions/OperationPlanContextExtensions.cs
@@ -0,0 +1,64 @@
+using System.Buffers;
+using System.Collections.Immutable;
+using HotChocolate.Fusion.Execution.Nodes;
+using HotChocolate.Fusion.Text.Json;
+
+namespace HotChocolate.Fusion.Execution;
+
+internal static class OperationPlanContextExtensions
+{
+ ///
+ /// Builds an error from the given exception and reports it against every
+ /// result path that the failed operation would have populated. When there
+ /// are no variables the error is placed at the root path. Otherwise the
+ /// primary and additional paths from each variable value set are collected
+ /// into a rented buffer so the error can be reported against every
+ /// affected location in a single call.
+ ///
+ public static void AddErrors(
+ this OperationPlanContext context,
+ Exception exception,
+ ImmutableArray variables,
+ ResultSelectionSet resultSelectionSet)
+ {
+ var error = ErrorBuilder.FromException(exception).Build();
+
+ if (variables.Length == 0)
+ {
+ context.AddErrors(error, resultSelectionSet, Path.Root);
+ }
+ else
+ {
+ var pathBufferLength = 0;
+
+ for (var i = 0; i < variables.Length; i++)
+ {
+ pathBufferLength += 1 + variables[i].AdditionalPaths.Length;
+ }
+
+ var pathBuffer = ArrayPool.Shared.Rent(pathBufferLength);
+
+ try
+ {
+ var pathBufferIndex = 0;
+
+ for (var i = 0; i < variables.Length; i++)
+ {
+ pathBuffer[pathBufferIndex++] = variables[i].Path;
+
+ foreach (var additionalPath in variables[i].AdditionalPaths)
+ {
+ pathBuffer[pathBufferIndex++] = additionalPath;
+ }
+ }
+
+ context.AddErrors(error, resultSelectionSet, pathBuffer.AsSpan(0, pathBufferLength));
+ }
+ finally
+ {
+ pathBuffer.AsSpan(0, pathBufferLength).Clear();
+ ArrayPool.Shared.Return(pathBuffer);
+ }
+ }
+ }
+}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/BatchOperationDefinition.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/BatchOperationDefinition.cs
new file mode 100644
index 00000000000..e4ce5a3538a
--- /dev/null
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/BatchOperationDefinition.cs
@@ -0,0 +1,39 @@
+using HotChocolate.Execution;
+
+namespace HotChocolate.Fusion.Execution.Nodes;
+
+internal sealed class BatchOperationDefinition : OperationDefinition
+{
+ private readonly SelectionPath[] _targets;
+
+ internal BatchOperationDefinition(
+ int id,
+ OperationSourceText operation,
+ string? schemaName,
+ SelectionPath[] targets,
+ SelectionPath source,
+ OperationRequirement[] requirements,
+ string[] forwardedVariables,
+ ResultSelectionSet resultSelectionSet,
+ ExecutionNodeCondition[] conditions,
+ bool requiresFileUpload)
+ : base(
+ id,
+ operation,
+ schemaName,
+ source,
+ requirements,
+ forwardedVariables,
+ resultSelectionSet,
+ conditions,
+ requiresFileUpload)
+ {
+ _targets = targets;
+ }
+
+ ///
+ /// Gets the paths to the selection sets for which this batch operation
+ /// fetches data. Each target corresponds to one of the merged operations.
+ ///
+ public ReadOnlySpan Targets => _targets;
+}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNode.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNode.cs
index 8fd0b8d9af6..7aa864145d8 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNode.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNode.cs
@@ -5,13 +5,15 @@
namespace HotChocolate.Fusion.Execution.Nodes;
-public abstract class ExecutionNode : IEquatable
+public abstract class ExecutionNode : IOperationPlanNode, IEquatable
{
private bool _isSealed;
- private ExecutionNode[] _dependents = [];
- private ExecutionNode[] _dependencies = [];
+ private IOperationPlanNode[] _dependents = [];
+ private IOperationPlanNode[] _dependencies = [];
+ private IOperationPlanNode[] _optionalDependencies = [];
private int _dependentCount;
private int _dependencyCount;
+ private int _optionalDependencyCount;
///
/// The unique id of this execution node.
@@ -39,12 +41,18 @@ public abstract class ExecutionNode : IEquatable
/// Gets the execution nodes that depend on this node to be completed
/// before they can be executed.
///
- public ReadOnlySpan Dependents => _dependents;
+ public ReadOnlySpan Dependents => _dependents;
///
- /// Gets the execution nodes that this operation depends on.
+ /// Gets the execution nodes that this operation depends on (required).
///
- public ReadOnlySpan Dependencies => _dependencies;
+ public ReadOnlySpan Dependencies => _dependencies;
+
+ ///
+ /// Gets the execution nodes that this operation optionally depends on.
+ /// When an optional dependency is skipped or failed this node still gets executed.
+ ///
+ public ReadOnlySpan OptionalDependencies => _optionalDependencies;
public async Task ExecuteAsync(
OperationPlanContext context,
@@ -85,6 +93,7 @@ public async Task ExecuteAsync(
Stopwatch.GetElapsedTime(start),
error,
context.GetDependentsToExecute(this),
+ context.GetSkippedDefinitions(this),
context.GetVariableValueSets(this),
context.GetTransportDetails(this));
@@ -108,7 +117,7 @@ protected void EnqueueDependentForExecution(OperationPlanContext context, Execut
context.EnqueueForExecution(this, dependent);
}
- internal void AddDependency(ExecutionNode node)
+ internal void AddDependency(IOperationPlanNode node)
{
ExpectMutable();
@@ -121,7 +130,7 @@ internal void AddDependency(ExecutionNode node)
if (_dependencies.Length == 0)
{
- _dependencies = new ExecutionNode[4];
+ _dependencies = new IOperationPlanNode[4];
}
if (_dependencyCount == _dependencies.Length)
@@ -132,7 +141,7 @@ internal void AddDependency(ExecutionNode node)
_dependencies[_dependencyCount++] = node;
}
- internal void AddDependent(ExecutionNode node)
+ internal void AddDependent(IOperationPlanNode node)
{
ExpectMutable();
@@ -145,7 +154,7 @@ internal void AddDependent(ExecutionNode node)
if (_dependents.Length == 0)
{
- _dependents = new ExecutionNode[4];
+ _dependents = new IOperationPlanNode[4];
}
if (_dependentCount == _dependents.Length)
@@ -156,6 +165,30 @@ internal void AddDependent(ExecutionNode node)
_dependents[_dependentCount++] = node;
}
+ internal void AddOptionalDependency(IOperationPlanNode node)
+ {
+ ExpectMutable();
+
+ ArgumentNullException.ThrowIfNull(node);
+
+ if (node.Equals(this))
+ {
+ throw new InvalidOperationException("An operation cannot depend on itself.");
+ }
+
+ if (_optionalDependencies.Length == 0)
+ {
+ _optionalDependencies = new IOperationPlanNode[4];
+ }
+
+ if (_optionalDependencyCount == _optionalDependencies.Length)
+ {
+ Array.Resize(ref _optionalDependencies, _optionalDependencyCount * 2);
+ }
+
+ _optionalDependencies[_optionalDependencyCount++] = node;
+ }
+
protected internal void Seal()
{
ExpectMutable();
@@ -170,6 +203,15 @@ protected internal void Seal()
Array.Resize(ref _dependents, _dependentCount);
}
+ if (_optionalDependencies.Length > _optionalDependencyCount)
+ {
+ Array.Resize(ref _optionalDependencies, _optionalDependencyCount);
+ }
+
+ Array.Sort(_dependencies, static (a, b) => a.Id.CompareTo(b.Id));
+ Array.Sort(_dependents, static (a, b) => a.Id.CompareTo(b.Id));
+ Array.Sort(_optionalDependencies, static (a, b) => a.Id.CompareTo(b.Id));
+
OnSealingNode();
_isSealed = true;
@@ -203,8 +245,7 @@ public override bool Equals(object? obj)
public override int GetHashCode()
=> Id;
- [MethodImpl(MethodImplOptions.AggressiveInlining)]
- private bool IsSkipped(OperationPlanContext context)
+ protected virtual bool IsSkipped(OperationPlanContext context)
{
if (Conditions.IsEmpty)
{
@@ -218,8 +259,7 @@ private bool IsSkipped(OperationPlanContext context)
{
if (!context.Variables.TryGetValue(condition.VariableName, out var booleanValueNode))
{
- throw new InvalidOperationException(
- $"Expected to have a boolean value for variable '${condition.VariableName}'");
+ throw ThrowHelper.MissingBooleanVariable(condition.VariableName);
}
if (booleanValueNode.Value != condition.PassingValue)
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNodeResult.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNodeResult.cs
index a6c2d894ed1..e071beaf866 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNodeResult.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionNodeResult.cs
@@ -10,5 +10,6 @@ internal sealed record ExecutionNodeResult(
TimeSpan Duration,
Exception? Exception,
ImmutableArray DependentsToExecute,
+ ImmutableArray SkippedDefinitions,
ImmutableArray VariableValueSets,
(Uri? Uri, string? ContentType) TransportDetails = default);
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/IOperationPlanNode.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/IOperationPlanNode.cs
new file mode 100644
index 00000000000..c7917ef5df5
--- /dev/null
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/IOperationPlanNode.cs
@@ -0,0 +1,32 @@
+namespace HotChocolate.Fusion.Execution.Nodes;
+
+///
+/// Represents a node in the dependency graph.
+///
+public interface IOperationPlanNode
+{
+ ///
+ /// Gets the unique identifier of this node within the operation plan.
+ ///
+ int Id { get; }
+
+ ///
+ /// Gets the nodes that depend on this node. These nodes cannot
+ /// start executing until this node has completed.
+ ///
+ ReadOnlySpan Dependents { get; }
+
+ ///
+ /// Gets the nodes that this node requires to have completed
+ /// before it can start executing. If any required dependency
+ /// is skipped or fails, this node will be skipped as well.
+ ///
+ ReadOnlySpan Dependencies { get; }
+
+ ///
+ /// Gets the nodes that this node optionally depends on.
+ /// This node will still execute even if an optional dependency
+ /// is skipped or fails.
+ ///
+ ReadOnlySpan OptionalDependencies { get; }
+}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationBatchExecutionNode.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationBatchExecutionNode.cs
index 2127bb4c254..75c49f53576 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationBatchExecutionNode.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationBatchExecutionNode.cs
@@ -1,323 +1,341 @@
-using System.Buffers;
using System.Collections.Immutable;
-using System.Runtime.InteropServices;
-using HotChocolate.Execution;
using HotChocolate.Fusion.Execution.Clients;
-using HotChocolate.Fusion.Text.Json;
+using HotChocolate.Language;
namespace HotChocolate.Fusion.Execution.Nodes;
public sealed class OperationBatchExecutionNode : ExecutionNode
{
- private readonly OperationRequirement[] _requirements;
- private readonly string[] _forwardedVariables;
- private readonly ResultSelectionSet _resultSelectionSet;
- private readonly ExecutionNodeCondition[] _conditions;
- private readonly bool _requiresFileUpload;
- private readonly OperationSourceText _operation;
- private readonly int? _batchingGroupId;
- private readonly string? _schemaName;
- private readonly SelectionPath[] _targets;
- private readonly SelectionPath _source;
+ private readonly OperationDefinition[] _operations;
internal OperationBatchExecutionNode(
int id,
- OperationSourceText operation,
- string? schemaName,
- SelectionPath[] targets,
- SelectionPath source,
- OperationRequirement[] requirements,
- string[] forwardedVariables,
- ResultSelectionSet resultSelectionSet,
- ExecutionNodeCondition[] conditions,
- int? batchingGroupId,
- bool requiresFileUpload)
+ OperationDefinition[] operations)
{
Id = id;
- _operation = operation;
- _batchingGroupId = batchingGroupId;
- _schemaName = schemaName;
- _targets = targets;
- _source = source;
- _requirements = requirements;
- _forwardedVariables = forwardedVariables;
- _resultSelectionSet = resultSelectionSet;
- _conditions = conditions;
- _requiresFileUpload = requiresFileUpload;
+ _operations = operations;
+ SchemaName = operations[0].SchemaName!;
}
- ///
public override int Id { get; }
- ///
public override ExecutionNodeType Type => ExecutionNodeType.OperationBatch;
- ///
- public override ReadOnlySpan Conditions => _conditions;
-
- ///
- /// Gets the operation definition that this execution node represents.
- ///
- public OperationSourceText Operation => _operation;
-
- ///
- /// Gets the deterministic batching group identifier assigned at planning time.
- ///
- public int? BatchingGroupId => _batchingGroupId;
-
- ///
- /// Gets the result selection set fulfilled by this operation.
- ///
- internal ResultSelectionSet ResultSelectionSet => _resultSelectionSet;
-
- ///
- public override string? SchemaName => _schemaName;
-
- ///
- /// Gets the paths to the selection sets for which this operation fetches data.
- ///
- public ReadOnlySpan Targets => _targets;
-
- ///
- /// Gets the path to the local selection set (the selection set within the source schema request)
- /// to extract the data from.
- ///
- public SelectionPath Source => _source;
-
- ///
- /// Gets the data requirements that are needed to execute this operation.
- ///
- public ReadOnlySpan Requirements => _requirements;
-
- ///
- /// Gets the variables that are needed to execute this operation.
- ///
- public ReadOnlySpan ForwardedVariables => _forwardedVariables;
-
- ///
- /// Gets whether this operation contains one or more variables
- /// that contain the Upload scalar.
- ///
- public bool RequiresFileUpload => _requiresFileUpload;
+ public override ReadOnlySpan Conditions => [];
+
+ public override string SchemaName { get; }
+
+ internal ReadOnlySpan Operations => _operations;
+
+ protected override IDisposable? CreateScope(OperationPlanContext context)
+ => context.DiagnosticEvents.ExecuteOperationBatchNode(context, this, SchemaName);
protected override async ValueTask OnExecuteAsync(
OperationPlanContext context,
CancellationToken cancellationToken = default)
{
var diagnosticEvents = context.DiagnosticEvents;
- var variables = context.CreateVariableValueSets(_targets, _forwardedVariables, _requirements);
+ var schemaName = SchemaName;
- if (variables.Length == 0 && (_requirements.Length > 0 || _forwardedVariables.Length > 0))
+ // Build the list of requests that will be sent as a single batch to the
+ // downstream source schema. Each operation definition becomes one request
+ // in the batch, and we track which operation sits at which index so we can
+ // match results back to operations when the responses stream in.
+ var requestBuilder = ImmutableArray.CreateBuilder(_operations.Length);
+ var operationByIndex = new List(_operations.Length);
+ var variablesByIndex = new List>(_operations.Length);
+
+ if (_operations.Length == 1 && _operations[0] is SingleOperationDefinition)
{
- return ExecutionStatus.Skipped;
+ // When the batch holds a single non-merged operation, the planner
+ // promotes all of its dependencies onto the batch node as required.
+ // So if we reach this point, every dependency has already succeeded.
+ // We can skip the per-operation condition and dependency checks
+ // entirely, which avoids unnecessary work for the common case.
+ // Note: BatchOperationDefinition (merged multi-target ops) uses the
+ // slow path because its deps are optional: some targets' deps may
+ // be skipped while others succeed.
+ var operation = _operations[0];
+
+ var variables = operation switch
+ {
+ SingleOperationDefinition single
+ => context.CreateVariableValueSets(
+ single.Target,
+ single.ForwardedVariables,
+ single.Requirements),
+ BatchOperationDefinition batch
+ => context.CreateVariableValueSets(
+ batch.Targets,
+ batch.ForwardedVariables,
+ batch.Requirements),
+ _ => throw new InvalidOperationException(
+ $"Unknown operation definition type: {operation.GetType().Name}")
+ };
+
+ if (variables.Length == 0 && (operation.Requirements.Length > 0 || operation.ForwardedVariables.Length > 0))
+ {
+ return ExecutionStatus.Skipped;
+ }
+
+ context.TrackVariableValueSets(this, variables);
+
+ requestBuilder.Add(new SourceSchemaClientRequest
+ {
+ Node = this,
+ SchemaName = schemaName,
+ OperationType = operation.Operation.Type,
+ OperationSourceText = operation.Operation.SourceText,
+ Variables = variables,
+ RequiresFileUpload = operation.RequiresFileUpload
+ });
+
+ operationByIndex.Add(operation);
+ variablesByIndex.Add(variables);
}
+ else
+ {
+ foreach (var operation in _operations)
+ {
+ if (IsSkipped(context, operation))
+ {
+ context.TrackSkippedDefinition(this, operation);
+ continue;
+ }
+
+ // If any of this operation's dependencies were skipped or failed,
+ // we skip this operation within the batch. The remaining operations
+ // whose dependencies succeeded can still proceed normally.
+ if (HasSkippedDependencies(context, operation))
+ {
+ context.TrackSkippedDefinition(this, operation);
+ continue;
+ }
+
+ var variables = operation switch
+ {
+ SingleOperationDefinition single
+ => context.CreateVariableValueSets(
+ single.Target,
+ single.ForwardedVariables,
+ single.Requirements),
+ BatchOperationDefinition batch
+ => context.CreateVariableValueSets(
+ batch.Targets,
+ batch.ForwardedVariables,
+ batch.Requirements),
+ _ => throw new InvalidOperationException(
+ $"Unknown operation definition type: {operation.GetType().Name}")
+ };
+
+ // The operation expects input (requirements or forwarded variables), but
+ // the result store produced no matching variable values. Without input
+ // there is nothing meaningful to fetch, so we skip this operation.
+ if (variables.Length == 0
+ && (operation.Requirements.Length > 0
+ || operation.ForwardedVariables.Length > 0))
+ {
+ context.TrackSkippedDefinition(this, operation);
+ continue;
+ }
- var schemaName = _schemaName ?? context.GetDynamicSchemaName(this);
+ context.TrackVariableValueSets(this, variables);
- context.TrackVariableValueSets(this, variables);
+ requestBuilder.Add(new SourceSchemaClientRequest
+ {
+ Node = this,
+ SchemaName = schemaName,
+ OperationType = operation.Operation.Type,
+ OperationSourceText = operation.Operation.SourceText,
+ Variables = variables,
+ RequiresFileUpload = operation.RequiresFileUpload
+ });
+
+ operationByIndex.Add(operation);
+ variablesByIndex.Add(variables);
+ }
+ }
- var request = new SourceSchemaClientRequest
+ // Every operation in the batch was either skipped or had no variable
+ // values to resolve. There is nothing to send to the downstream service.
+ if (requestBuilder.Count == 0)
{
- Node = this,
- SchemaName = schemaName,
- BatchingGroupId = _batchingGroupId,
- OperationType = _operation.Type,
- OperationSourceText = _operation.SourceText,
- Variables = variables,
- RequiresFileUpload = _requiresFileUpload
- };
-
- var index = 0;
- var bufferLength = 0;
- SourceSchemaResult[]? buffer = null;
- SourceSchemaResult? singleResult = null;
- var hasSomeErrors = false;
+ return ExecutionStatus.Skipped;
+ }
- try
- {
- // we execute the GraphQL request against a source schema
- var response = await context.SourceSchemaScheduler
- .ExecuteAsync(request, cancellationToken)
- .ConfigureAwait(false);
- context.TrackSourceSchemaClientResponse(this, response);
+ var requests = requestBuilder.ToImmutable();
- // we read the responses from the response stream.
- var totalPathCount = variables.Length;
+ // Obtain a transport client for the source schema and stream the batch
+ // response. As each individual result arrives, we merge it into the
+ // result store so downstream nodes can consume the data.
+ var client = context.GetClient(schemaName, requests[0].OperationType);
+ var receivedResults = new bool[requests.Length];
+ var overallStatus = ExecutionStatus.Success;
- for (var i = 0; i < variables.Length; i++)
+ try
+ {
+ await foreach (var batchResult in client.ExecuteBatchStreamAsync(context, requests, cancellationToken))
{
- totalPathCount += variables[i].AdditionalPaths.Length;
- }
+ var requestIndex = batchResult.RequestIndex;
+ var op = operationByIndex[requestIndex];
+ var result = batchResult.Result;
+ var hasErrors = result.Errors is not null;
- var initialBufferLength = Math.Max(totalPathCount, 2);
+ receivedResults[requestIndex] = true;
- await foreach (var result in response.ReadAsResultStreamAsync(cancellationToken))
- {
- // Store the first result without renting a buffer,
- // since it might be the only one (e.g. a request-level error).
- if (index == 0)
+ try
{
- singleResult = result;
- index = 1;
+ context.AddPartialResult(
+ op.Source,
+ result,
+ op.ResultSelectionSet,
+ hasErrors);
}
- else
+ catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
{
- // Once we see a second result, we know there are multiple,
- // so we rent a buffer and move the first result into it.
- if (buffer is null)
- {
- bufferLength = initialBufferLength;
- buffer = ArrayPool.Shared.Rent(bufferLength);
- buffer[0] = singleResult!;
- }
-
- buffer[index++] = result;
+ return ExecutionStatus.Failed;
+ }
+ catch (Exception exception)
+ {
+ diagnosticEvents.SourceSchemaStoreError(context, this, schemaName, exception);
+ context.AddErrors(exception, variablesByIndex[requestIndex], op.ResultSelectionSet);
+ overallStatus = ExecutionStatus.Failed;
+ continue;
}
- // Parsing errors here allows the result store to reuse the cached value
- // and avoids a second document lookup per result.
- if (result.Errors is not null)
+ if (hasErrors && overallStatus == ExecutionStatus.Success)
{
- hasSomeErrors = true;
+ overallStatus = ExecutionStatus.PartialSuccess;
}
}
}
catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
{
- // If the execution of the node was cancelled, either the entire request was cancelled
- // or the execution was halted. In both cases we do not want to produce any errors
- // and just exit the node as quickly as possible.
return ExecutionStatus.Failed;
}
catch (Exception exception)
{
diagnosticEvents.SourceSchemaTransportError(context, this, schemaName, exception);
- // if there is an error, we need to make sure that the pooled buffers for the JsonDocuments
- // are returned to the pool.
- if (buffer is not null && bufferLength > 0)
- {
- foreach (var result in buffer.AsSpan(0, index))
- {
- // ReSharper disable once ConditionalAccessQualifierIsNonNullableAccordingToAPIContract
- result?.Dispose();
- }
-
- buffer.AsSpan(0, index).Clear();
- ArrayPool.Shared.Return(buffer);
- }
- else if (singleResult is not null)
+ // The transport itself failed, so every operation in the batch is affected.
+ // We attach the error to each operation's result selection set.
+ for (var i = 0; i < operationByIndex.Count; i++)
{
- singleResult.Dispose();
+ context.AddErrors(exception, variablesByIndex[i], operationByIndex[i].ResultSelectionSet);
}
- AddErrors(context, exception, variables, _resultSelectionSet);
return ExecutionStatus.Failed;
}
- try
+ // Verify that the downstream service returned a result for every
+ // operation in the batch. A missing result means the service did
+ // not implement the batch protocol correctly. We surface this as
+ // an error so the issue is easy to diagnose.
+ var missingCount = 0;
+
+ for (var i = 0; i < receivedResults.Length; i++)
{
- if (buffer is not null)
- {
- context.AddPartialResults(
- _source,
- buffer.AsSpan(0, index),
- _resultSelectionSet,
- hasSomeErrors);
- }
- else if (singleResult is not null)
+ if (!receivedResults[i])
{
- var firstResult = singleResult;
- context.AddPartialResults(
- _source,
- MemoryMarshal.CreateReadOnlySpan(ref firstResult, 1),
- _resultSelectionSet,
- hasSomeErrors);
- }
- else
- {
- context.AddPartialResults(
- _source,
- [],
- _resultSelectionSet,
- hasSomeErrors);
+ missingCount++;
+ var operation = operationByIndex[i];
+ context.AddErrors(
+ ThrowHelper.MissingBatchResult(operation.Id),
+ variablesByIndex[i],
+ operation.ResultSelectionSet);
}
}
- catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested)
+
+ if (missingCount > 0)
{
- // If the execution of the node was cancelled, either the entire request was cancelled
- // or the execution was halted. In both cases we do not want to produce any errors
- // and just exit the node as quickly as possible.
- return ExecutionStatus.Failed;
+ overallStatus = missingCount == receivedResults.Length
+ ? ExecutionStatus.Failed
+ : ExecutionStatus.PartialSuccess;
}
- catch (Exception exception)
+
+ return overallStatus;
+ }
+
+ protected override bool IsSkipped(OperationPlanContext context)
+ {
+ if (_operations.Length == 1)
{
- diagnosticEvents.SourceSchemaStoreError(context, this, schemaName, exception);
- AddErrors(context, exception, variables, _resultSelectionSet);
- return ExecutionStatus.Failed;
+ return IsSkipped(context, _operations[0]);
+ }
+
+ if (_operations.Length == 2)
+ {
+ return IsSkipped(context, _operations[0])
+ && IsSkipped(context, _operations[1]);
+ }
+
+ if (_operations.Length == 3)
+ {
+ return IsSkipped(context, _operations[0])
+ && IsSkipped(context, _operations[1])
+ && IsSkipped(context, _operations[2]);
+ }
+
+ if (_operations.Length == 4)
+ {
+ return IsSkipped(context, _operations[0])
+ && IsSkipped(context, _operations[1])
+ && IsSkipped(context, _operations[2])
+ && IsSkipped(context, _operations[3]);
}
- finally
+
+ foreach (var operation in _operations)
{
- if (buffer is not null)
+ if (!IsSkipped(context, operation))
{
- buffer.AsSpan(0, index).Clear();
- ArrayPool.Shared.Return(buffer);
+ return false;
}
}
- return hasSomeErrors ? ExecutionStatus.PartialSuccess : ExecutionStatus.Success;
+ return true;
}
- protected override IDisposable CreateScope(OperationPlanContext context)
+ protected override void OnSealingNode()
{
- var schemaName = _schemaName ?? context.GetDynamicSchemaName(this);
- return context.DiagnosticEvents.ExecuteOperationBatchNode(context, this, schemaName);
+ foreach (var operation in _operations)
+ {
+ operation.Seal();
+ }
}
- private static void AddErrors(
- OperationPlanContext context,
- Exception exception,
- ImmutableArray variables,
- ResultSelectionSet resultSelectionSet)
+ private static bool IsSkipped(OperationPlanContext context, OperationDefinition operation)
{
- var error = ErrorBuilder.FromException(exception).Build();
-
- if (variables.Length == 0)
+ if (operation.Conditions.Length == 0)
{
- context.AddErrors(error, resultSelectionSet, Path.Root);
+ return false;
}
- else
- {
- var pathBufferLength = 0;
- for (var i = 0; i < variables.Length; i++)
+ foreach (var condition in operation.Conditions)
+ {
+ if (!context.Variables.TryGetValue(condition.VariableName, out var booleanValueNode))
{
- pathBufferLength += 1 + variables[i].AdditionalPaths.Length;
+ throw ThrowHelper.MissingBooleanVariable(condition.VariableName);
}
- var pathBuffer = ArrayPool.Shared.Rent(pathBufferLength);
-
- try
+ if (booleanValueNode.Value != condition.PassingValue)
{
- var pathBufferIndex = 0;
-
- for (var i = 0; i < variables.Length; i++)
- {
- pathBuffer[pathBufferIndex++] = variables[i].Path;
+ return true;
+ }
+ }
- foreach (var additionalPath in variables[i].AdditionalPaths)
- {
- pathBuffer[pathBufferIndex++] = additionalPath;
- }
- }
+ return false;
+ }
- context.AddErrors(error, resultSelectionSet, pathBuffer.AsSpan(0, pathBufferLength));
- }
- finally
+ private static bool HasSkippedDependencies(OperationPlanContext context, OperationDefinition operation)
+ {
+ foreach (var dep in operation.Dependencies)
+ {
+ if (context.IsNodeSkipped(dep.Id))
{
- pathBuffer.AsSpan(0, pathBufferLength).Clear();
- ArrayPool.Shared.Return(pathBuffer);
+ return true;
}
}
+
+ return false;
}
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationDefinition.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationDefinition.cs
new file mode 100644
index 00000000000..983a84afe02
--- /dev/null
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationDefinition.cs
@@ -0,0 +1,153 @@
+using HotChocolate.Execution;
+
+namespace HotChocolate.Fusion.Execution.Nodes;
+
+internal abstract class OperationDefinition : IOperationPlanNode
+{
+ private readonly OperationRequirement[] _requirements;
+ private readonly string[] _forwardedVariables;
+ private readonly ExecutionNodeCondition[] _conditions;
+ private IOperationPlanNode[] _dependents = [];
+ private IOperationPlanNode[] _dependencies = [];
+ private int _dependentCount;
+ private int _dependencyCount;
+
+ protected OperationDefinition(
+ int id,
+ OperationSourceText operation,
+ string? schemaName,
+ SelectionPath source,
+ OperationRequirement[] requirements,
+ string[] forwardedVariables,
+ ResultSelectionSet resultSelectionSet,
+ ExecutionNodeCondition[] conditions,
+ bool requiresFileUpload)
+ {
+ Id = id;
+ Operation = operation;
+ SchemaName = schemaName;
+ Source = source;
+ _requirements = requirements;
+ _forwardedVariables = forwardedVariables;
+ ResultSelectionSet = resultSelectionSet;
+ _conditions = conditions;
+ RequiresFileUpload = requiresFileUpload;
+ }
+
+ /// <summary>
+ /// Gets the unique identifier of this operation definition within the plan.
+ /// </summary>
+ public int Id { get; }
+
+ /// <summary>
+ /// Gets the source text and metadata for the GraphQL operation that this
+ /// definition represents.
+ /// </summary>
+ public OperationSourceText Operation { get; }
+
+ /// <summary>
+ /// Gets the name of the source schema that this operation targets,
+ /// or <c>null</c> when the schema is determined dynamically at runtime.
+ /// </summary>
+ public string? SchemaName { get; }
+
+ /// <summary>
+ /// Gets the path to the local selection set (the selection set within the
+ /// source schema request) to extract the data from.
+ /// </summary>
+ public SelectionPath Source { get; }
+
+ /// <summary>
+ /// Gets the data requirements that must be satisfied before this operation
+ /// can be executed.
+ /// </summary>
+ public ReadOnlySpan<OperationRequirement> Requirements => _requirements;
+
+ /// <summary>
+ /// Gets the names of the variables that are forwarded from the original
+ /// client request into this operation.
+ /// </summary>
+ public ReadOnlySpan<string> ForwardedVariables => _forwardedVariables;
+
+ /// <summary>
+ /// Gets the result selection set that describes which fields this operation
+ /// populates in the composite result.
+ /// </summary>
+ public ResultSelectionSet ResultSelectionSet { get; }
+
+ /// <summary>
+ /// Gets the conditions that control whether this operation is skipped.
+ /// </summary>
+ public ReadOnlySpan<ExecutionNodeCondition> Conditions => _conditions;
+
+ /// <summary>
+ /// Gets whether this operation contains one or more variables that reference
+ /// the <c>Upload</c> scalar.
+ /// </summary>
+ public bool RequiresFileUpload { get; }
+
+ /// <summary>
+ /// Gets the nodes that cannot start until this operation definition has
+ /// completed.
+ /// </summary>
+ public ReadOnlySpan<IOperationPlanNode> Dependents => _dependents.AsSpan(0, _dependentCount);
+
+ /// <summary>
+ /// Gets the nodes that must complete before this operation definition can
+ /// start. These point to the original plan nodes (other operation definitions
+ /// or standalone execution nodes), never to batch wrapper nodes.
+ /// </summary>
+ public ReadOnlySpan<IOperationPlanNode> Dependencies => _dependencies.AsSpan(0, _dependencyCount);
+
+ /// <summary>
+ /// Operation definitions never have optional dependencies, so this always
+ /// returns an empty span.
+ /// </summary>
+ public ReadOnlySpan<IOperationPlanNode> OptionalDependencies => [];
+
+ internal void AddDependency(IOperationPlanNode node)
+ {
+ if (_dependencies.Length == 0)
+ {
+ _dependencies = new IOperationPlanNode[4];
+ }
+
+ if (_dependencyCount == _dependencies.Length)
+ {
+ Array.Resize(ref _dependencies, _dependencyCount * 2);
+ }
+
+ _dependencies[_dependencyCount++] = node;
+ }
+
+ internal void AddDependent(IOperationPlanNode node)
+ {
+ if (_dependents.Length == 0)
+ {
+ _dependents = new IOperationPlanNode[4];
+ }
+
+ if (_dependentCount == _dependents.Length)
+ {
+ Array.Resize(ref _dependents, _dependentCount * 2);
+ }
+
+ _dependents[_dependentCount++] = node;
+ }
+
+ internal void Seal()
+ {
+ if (_dependencies.Length > _dependencyCount)
+ {
+ Array.Resize(ref _dependencies, _dependencyCount);
+ }
+
+ if (_dependents.Length > _dependentCount)
+ {
+ Array.Resize(ref _dependents, _dependentCount);
+ }
+
+ Array.Sort(_dependencies, static (a, b) => a.Id.CompareTo(b.Id));
+ Array.Sort(_dependents, static (a, b) => a.Id.CompareTo(b.Id));
+ }
+}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationExecutionNode.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationExecutionNode.cs
index 29e67ccf6da..07c6b2151a0 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationExecutionNode.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationExecutionNode.cs
@@ -6,7 +6,6 @@
using HotChocolate.Execution;
using HotChocolate.Fusion.Diagnostics;
using HotChocolate.Fusion.Execution.Clients;
-using HotChocolate.Fusion.Text.Json;
namespace HotChocolate.Fusion.Execution.Nodes;
@@ -18,7 +17,6 @@ public sealed class OperationExecutionNode : ExecutionNode
private readonly ExecutionNodeCondition[] _conditions;
private readonly bool _requiresFileUpload;
private readonly OperationSourceText _operation;
- private readonly int? _batchingGroupId;
private readonly string? _schemaName;
private readonly SelectionPath _target;
private readonly SelectionPath _source;
@@ -33,12 +31,10 @@ internal OperationExecutionNode(
string[] forwardedVariables,
ResultSelectionSet resultSelectionSet,
ExecutionNodeCondition[] conditions,
- int? batchingGroupId,
bool requiresFileUpload)
{
Id = id;
_operation = operation;
- _batchingGroupId = batchingGroupId;
_schemaName = schemaName;
_target = target;
_source = source;
@@ -63,11 +59,6 @@ internal OperationExecutionNode(
///
public OperationSourceText Operation => _operation;
- ///
- /// Gets the deterministic batching group identifier assigned at planning time.
- ///
- public int? BatchingGroupId => _batchingGroupId;
-
///
/// Gets the result selection set fulfilled by this operation.
///
@@ -119,14 +110,12 @@ protected override async ValueTask OnExecuteAsync(
}
var schemaName = _schemaName ?? context.GetDynamicSchemaName(this);
-
context.TrackVariableValueSets(this, variables);
var request = new SourceSchemaClientRequest
{
Node = this,
SchemaName = schemaName,
- BatchingGroupId = _batchingGroupId,
OperationType = _operation.Type,
OperationSourceText = _operation.SourceText,
Variables = variables,
@@ -142,9 +131,8 @@ protected override async ValueTask OnExecuteAsync(
try
{
// we execute the GraphQL request against a source schema
- var response = await context.SourceSchemaScheduler
- .ExecuteAsync(request, cancellationToken)
- .ConfigureAwait(false);
+ var client = context.GetClient(schemaName, _operation.Type);
+ var response = await client.ExecuteAsync(context, request, cancellationToken).ConfigureAwait(false);
context.TrackSourceSchemaClientResponse(this, response);
// we read the responses from the response stream.
@@ -157,7 +145,7 @@ protected override async ValueTask OnExecuteAsync(
var initialBufferLength = Math.Max(totalPathCount, 2);
- await foreach (var result in response.ReadAsResultStreamAsync(cancellationToken))
+ await foreach (var result in response.ReadAsResultStreamAsync(cancellationToken).ConfigureAwait(false))
{
// If there is only one response, we skip the buffer rental.
if (index == 0)
@@ -215,7 +203,7 @@ protected override async ValueTask OnExecuteAsync(
singleResult.Dispose();
}
- AddErrors(context, exception, variables, _resultSelectionSet);
+ context.AddErrors(exception, variables, _resultSelectionSet);
return ExecutionStatus.Failed;
}
@@ -257,7 +245,7 @@ protected override async ValueTask OnExecuteAsync(
catch (Exception exception)
{
diagnosticEvents.SourceSchemaStoreError(context, this, schemaName, exception);
- AddErrors(context, exception, variables, _resultSelectionSet);
+ context.AddErrors(exception, variables, _resultSelectionSet);
return ExecutionStatus.Failed;
}
finally
@@ -317,59 +305,12 @@ internal async Task SubscribeAsync(
}
catch (Exception ex)
{
- AddErrors(context, ex, variables, _resultSelectionSet);
+ context.AddErrors(ex, variables, _resultSelectionSet);
context.DiagnosticEvents.SourceSchemaTransportError(context, this, schemaName, ex);
return SubscriptionResult.Failed(subscriptionId, ex);
}
}
- private static void AddErrors(
- OperationPlanContext context,
- Exception exception,
- ImmutableArray variables,
- ResultSelectionSet resultSelectionSet)
- {
- var error = ErrorBuilder.FromException(exception).Build();
-
- if (variables.Length == 0)
- {
- context.AddErrors(error, resultSelectionSet, Path.Root);
- }
- else
- {
- var pathBufferLength = 0;
-
- for (var i = 0; i < variables.Length; i++)
- {
- pathBufferLength += 1 + variables[i].AdditionalPaths.Length;
- }
-
- var pathBuffer = ArrayPool.Shared.Rent(pathBufferLength);
-
- try
- {
- var pathBufferIndex = 0;
-
- for (var i = 0; i < variables.Length; i++)
- {
- pathBuffer[pathBufferIndex++] = variables[i].Path;
-
- foreach (var additionalPath in variables[i].AdditionalPaths)
- {
- pathBuffer[pathBufferIndex++] = additionalPath;
- }
- }
-
- context.AddErrors(error, resultSelectionSet, pathBuffer.AsSpan(0, pathBufferLength));
- }
- finally
- {
- pathBuffer.AsSpan(0, pathBufferLength).Clear();
- ArrayPool.Shared.Return(pathBuffer);
- }
- }
- }
-
private sealed class SubscriptionEnumerable : IAsyncEnumerable
{
private readonly OperationPlanContext _context;
@@ -492,7 +433,7 @@ public async ValueTask MoveNextAsync()
if (hasResult)
{
_resultBuffer[0] = _resultEnumerator.Current;
- _context.AddPartialResults(_node._source, _resultBuffer, _node._resultSelectionSet);
+ _context.AddPartialResults(_node._source, _resultBuffer, _node._resultSelectionSet, containsErrors: true);
Current = new EventMessageResult(
_node.Id,
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs
index cabbffe0071..04360c2d5fe 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs
@@ -14,7 +14,6 @@ public sealed record OperationPlan
{
private static readonly JsonOperationPlanFormatter s_formatter = new();
private readonly ExecutionNode?[] _nodesById = [];
- private readonly ImmutableArray _batchingGroups;
private OperationPlan(
string id,
@@ -31,7 +30,6 @@ private OperationPlan(
SearchSpace = searchSpace;
ExpandedNodes = expandedNodes;
_nodesById = CreateNodeLookup(allNodes);
- _batchingGroups = CreateBatchingGroups(allNodes);
}
///
@@ -75,13 +73,6 @@ public IReadOnlyList VariableDefinitions
///
public int ExpandedNodes { get; }
- ///
- /// The batching groups derived from the execution nodes in this plan. Each group contains
- /// the IDs of nodes that belong to the same batch and should be executed together.
- ///
- internal ImmutableArray BatchingGroups
- => _batchingGroups;
-
///
/// Retrieves an execution node by its unique identifier.
///
@@ -96,7 +87,35 @@ public ExecutionNode GetNodeById(int id)
return node;
}
- throw new KeyNotFoundException();
+ throw ThrowHelper.NodeNotFound(id);
+ }
+
+ /// <summary>
+ /// Returns the <see cref="ExecutionNode"/> that is responsible for executing
+ /// the given plan node.
+ /// </summary>
+ /// <remarks>
+ /// If the plan node is already an execution node it is returned directly.
+ /// If it is a child operation plan node (such as an
+ /// <see cref="OperationDefinition"/> inside a batch) this method returns the
+ /// parent execution node that is responsible for its execution.
+ /// </remarks>
+ public ExecutionNode GetExecutionNode(IOperationPlanNode planNode)
+ {
+ if (planNode is ExecutionNode executionNode)
+ {
+ return executionNode;
+ }
+
+ if ((uint)planNode.Id < (uint)_nodesById.Length
+ && _nodesById[planNode.Id] is { } node)
+ {
+ return node;
+ }
+
+ throw ThrowHelper.NodeNotFound(planNode.Id);
}
///
@@ -170,51 +189,6 @@ public static OperationPlan Create(
return new OperationPlan(id, operation, rootNodes, allNodes, searchSpace, expandedNodes);
}
- private static ImmutableArray CreateBatchingGroups(
- ImmutableArray allNodes)
- {
- Dictionary>? groups = null;
-
- foreach (var executionNode in allNodes)
- {
- var groupId = executionNode switch
- {
- OperationExecutionNode n => n.BatchingGroupId,
- OperationBatchExecutionNode n => n.BatchingGroupId,
- _ => null
- };
-
- if (groupId is null)
- {
- continue;
- }
-
- groups ??= [];
-
- if (!groups.TryGetValue(groupId.Value, out var nodeIds))
- {
- nodeIds = [];
- groups.Add(groupId.Value, nodeIds);
- }
-
- nodeIds.Add(executionNode.Id);
- }
-
- if (groups is null)
- {
- return [];
- }
-
- var registrations = ImmutableArray.CreateBuilder(groups.Count);
-
- foreach (var (groupId, nodeIds) in groups)
- {
- registrations.Add(new BatchingGroupRegistration(groupId, [.. nodeIds]));
- }
-
- return registrations.MoveToImmutable();
- }
-
private static ExecutionNode?[] CreateNodeLookup(ImmutableArray allNodes)
{
if (allNodes.IsDefaultOrEmpty)
@@ -227,6 +201,14 @@ private static ImmutableArray CreateBatchingGroups(
foreach (var node in allNodes)
{
maxId = Math.Max(maxId, node.Id);
+
+ if (node is OperationBatchExecutionNode batchNode)
+ {
+ foreach (var op in batchNode.Operations)
+ {
+ maxId = Math.Max(maxId, op.Id);
+ }
+ }
}
var nodesById = new ExecutionNode?[maxId + 1];
@@ -234,12 +216,18 @@ private static ImmutableArray CreateBatchingGroups(
foreach (var node in allNodes)
{
nodesById[node.Id] = node;
+
+ // Map each operation definition ID to the containing batch node,
+ // so GetNodeById can resolve definition IDs to execution nodes.
+ if (node is OperationBatchExecutionNode batchNode)
+ {
+ foreach (var op in batchNode.Operations)
+ {
+ nodesById[op.Id] = batchNode;
+ }
+ }
}
return nodesById;
}
-
- internal readonly record struct BatchingGroupRegistration(
- int GroupId,
- int[] NodeIds);
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs
index e28bb047f4e..3124a277e93 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs
@@ -134,7 +134,7 @@ private static void WriteNodes(
break;
case OperationBatchExecutionNode batchNode:
- WriteOperationBatchNode(jsonWriter, operation, batchNode, nodeTrace);
+ WriteBatchExecutionNode(jsonWriter, operation, batchNode, nodeTrace);
break;
case IntrospectionExecutionNode introspectionNode:
@@ -185,11 +185,6 @@ private static void WriteOperationNode(
jsonWriter.WriteString("target", node.Target.ToString());
}
- if (node.BatchingGroupId.HasValue)
- {
- jsonWriter.WriteNumber("batchingGroupId", node.BatchingGroupId.Value);
- }
-
if (node.Requirements.Length > 0)
{
jsonWriter.WritePropertyName("requirements");
@@ -245,56 +240,170 @@ private static void WriteOperationNode(
jsonWriter.WriteEndObject();
}
- private static void WriteOperationBatchNode(
+ private static void WriteBatchExecutionNode(
+ Utf8JsonWriter jsonWriter,
+ Operation operation,
+ OperationBatchExecutionNode batchNode,
+ ExecutionNodeTrace? trace)
+ {
+ // Each operation within the batch is serialized as its own node entry,
+ // using the batch node's ID as batchGroupId to preserve the grouping.
+ foreach (var operationDef in batchNode.Operations)
+ {
+ switch (operationDef)
+ {
+ case SingleOperationDefinition single:
+ WriteOperationDefinitionAsNode(jsonWriter, operation, batchNode, single, trace);
+ break;
+
+ case BatchOperationDefinition batch:
+ WriteBatchOperationDefinitionAsNode(jsonWriter, operation, batchNode, batch, trace);
+ break;
+ }
+ }
+ }
+
+ private static void WriteOperationDefinitionAsNode(
Utf8JsonWriter jsonWriter,
Operation operation,
- OperationBatchExecutionNode node,
+ OperationBatchExecutionNode batchNode,
+ SingleOperationDefinition operationDef,
ExecutionNodeTrace? trace)
{
jsonWriter.WriteStartObject();
- jsonWriter.WriteNumber("id", node.Id);
- jsonWriter.WriteString("type", node.Type.ToString());
+ jsonWriter.WriteNumber("id", operationDef.Id);
+ jsonWriter.WriteString("type", nameof(ExecutionNodeType.Operation));
- if (!string.IsNullOrEmpty(node.SchemaName))
+ if (!string.IsNullOrEmpty(operationDef.SchemaName))
{
- jsonWriter.WriteString("schema", node.SchemaName);
+ jsonWriter.WriteString("schema", operationDef.SchemaName);
}
jsonWriter.WriteStartObject("operation");
- jsonWriter.WriteString("name", node.Operation.Name);
- jsonWriter.WriteString("kind", node.Operation.Type.ToString());
- jsonWriter.WriteString("document", node.Operation.SourceText);
- jsonWriter.WriteString("hash", node.Operation.Hash);
- jsonWriter.WriteString("shortHash", node.Operation.Hash[..8]);
+ jsonWriter.WriteString("name", operationDef.Operation.Name);
+ jsonWriter.WriteString("kind", operationDef.Operation.Type.ToString());
+ jsonWriter.WriteString("document", operationDef.Operation.SourceText);
+ jsonWriter.WriteString("hash", operationDef.Operation.Hash);
+ jsonWriter.WriteString("shortHash", operationDef.Operation.Hash[..8]);
jsonWriter.WriteEndObject();
- jsonWriter.WriteString("resultSelectionSet", node.ResultSelectionSet.ToString(indented: false));
+ jsonWriter.WriteString("resultSelectionSet", operationDef.ResultSelectionSet.ToString(indented: false));
- if (!node.Source.IsRoot)
+ if (!operationDef.Source.IsRoot)
{
- jsonWriter.WriteString("source", node.Source.ToString());
+ jsonWriter.WriteString("source", operationDef.Source.ToString());
+ }
+
+ if (!operationDef.Target.IsRoot)
+ {
+ jsonWriter.WriteString("target", operationDef.Target.ToString());
+ }
+
+ jsonWriter.WriteNumber("batchingGroupId", batchNode.Id);
+
+ if (operationDef.Requirements.Length > 0)
+ {
+ jsonWriter.WritePropertyName("requirements");
+ jsonWriter.WriteStartArray();
+
+ foreach (var requirement in operationDef.Requirements)
+ {
+ jsonWriter.WriteStartObject();
+ jsonWriter.WriteString("name", requirement.Key);
+ jsonWriter.WriteString("type", requirement.Type.ToString());
+ jsonWriter.WriteString("path", requirement.Path.ToString());
+ jsonWriter.WriteString("selectionMap", requirement.Map.ToString());
+ jsonWriter.WriteEndObject();
+ }
+
+ jsonWriter.WriteEndArray();
+ }
+
+ WriteConditions(jsonWriter, operationDef.Conditions);
+
+ if (operationDef.ForwardedVariables.Length > 0)
+ {
+ jsonWriter.WriteStartArray("forwardedVariables");
+
+ foreach (var variableName in operationDef.ForwardedVariables)
+ {
+ jsonWriter.WriteStringValue(variableName);
+ }
+
+ jsonWriter.WriteEndArray();
+ }
+
+ if (operationDef.RequiresFileUpload)
+ {
+ jsonWriter.WriteBoolean("requiresFileUpload", true);
+ }
+
+ if (operationDef.Dependencies.Length > 0)
+ {
+ jsonWriter.WritePropertyName("dependencies");
+ jsonWriter.WriteStartArray();
+
+ foreach (var dependency in operationDef.Dependencies)
+ {
+ jsonWriter.WriteNumberValue(dependency.Id);
+ }
+
+ jsonWriter.WriteEndArray();
+ }
+
+ TryWriteNodeTrace(jsonWriter, operation, trace);
+
+ jsonWriter.WriteEndObject();
+ }
+
+ private static void WriteBatchOperationDefinitionAsNode(
+ Utf8JsonWriter jsonWriter,
+ Operation operation,
+ OperationBatchExecutionNode batchNode,
+ BatchOperationDefinition operationDef,
+ ExecutionNodeTrace? trace)
+ {
+ jsonWriter.WriteStartObject();
+ jsonWriter.WriteNumber("id", operationDef.Id);
+ jsonWriter.WriteString("type", ExecutionNodeType.OperationBatch.ToString());
+
+ if (!string.IsNullOrEmpty(operationDef.SchemaName))
+ {
+ jsonWriter.WriteString("schema", operationDef.SchemaName);
+ }
+
+ jsonWriter.WriteStartObject("operation");
+ jsonWriter.WriteString("name", operationDef.Operation.Name);
+ jsonWriter.WriteString("kind", operationDef.Operation.Type.ToString());
+ jsonWriter.WriteString("document", operationDef.Operation.SourceText);
+ jsonWriter.WriteString("hash", operationDef.Operation.Hash);
+ jsonWriter.WriteString("shortHash", operationDef.Operation.Hash[..8]);
+ jsonWriter.WriteEndObject();
+
+ jsonWriter.WriteString("resultSelectionSet", operationDef.ResultSelectionSet.ToString(indented: false));
+
+ if (!operationDef.Source.IsRoot)
+ {
+ jsonWriter.WriteString("source", operationDef.Source.ToString());
}
jsonWriter.WriteStartArray("targets");
- foreach (var target in node.Targets)
+ foreach (var target in operationDef.Targets)
{
jsonWriter.WriteStringValue(target.ToString());
}
jsonWriter.WriteEndArray();
- if (node.BatchingGroupId.HasValue)
- {
- jsonWriter.WriteNumber("batchingGroupId", node.BatchingGroupId.Value);
- }
+ jsonWriter.WriteNumber("batchingGroupId", batchNode.Id);
- if (node.Requirements.Length > 0)
+ if (operationDef.Requirements.Length > 0)
{
jsonWriter.WritePropertyName("requirements");
jsonWriter.WriteStartArray();
- foreach (var requirement in node.Requirements)
+ foreach (var requirement in operationDef.Requirements)
{
jsonWriter.WriteStartObject();
jsonWriter.WriteString("name", requirement.Key);
@@ -307,13 +416,13 @@ private static void WriteOperationBatchNode(
jsonWriter.WriteEndArray();
}
- TryWriteConditions(jsonWriter, node);
+ WriteConditions(jsonWriter, operationDef.Conditions);
- if (node.ForwardedVariables.Length > 0)
+ if (operationDef.ForwardedVariables.Length > 0)
{
jsonWriter.WriteStartArray("forwardedVariables");
- foreach (var variableName in node.ForwardedVariables)
+ foreach (var variableName in operationDef.ForwardedVariables)
{
jsonWriter.WriteStringValue(variableName);
}
@@ -321,17 +430,17 @@ private static void WriteOperationBatchNode(
jsonWriter.WriteEndArray();
}
- if (node.RequiresFileUpload)
+ if (operationDef.RequiresFileUpload)
{
jsonWriter.WriteBoolean("requiresFileUpload", true);
}
- if (node.Dependencies.Length > 0)
+ if (operationDef.Dependencies.Length > 0)
{
jsonWriter.WritePropertyName("dependencies");
jsonWriter.WriteStartArray();
- foreach (var dependency in node.Dependencies)
+ foreach (var dependency in operationDef.Dependencies)
{
jsonWriter.WriteNumberValue(dependency.Id);
}
@@ -344,6 +453,25 @@ private static void WriteOperationBatchNode(
jsonWriter.WriteEndObject();
}
+ private static void WriteConditions(Utf8JsonWriter jsonWriter, ReadOnlySpan<ExecutionNodeCondition> conditions)
+ {
+ if (conditions.Length > 0)
+ {
+ jsonWriter.WritePropertyName("conditions");
+ jsonWriter.WriteStartArray();
+
+ foreach (var condition in conditions)
+ {
+ jsonWriter.WriteStartObject();
+ jsonWriter.WriteString("variable", "$" + condition.VariableName);
+ jsonWriter.WriteBoolean("passingValue", condition.PassingValue);
+ jsonWriter.WriteEndObject();
+ }
+
+ jsonWriter.WriteEndArray();
+ }
+ }
+
private static void WriteIntrospectionNode(
Utf8JsonWriter jsonWriter,
Operation operation,
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs
index cdb3decf17b..4cd9e7bec58 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs
@@ -8,8 +8,9 @@
namespace HotChocolate.Fusion.Execution.Nodes.Serialization;
///
-/// Parses a JSON-encoded operation plan into an ,
-/// reconstructing the operation, execution nodes, and their dependency graph.
+/// Turns a JSON-encoded operation plan back into a living
+/// object, including the original GraphQL operation, every execution node, and the
+/// dependency graph that connects them.
///
public sealed class JsonOperationPlanParser : OperationPlanParser
{
@@ -51,10 +52,13 @@ public override OperationPlan Parse(ReadOnlyMemory planSourceText)
var nodes = ParseNodes(rootElement.GetProperty("nodes"), operation);
+ // Root nodes are the entry points of the execution plan. A node is a
+ // root when it has no dependencies at all, meaning the executor can
+ // start it immediately without waiting for other nodes to finish.
return OperationPlan.Create(
id,
operation,
- [.. nodes.Where(n => n.Dependencies.Length == 0)],
+ [.. nodes.Where(n => n.Dependencies.Length == 0 && n.OptionalDependencies.Length == 0)],
nodes,
searchSpace,
expandedNodes);
@@ -71,9 +75,7 @@ private Operation ParseOperation(JsonElement operationElement)
if (operationDefinition is null)
{
- throw new InvalidOperationException(
- "There must be exactly one operation definition in the "
- + "operation document of the operation plan.");
+ throw ThrowHelper.SingleOperationRequired();
}
return _operationCompiler.Compile(id, hash, operationDefinition);
@@ -81,7 +83,10 @@ private Operation ParseOperation(JsonElement operationElement)
private ImmutableArray ParseNodes(JsonElement nodesElement, Operation operation)
{
- var nodes = new List<(ExecutionNode, int[]?, Dictionary?, int?)>();
+ // Phase 1: Read every JSON node element into a lightweight intermediate
+ // object. We do not create real execution nodes yet because we first need
+ // to know which operations belong to the same batch group.
+ var parsedNodes = new List();
foreach (var nodeElement in nodesElement.EnumerateArray())
{
@@ -89,29 +94,146 @@ private ImmutableArray ParseNodes(JsonElement nodesElement, Opera
var id = nodeElement.GetProperty("id").GetInt32();
var schema = _operationCompiler.Schema;
- (ExecutionNode, int[]?, Dictionary?, int?) node = nodeType switch
+
+ switch (nodeType)
{
- "Operation" => ParseOperationNode(nodeElement, id, schema),
- "OperationBatch" => ParseOperationBatchNode(nodeElement, id, schema),
- "Introspection" => ParseIntrospectionNode(nodeElement, id, operation),
- "Node" => ParseNodeFieldNode(nodeElement, id, operation),
- _ => throw new NotSupportedException($"Unsupported node type: {nodeType}")
- };
+ case "Operation":
+ parsedNodes.Add(ParseOperationNodeInfo(nodeElement, id, schema));
+ break;
+
+ case "OperationBatch":
+ parsedNodes.Add(ParseOperationBatchNodeInfo(nodeElement, id, schema));
+ break;
+
+ case "Introspection":
+ parsedNodes.Add(ParseIntrospectionNodeInfo(nodeElement, id, operation));
+ break;
+
+ case "Node":
+ parsedNodes.Add(ParseNodeFieldNodeInfo(nodeElement, id, operation));
+ break;
+
+ default:
+ throw new NotSupportedException($"Unsupported node type: {nodeType}");
+ }
+ }
+
+ // Phase 2: Separate operations that share a batching group identifier
+ // from those that stand alone. Operations in the same group will be
+ // merged into a single OperationBatchExecutionNode later, so the
+ // gateway can send them to the downstream service in one network call.
+ var batchGroups = new Dictionary>();
+ var standaloneNodes = new List();
+
+ foreach (var parsed in parsedNodes)
+ {
+ if (parsed is ParsedOperationNodeInfo opInfo && opInfo.BatchingGroupId.HasValue)
+ {
+ if (!batchGroups.TryGetValue(opInfo.BatchingGroupId.Value, out var group))
+ {
+ group = [];
+ batchGroups[opInfo.BatchingGroupId.Value] = group;
+ }
+
+ group.Add(opInfo);
+ }
+ else
+ {
+ standaloneNodes.Add(parsed);
+ }
+ }
+
+ // Phase 3: Turn the intermediate objects into real execution nodes.
+ // We also build a lookup from node identifier to execution node so that
+ // Phase 4 can wire up dependencies efficiently.
+ var allNodes = new List<(ExecutionNode Node, int[]? Dependencies, Dictionary? Branches, int? Fallback)>();
+ var nodeMap = new Dictionary();
+
+ // Merge each batch group into a single OperationBatchExecutionNode.
+ // The group identifier becomes the node identifier, and every member
+ // operation becomes an entry in the batch node's operation list.
+ foreach (var (groupId, groupMembers) in batchGroups)
+ {
+ var operations = new List();
+ var allDeps = new HashSet();
+
+ foreach (var member in groupMembers)
+ {
+ operations.Add(member.ToOperationDefinition());
+
+ if (member.Dependencies is not null)
+ {
+ foreach (var dep in member.Dependencies)
+ {
+ allDeps.Add(dep);
+ }
+ }
+ }
- nodes.Add(node);
+ var batchNode = new OperationBatchExecutionNode(groupId, operations.ToArray());
+ allNodes.Add((batchNode, allDeps.Count > 0 ? allDeps.ToArray() : null, null, null));
+ nodeMap[groupId] = batchNode;
}
- var nodeMap = nodes.ToDictionary(n => n.Item1.Id, n => n.Item1);
+ // Convert every node that does not belong to a batch group into its
+ // own execution node (for example, a single-operation node or an
+ // introspection node).
+ foreach (var parsed in standaloneNodes)
+ {
+ var (node, deps, branches, fallback) = parsed.ToExecutionNodeTuple();
+ allNodes.Add((node, deps, branches, fallback));
+ nodeMap[node.Id] = node;
+ }
- foreach (var (node, dependencies, branches, fallback) in nodes)
+ // When multiple operations are merged into one batch node, only the
+ // group identifier survives as a real node identifier. Other code may
+ // still reference the original member identifiers in dependency lists,
+ // so we build a redirect map that translates each absorbed member
+ // identifier to the batch node's group identifier.
+ var idRedirects = new Dictionary();
+
+ foreach (var (groupId, groupMembers) in batchGroups)
+ {
+ foreach (var member in groupMembers)
+ {
+ if (member.Id != groupId)
+ {
+ idRedirects[member.Id] = groupId;
+ }
+ }
+ }
+
+ // Phase 4: Connect every node to the nodes it depends on. We use the
+ // redirect map from above so that a dependency on a merged member
+ // identifier correctly resolves to the batch node that now contains it.
+ foreach (var (node, dependencies, branches, fallback) in allNodes)
{
if (dependencies is not null)
{
- foreach (var dependencyId in dependencies)
+ foreach (var rawDepId in dependencies)
{
+ var dependencyId = idRedirects.TryGetValue(rawDepId, out var redirectId)
+ ? redirectId
+ : rawDepId;
+
if (nodeMap.TryGetValue(dependencyId, out var dependencyNode))
{
- node.AddDependency(dependencyNode);
+ // A batch node that holds more than one operation can still
+ // run even if some of its dependencies are skipped, because
+ // each operation inside the batch tracks its own fine-grained
+ // dependencies. We mark these as optional so the executor
+ // does not block the entire batch when only one member's
+ // dependency is missing. Single-operation nodes (and
+ // non-batch nodes) need a strict dependency instead.
+ if (node is OperationBatchExecutionNode { Operations.Length: > 1 })
+ {
+ node.AddOptionalDependency(dependencyNode);
+ }
+ else
+ {
+ node.AddDependency(dependencyNode);
+ }
+
dependencyNode.AddDependent(node);
}
else
@@ -126,8 +248,10 @@ private ImmutableArray ParseNodes(JsonElement nodesElement, Opera
{
if (branches is not null)
{
- foreach (var (typeName, nodeId) in branches)
+ foreach (var (typeName, rawNodeId) in branches)
{
+ var nodeId = idRedirects.TryGetValue(rawNodeId, out var rId) ? rId : rawNodeId;
+
if (nodeMap.TryGetValue(nodeId, out var branchNode))
{
nodeExecutionNode.AddBranch(typeName, branchNode);
@@ -155,127 +279,135 @@ private ImmutableArray ParseNodes(JsonElement nodesElement, Opera
}
}
- foreach (var (node, _, _, _) in nodes)
- {
- node.Seal();
- }
-
- return [.. nodeMap.Values.OrderBy(t => t.Id)];
- }
+ // Build a unified lookup that maps every plan-level identifier to its
+ // node. This includes execution nodes *and* the individual operation
+ // definitions inside batch nodes. We need both because a member
+ // operation's dependency list uses the original identifiers, which may
+ // point to another operation definition rather than a top-level node.
+ var planNodeMap = new Dictionary(nodeMap.Count);
- private static (OperationExecutionNode, int[]?, Dictionary?, int?) ParseOperationNode(
- JsonElement nodeElement, int id, ISchemaDefinition schema)
- {
- string? schemaName = null;
- if (nodeElement.TryGetProperty("schema", out var schemaElement))
+ foreach (var (id, node) in nodeMap)
{
- schemaName = schemaElement.GetString()!;
- }
+ planNodeMap[id] = node;
- var operationElement = nodeElement.GetProperty("operation");
- var operationName = operationElement.GetProperty("name").GetString()!;
- var operationType = Enum.Parse(operationElement.GetProperty("kind").GetString()!);
- var document = operationElement.GetProperty("document").GetString()!;
- var hash = operationElement.GetProperty("hash").GetString()!;
-
- SelectionPath? source = null;
- SelectionPath? target = null;
- List? requirements = null;
- string[]? forwardedVariables = null;
- SelectionSetNode? resultSelectionSet = null;
- int[]? dependencies = null;
- int? batchingGroupId = null;
-
- if (nodeElement.TryGetProperty("source", out var sourceElement))
- {
- source = SelectionPath.Parse(sourceElement.GetString()!);
+ if (node is OperationBatchExecutionNode bn)
+ {
+ foreach (var op in bn.Operations)
+ {
+ planNodeMap[op.Id] = op;
+ }
+ }
}
- if (nodeElement.TryGetProperty("target", out var targetElement))
+ // Each operation definition inside a batch node tracks its own
+ // dependencies so the executor can skip individual operations whose
+ // prerequisites were not met. Here we resolve those per-operation
+ // dependencies using the original identifiers from the JSON.
+ foreach (var (groupId, groupMembers) in batchGroups)
{
- target = SelectionPath.Parse(targetElement.GetString()!);
- }
+ if (nodeMap.TryGetValue(groupId, out var batchNode) && batchNode is OperationBatchExecutionNode batch)
+ {
+ var memberIndex = 0;
- if (nodeElement.TryGetProperty("requirements", out var requirementsElement))
- {
- requirements = [];
+ foreach (var member in groupMembers)
+ {
+ if (member.Dependencies is { Length: > 0 })
+ {
+ var opDef = batch.Operations[memberIndex];
- foreach (var requirementElement in requirementsElement.EnumerateArray())
- {
- var requirementName = requirementElement.GetProperty("name").GetString()!;
- var requirementType = requirementElement.GetProperty("type").GetString()!;
- var requirementPath = requirementElement.GetProperty("path").GetString()!;
- var selectionMap = requirementElement.GetProperty("selectionMap").GetString()!;
+ foreach (var depId in member.Dependencies)
+ {
+ if (planNodeMap.TryGetValue(depId, out var depNode))
+ {
+ opDef.AddDependency(depNode);
+ }
+ }
+ }
- requirements.Add(new OperationRequirement(
- requirementName,
- Utf8GraphQLParser.Syntax.ParseTypeReference(requirementType),
- SelectionPath.Parse(requirementPath),
- FieldSelectionMapParser.Parse(selectionMap)));
+ memberIndex++;
+ }
}
}
- if (nodeElement.TryGetProperty("forwardedVariables", out var forwardedVariablesElement))
+ // Seal every node so its dependency and dependent lists become
+ // immutable. After this point no further wiring changes are allowed.
+ foreach (var (node, _, _, _) in allNodes)
{
- forwardedVariables = forwardedVariablesElement
- .EnumerateArray()
- .Select(e => e.GetString()!)
- .ToArray();
+ node.Seal();
}
- if (nodeElement.TryGetProperty("resultSelectionSet", out var resultSelectionSetElement)
- && resultSelectionSetElement.GetString() is { Length: > 0 } resultSelectionSetSyntax)
- {
- resultSelectionSet = Utf8GraphQLParser.Syntax.ParseSelectionSet(resultSelectionSetSyntax);
- }
+ return [.. nodeMap.Values.OrderBy(t => t.Id)];
+ }
- if (resultSelectionSet is null)
- {
- throw new InvalidOperationException("The resultSelectionSet is required in a valid operation plan.");
- }
+ private static ParsedOperationNodeInfo ParseOperationNodeInfo(
+ JsonElement nodeElement, int id, ISchemaDefinition schema)
+ {
+ var (schemaName, opSource, source, requirements, forwardedVariables,
+ resultSelectionSet, dependencies, batchingGroupId, conditions,
+ requiresFileUpload) = ParseCommonOperationFields(nodeElement, schema);
- if (nodeElement.TryGetProperty("dependencies", out var dependenciesElement))
+ SelectionPath? target = null;
+
+ if (nodeElement.TryGetProperty("target", out var targetElement))
{
- dependencies = dependenciesElement
- .EnumerateArray()
- .Select(e => e.GetInt32())
- .ToArray();
+ target = SelectionPath.Parse(targetElement.GetString()!);
}
- if (nodeElement.TryGetProperty("batchingGroupId", out var batchingGroupIdElement))
+ return new ParsedSingleOperationNodeInfo
{
- batchingGroupId = batchingGroupIdElement.GetInt32();
- }
+ Id = id,
+ SchemaName = schemaName,
+ OperationSource = opSource,
+ Source = source ?? SelectionPath.Root,
+ Target = target ?? SelectionPath.Root,
+ Requirements = requirements?.ToArray() ?? [],
+ ForwardedVariables = forwardedVariables ?? [],
+ ResultSelectionSet = ResultSelectionSet.Create(resultSelectionSet!, schema),
+ Dependencies = dependencies,
+ BatchingGroupId = batchingGroupId,
+ Conditions = conditions,
+ RequiresFileUpload = requiresFileUpload,
+ Schema = schema
+ };
+ }
- var conditions = TryParseConditions(nodeElement);
+ private static ParsedOperationNodeInfo ParseOperationBatchNodeInfo(
+ JsonElement nodeElement, int id, ISchemaDefinition schema)
+ {
+ var (schemaName, opSource, source, requirements, forwardedVariables,
+ resultSelectionSet, dependencies, batchingGroupId, conditions,
+ requiresFileUpload) = ParseCommonOperationFields(nodeElement, schema);
- var requiresFileUpload = nodeElement.TryGetProperty("requiresFileUpload", out var requiresFileUploadElement)
- && requiresFileUploadElement.ValueKind == JsonValueKind.True;
+ var targets = nodeElement.TryGetProperty("targets", out var targetsElement)
+ ? targetsElement.EnumerateArray().Select(e => SelectionPath.Parse(e.GetString()!)).ToArray()
+ : [];
- var node = new OperationExecutionNode(
- id,
- new OperationSourceText(
- operationName,
- operationType,
- document,
- hash),
- schemaName,
- target ?? SelectionPath.Root,
- source ?? SelectionPath.Root,
- requirements?.ToArray() ?? [],
- forwardedVariables ?? [],
- ResultSelectionSet.Create(resultSelectionSet, schema),
- conditions,
- batchingGroupId,
- requiresFileUpload);
-
- return (node, dependencies, null, null);
+ return new ParsedBatchOperationNodeInfo
+ {
+ Id = id,
+ SchemaName = schemaName,
+ OperationSource = opSource,
+ Source = source ?? SelectionPath.Root,
+ Targets = targets,
+ Requirements = requirements?.ToArray() ?? [],
+ ForwardedVariables = forwardedVariables ?? [],
+ ResultSelectionSet = ResultSelectionSet.Create(resultSelectionSet!, schema),
+ Dependencies = dependencies,
+ BatchingGroupId = batchingGroupId,
+ Conditions = conditions,
+ RequiresFileUpload = requiresFileUpload,
+ Schema = schema
+ };
}
- private static (OperationBatchExecutionNode, int[]?, Dictionary?, int?) ParseOperationBatchNode(
- JsonElement nodeElement, int id, ISchemaDefinition schema)
+ private static (string? schemaName, OperationSourceText opSource, SelectionPath? source,
+ List? requirements, string[]? forwardedVariables,
+ SelectionSetNode? resultSelectionSet, int[]? dependencies, int? batchingGroupId,
+ ExecutionNodeCondition[] conditions, bool requiresFileUpload)
+ ParseCommonOperationFields(JsonElement nodeElement, ISchemaDefinition schema)
{
string? schemaName = null;
+
if (nodeElement.TryGetProperty("schema", out var schemaElement))
{
schemaName = schemaElement.GetString()!;
@@ -286,6 +418,7 @@ private static (OperationBatchExecutionNode, int[]?, Dictionary?, i
var operationType = Enum.Parse(operationElement.GetProperty("kind").GetString()!);
var document = operationElement.GetProperty("document").GetString()!;
var hash = operationElement.GetProperty("hash").GetString()!;
+ var opSource = new OperationSourceText(operationName, operationType, document, hash);
SelectionPath? source = null;
List? requirements = null;
@@ -299,10 +432,6 @@ private static (OperationBatchExecutionNode, int[]?, Dictionary?, i
source = SelectionPath.Parse(sourceElement.GetString()!);
}
- var targets = nodeElement.TryGetProperty("targets", out var targetsElement)
- ? targetsElement.EnumerateArray().Select(e => SelectionPath.Parse(e.GetString()!)).ToArray()
- : [];
-
if (nodeElement.TryGetProperty("requirements", out var requirementsElement))
{
requirements = [];
@@ -359,27 +488,11 @@ private static (OperationBatchExecutionNode, int[]?, Dictionary?, i
var requiresFileUpload = nodeElement.TryGetProperty("requiresFileUpload", out var requiresFileUploadElement)
&& requiresFileUploadElement.ValueKind == JsonValueKind.True;
- var node = new OperationBatchExecutionNode(
- id,
- new OperationSourceText(
- operationName,
- operationType,
- document,
- hash),
- schemaName,
- targets,
- source ?? SelectionPath.Root,
- requirements?.ToArray() ?? [],
- forwardedVariables ?? [],
- ResultSelectionSet.Create(resultSelectionSet, schema),
- conditions,
- batchingGroupId,
- requiresFileUpload);
-
- return (node, dependencies, null, null);
+ return (schemaName, opSource, source, requirements, forwardedVariables,
+ resultSelectionSet, dependencies, batchingGroupId, conditions, requiresFileUpload);
}
- private static (IntrospectionExecutionNode, int[]?, Dictionary?, int?) ParseIntrospectionNode(
+ private static ParsedNodeInfo ParseIntrospectionNodeInfo(
JsonElement nodeElement,
int id,
Operation operation)
@@ -396,12 +509,12 @@ private static (IntrospectionExecutionNode, int[]?, Dictionary?, in
var conditions = TryParseConditions(nodeElement);
- var node = new IntrospectionExecutionNode(
- id,
- selections.ToArray(),
- conditions);
-
- return (node, null, null, null);
+ return new ParsedIntrospectionNodeInfo
+ {
+ Id = id,
+ Selections = selections.ToArray(),
+ Conditions = conditions
+ };
Selection GetRootSelection(string responseName)
{
@@ -418,7 +531,7 @@ Selection GetRootSelection(string responseName)
}
}
- private static (NodeFieldExecutionNode, int[]?, Dictionary?, int?) ParseNodeFieldNode(
+ private static ParsedNodeInfo ParseNodeFieldNodeInfo(
JsonElement nodeElement, int id, Operation operation)
{
var responseName = nodeElement.GetProperty("responseName").GetString()!;
@@ -455,13 +568,15 @@ private static (NodeFieldExecutionNode, int[]?, Dictionary?, int?)
var conditions = TryParseConditions(nodeElement);
- var node = new NodeFieldExecutionNode(
- id,
- responseName,
- idValue,
- conditions);
-
- return (node, null, branches, fallbackNodeId);
+ return new ParsedNodeFieldNodeInfo
+ {
+ Id = id,
+ ResponseName = responseName,
+ IdValue = idValue,
+ Conditions = conditions,
+ Branches = branches,
+ FallbackNodeId = fallbackNodeId
+ };
}
private static ExecutionNodeCondition[] TryParseConditions(JsonElement nodeElement)
@@ -484,4 +599,132 @@ private static ExecutionNodeCondition[] TryParseConditions(JsonElement nodeEleme
return conditions.ToArray();
}
+
+ // The classes below are lightweight intermediate representations used only
+ // during parsing. They hold the raw values extracted from JSON so we can
+ // first group and redirect identifiers before creating the final execution
+ // nodes and wiring their dependencies.
+
+ private abstract class ParsedNodeInfo
+ {
+ public int Id { get; init; }
+ public int[]? Dependencies { get; init; }
+
+ public abstract (ExecutionNode Node, int[]? Dependencies, Dictionary? Branches, int? Fallback)
+ ToExecutionNodeTuple();
+ }
+
+ private abstract class ParsedOperationNodeInfo : ParsedNodeInfo
+ {
+ public string? SchemaName { get; init; }
+ public required OperationSourceText OperationSource { get; init; }
+ public required SelectionPath Source { get; init; }
+ public OperationRequirement[] Requirements { get; init; } = [];
+ public string[] ForwardedVariables { get; init; } = [];
+ public required ResultSelectionSet ResultSelectionSet { get; init; }
+ public int? BatchingGroupId { get; init; }
+ public ExecutionNodeCondition[] Conditions { get; init; } = [];
+ public bool RequiresFileUpload { get; init; }
+ public required ISchemaDefinition Schema { get; init; }
+
+ public abstract OperationDefinition ToOperationDefinition();
+ }
+
+ private sealed class ParsedSingleOperationNodeInfo : ParsedOperationNodeInfo
+ {
+ public required SelectionPath Target { get; init; }
+
+ public override OperationDefinition ToOperationDefinition()
+ {
+ return new SingleOperationDefinition(
+ Id,
+ OperationSource,
+ SchemaName,
+ Target,
+ Source,
+ Requirements,
+ ForwardedVariables,
+ ResultSelectionSet,
+ Conditions,
+ RequiresFileUpload);
+ }
+
+ public override (ExecutionNode, int[]?, Dictionary?, int?) ToExecutionNodeTuple()
+ {
+ var node = new OperationExecutionNode(
+ Id,
+ OperationSource,
+ SchemaName,
+ Target,
+ Source,
+ Requirements,
+ ForwardedVariables,
+ ResultSelectionSet,
+ Conditions,
+ RequiresFileUpload);
+
+ return (node, Dependencies, null, null);
+ }
+ }
+
+ private sealed class ParsedBatchOperationNodeInfo : ParsedOperationNodeInfo
+ {
+ public SelectionPath[] Targets { get; init; } = [];
+
+ public override OperationDefinition ToOperationDefinition()
+ {
+ return new BatchOperationDefinition(
+ Id,
+ OperationSource,
+ SchemaName,
+ Targets,
+ Source,
+ Requirements,
+ ForwardedVariables,
+ ResultSelectionSet,
+ Conditions,
+ RequiresFileUpload);
+ }
+
+ public override (ExecutionNode, int[]?, Dictionary?, int?) ToExecutionNodeTuple()
+ {
+ // This batch operation does not share a batching group with any other
+ // operation, so it stands alone. We still wrap it in an
+ // OperationBatchExecutionNode because the executor expects batch
+ // operations to run through the batch execution path.
+ var opDef = ToOperationDefinition();
+ var batchNode = new OperationBatchExecutionNode(Id, [opDef]);
+
+ return (batchNode, Dependencies, null, null);
+ }
+ }
+
+ private sealed class ParsedIntrospectionNodeInfo : ParsedNodeInfo
+ {
+ public Selection[] Selections { get; init; } = [];
+ public ExecutionNodeCondition[] Conditions { get; init; } = [];
+
+ public override (ExecutionNode, int[]?, Dictionary?, int?) ToExecutionNodeTuple()
+ {
+ var node = new IntrospectionExecutionNode(Id, Selections, Conditions);
+
+ return (node, Dependencies, null, null);
+ }
+ }
+
+ private sealed class ParsedNodeFieldNodeInfo : ParsedNodeInfo
+ {
+ public string ResponseName { get; init; } = "";
+ public IValueNode IdValue { get; init; } = null!;
+ public ExecutionNodeCondition[] Conditions { get; init; } = [];
+ public Dictionary? Branches { get; init; }
+ public int FallbackNodeId { get; init; }
+
+ public override (ExecutionNode, int[]?, Dictionary?, int?) ToExecutionNodeTuple()
+ {
+ var node = new NodeFieldExecutionNode(Id, ResponseName, IdValue, Conditions);
+
+ return (node, Dependencies, Branches, FallbackNodeId);
+ }
+ }
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs
index d028ab0cef5..d3e2d16dbf8 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs
@@ -32,7 +32,7 @@ public override string Format(OperationPlan plan, OperationPlanTrace? trace = nu
break;
case OperationBatchExecutionNode batchNode:
- WriteOperationBatchNode(batchNode, nodeTrace, writer);
+ WriteBatchExecutionNode(batchNode, nodeTrace, writer);
break;
case IntrospectionExecutionNode introspectionNode:
@@ -134,11 +134,6 @@ private static void WriteOperationNode(OperationExecutionNode node, ExecutionNod
writer.WriteLine("target: {0}", node.Target.ToString());
}
- if (node.BatchingGroupId.HasValue)
- {
- writer.WriteLine("batchingGroupId: {0}", node.BatchingGroupId.Value);
- }
-
if (node.Requirements.Length > 0)
{
writer.WriteLine("requirements:");
@@ -188,7 +183,7 @@ private static void WriteOperationNode(OperationExecutionNode node, ExecutionNod
{
writer.WriteLine("dependencies:");
writer.Indent();
- foreach (var dependency in node.Dependencies.ToArray().OrderBy(t => t.Id))
+ foreach (var dependency in node.Dependencies)
{
writer.WriteLine("- id: {0}", dependency.Id);
}
@@ -201,21 +196,96 @@ private static void WriteOperationNode(OperationExecutionNode node, ExecutionNod
writer.Unindent();
}
- private static void WriteOperationBatchNode(OperationBatchExecutionNode node, ExecutionNodeTrace? trace, CodeWriter writer)
+ private static void WriteBatchExecutionNode(OperationBatchExecutionNode batchNode, ExecutionNodeTrace? trace, CodeWriter writer)
{
- writer.WriteLine("- id: {0}", node.Id);
+ foreach (var opDef in batchNode.Operations)
+ {
+ switch (opDef)
+ {
+ case SingleOperationDefinition single:
+ WriteOperationDefinitionAsNode(batchNode, single, trace, writer);
+ break;
+
+ case BatchOperationDefinition batch:
+ WriteBatchOperationDefinitionAsNode(batchNode, batch, trace, writer);
+ break;
+ }
+ }
+ }
+
+ private static void WriteOperationDefinitionAsNode(
+ OperationBatchExecutionNode batchNode,
+ SingleOperationDefinition opDef,
+ ExecutionNodeTrace? trace,
+ CodeWriter writer)
+ {
+ writer.WriteLine("- id: {0}", opDef.Id);
+ writer.Indent();
+
+ writer.WriteLine("type: {0}", "Operation");
+
+ if (opDef.SchemaName is not null)
+ {
+ writer.WriteLine("schema: {0}", opDef.SchemaName);
+ }
+
+ writer.WriteLine("operation: |");
+ writer.Indent();
+ var reader = new StringReader(opDef.Operation.SourceText);
+ var line = reader.ReadLine();
+ while (line != null)
+ {
+ writer.WriteLine(line);
+ line = reader.ReadLine();
+ }
+ writer.Unindent();
+
+ if (!opDef.Source.IsRoot)
+ {
+ writer.WriteLine("source: {0}", opDef.Source.ToString());
+ }
+
+ if (!opDef.Target.IsRoot)
+ {
+ writer.WriteLine("target: {0}", opDef.Target.ToString());
+ }
+
+ writer.WriteLine("batchingGroupId: {0}", batchNode.Id);
+
+ WriteRequirements(opDef.Requirements, writer);
+ WriteConditions(opDef.Conditions, writer);
+ WriteForwardedVariables(opDef.ForwardedVariables, writer);
+
+ if (opDef.RequiresFileUpload)
+ {
+ writer.WriteLine("requiresFileUpload: true");
+ }
+
+ WriteDependencies(opDef.Dependencies, writer);
+ TryWriteNodeTrace(writer, trace);
+
+ writer.Unindent();
+ }
+
+ private static void WriteBatchOperationDefinitionAsNode(
+ OperationBatchExecutionNode batchNode,
+ BatchOperationDefinition opDef,
+ ExecutionNodeTrace? trace,
+ CodeWriter writer)
+ {
+ writer.WriteLine("- id: {0}", opDef.Id);
writer.Indent();
writer.WriteLine("type: {0}", "OperationBatch");
- if (node.SchemaName is not null)
+ if (opDef.SchemaName is not null)
{
- writer.WriteLine("schema: {0}", node.SchemaName);
+ writer.WriteLine("schema: {0}", opDef.SchemaName);
}
writer.WriteLine("operation: |");
writer.Indent();
- var reader = new StringReader(node.Operation.SourceText);
+ var reader = new StringReader(opDef.Operation.SourceText);
var line = reader.ReadLine();
while (line != null)
{
@@ -224,32 +294,46 @@ private static void WriteOperationBatchNode(OperationBatchExecutionNode node, Ex
}
writer.Unindent();
- if (!node.Source.IsRoot)
+ if (!opDef.Source.IsRoot)
{
- writer.WriteLine("source: {0}", node.Source.ToString());
+ writer.WriteLine("source: {0}", opDef.Source.ToString());
}
- if (node.Targets.Length > 0)
+ if (opDef.Targets.Length > 0)
{
writer.WriteLine("targets:");
writer.Indent();
- foreach (var target in node.Targets)
+ foreach (var target in opDef.Targets)
{
writer.WriteLine("- {0}", target.ToString());
}
writer.Unindent();
}
- if (node.BatchingGroupId.HasValue)
+ writer.WriteLine("batchingGroupId: {0}", batchNode.Id);
+
+ WriteRequirements(opDef.Requirements, writer);
+ WriteConditions(opDef.Conditions, writer);
+ WriteForwardedVariables(opDef.ForwardedVariables, writer);
+
+ if (opDef.RequiresFileUpload)
{
- writer.WriteLine("batchingGroupId: {0}", node.BatchingGroupId.Value);
+ writer.WriteLine("requiresFileUpload: true");
}
- if (node.Requirements.Length > 0)
+ WriteDependencies(opDef.Dependencies, writer);
+ TryWriteNodeTrace(writer, trace);
+
+ writer.Unindent();
+ }
+
+ private static void WriteRequirements(ReadOnlySpan requirements, CodeWriter writer)
+ {
+ if (requirements.Length > 0)
{
writer.WriteLine("requirements:");
writer.Indent();
- foreach (var requirement in node.Requirements.ToArray().OrderBy(t => t.Key))
+ foreach (var requirement in requirements)
{
writer.WriteLine("- name: {0}", requirement.Key);
writer.Indent();
@@ -269,42 +353,57 @@ private static void WriteOperationBatchNode(OperationBatchExecutionNode node, Ex
writer.Unindent();
}
+ }
- TryWriteConditions(writer, node);
+ private static void WriteConditions(ReadOnlySpan conditions, CodeWriter writer)
+ {
+ if (conditions.Length > 0)
+ {
+ writer.WriteLine("conditions:");
+ writer.Indent();
+ foreach (var condition in conditions)
+ {
+ writer.WriteLine("- variable: {0}", "$" + condition.VariableName);
+ writer.Indent();
- if (node.ForwardedVariables.Length > 0)
+ writer.WriteLine("passingValue: {0}", condition.PassingValue ? "true" : "false");
+ writer.Unindent();
+ }
+
+ writer.Unindent();
+ }
+ }
+
+ private static void WriteForwardedVariables(ReadOnlySpan forwardedVariables, CodeWriter writer)
+ {
+ if (forwardedVariables.Length > 0)
{
writer.WriteLine("forwardedVariables:");
writer.Indent();
- foreach (var variableName in node.ForwardedVariables)
+ foreach (var variableName in forwardedVariables)
{
writer.WriteLine("- {0}", variableName);
}
writer.Unindent();
}
+ }
- if (node.RequiresFileUpload)
- {
- writer.WriteLine("requiresFileUpload: true");
- }
-
- if (node.Dependencies.Length > 0)
+ private static void WriteDependencies(ReadOnlySpan dependencies, CodeWriter writer)
+ {
+ if (dependencies.Length > 0)
{
writer.WriteLine("dependencies:");
writer.Indent();
- foreach (var dependency in node.Dependencies.ToArray().OrderBy(t => t.Id))
+
+ foreach (var dependency in dependencies)
{
writer.WriteLine("- id: {0}", dependency.Id);
}
writer.Unindent();
}
-
- TryWriteNodeTrace(writer, trace);
-
- writer.Unindent();
}
private static void WriteIntrospectionNode(IntrospectionExecutionNode node, ExecutionNodeTrace? trace, CodeWriter writer)
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/SingleOperationDefinition.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/SingleOperationDefinition.cs
new file mode 100644
index 00000000000..fc7f8c8aca2
--- /dev/null
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/SingleOperationDefinition.cs
@@ -0,0 +1,36 @@
+using HotChocolate.Execution;
+
+namespace HotChocolate.Fusion.Execution.Nodes;
+
+internal sealed class SingleOperationDefinition : OperationDefinition
+{
+ internal SingleOperationDefinition(
+ int id,
+ OperationSourceText operation,
+ string? schemaName,
+ SelectionPath target,
+ SelectionPath source,
+ OperationRequirement[] requirements,
+ string[] forwardedVariables,
+ ResultSelectionSet resultSelectionSet,
+ ExecutionNodeCondition[] conditions,
+ bool requiresFileUpload)
+ : base(
+ id,
+ operation,
+ schemaName,
+ source,
+ requirements,
+ forwardedVariables,
+ resultSelectionSet,
+ conditions,
+ requiresFileUpload)
+ {
+ Target = target;
+ }
+
+ ///
+ /// Gets the path to the selection set for which this operation fetches data.
+ ///
+ public SelectionPath Target { get; }
+}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs
index c3e42a2a891..b5b8fff4be7 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs
@@ -27,11 +27,11 @@ public sealed class OperationPlanContext : IFeatureProvider, IAsyncDisposable
private readonly ImmutableArray[] _variableValueSets;
private readonly Uri?[] _transportUris;
private readonly string?[] _transportContentTypes;
+ private readonly List?[] _skippedDefinitions;
private readonly IFusionExecutionDiagnosticEvents _diagnosticEvents;
private readonly FetchResultStorePool _resultStorePool;
private readonly FetchResultStore _resultStore;
private readonly ExecutionState _executionState;
- private readonly SourceSchemaRequestDispatcher _sourceSchemaDispatcher;
private readonly INodeIdParser _nodeIdParser;
private readonly bool _collectTelemetry;
private ISourceSchemaClientScope _clientScope;
@@ -79,7 +79,6 @@ public OperationPlanContext(
requestContext.Schema.GetOptions().PathSegmentLocalPoolCapacity);
_executionState = new ExecutionState(_collectTelemetry, cancellationTokenSource);
- _sourceSchemaDispatcher = new SourceSchemaRequestDispatcher(this);
var maxNodeId = 0;
@@ -98,6 +97,7 @@ public OperationPlanContext(
_variableValueSets = new ImmutableArray[nodeSlotCount];
_transportUris = new Uri?[nodeSlotCount];
_transportContentTypes = new string?[nodeSlotCount];
+ _skippedDefinitions = new List?[nodeSlotCount];
}
public OperationPlan OperationPlan { get; }
@@ -110,12 +110,11 @@ public OperationPlanContext(
public ISourceSchemaClientScope ClientScope => _clientScope;
- public ISourceSchemaScheduler SourceSchemaScheduler => _sourceSchemaDispatcher;
-
- public ISourceSchemaDispatcher SourceSchemaDispatcher => _sourceSchemaDispatcher;
-
internal ExecutionState ExecutionState => _executionState;
+ internal bool IsNodeSkipped(int nodeId)
+ => _executionState.IsNodeSkipped(nodeId);
+
public ulong IncludeFlags { get; }
public bool CollectTelemetry => _collectTelemetry;
@@ -151,6 +150,26 @@ internal ImmutableArray GetDependentsToExecute(ExecutionNode node
return nodeCompletionSet?.GetSnapshot() ?? [];
}
+ internal void TrackSkippedDefinition(ExecutionNode node, IOperationPlanNode skippedDefinition)
+ {
+ var nodeId = node.Id;
+ var list = _skippedDefinitions[nodeId];
+
+ if (list is null)
+ {
+ list = [];
+ _skippedDefinitions[nodeId] = list;
+ }
+
+ list.Add(skippedDefinition);
+ }
+
+ internal ImmutableArray GetSkippedDefinitions(ExecutionNode node)
+ {
+ var list = _skippedDefinitions[node.Id];
+ return list is null or { Count: 0 } ? [] : [.. list];
+ }
+
internal void SetDynamicSchemaName(ExecutionNode node, string schemaName)
=> _schemaNames[node.Id] = schemaName;
@@ -321,11 +340,26 @@ private CompactPath ToResultPath(SelectionPath selectionSet)
return builder.ToPath();
}
+ internal void AddPartialResult(
+ SelectionPath sourcePath,
+ SourceSchemaResult result,
+ ResultSelectionSet resultSelectionSet,
+ bool containsErrors)
+ {
+ var canExecutionContinue =
+ _resultStore.AddPartialResult(sourcePath, result, resultSelectionSet, containsErrors);
+
+ if (!canExecutionContinue)
+ {
+ ExecutionState.CancelProcessing();
+ }
+ }
+
internal void AddPartialResults(
SelectionPath sourcePath,
ReadOnlySpan results,
ResultSelectionSet resultSelectionSet,
- bool containsErrors = true)
+ bool containsErrors)
{
var canExecutionContinue =
_resultStore.AddPartialResults(sourcePath, results, resultSelectionSet, containsErrors);
@@ -502,7 +536,6 @@ public async ValueTask DisposeAsync()
{
_disposed = true;
DisposeNodeState();
- _sourceSchemaDispatcher.Abort();
_resultStorePool.Return(_resultStore);
await _clientScope.DisposeAsync();
}
@@ -511,6 +544,7 @@ public async ValueTask DisposeAsync()
private void ResetNodeState()
{
Array.Clear(_schemaNames);
+ Array.Clear(_skippedDefinitions);
if (_collectTelemetry)
{
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs
index 5b71ef1cb7c..006b9b1e0c7 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs
@@ -21,39 +21,26 @@ public async Task ExecuteAsync(
await using var context = new OperationPlanContext(requestContext, variables, operationPlan, executionCts);
context.Begin();
- try
+ switch (operationPlan.Operation.Definition.Operation)
{
- switch (operationPlan.Operation.Definition.Operation)
- {
- case OperationType.Query:
- await ExecuteQueryAsync(context, operationPlan, executionCts.Token);
- break;
-
- case OperationType.Mutation:
- await ExecuteMutationAsync(context, operationPlan, executionCts.Token);
- break;
+ case OperationType.Query:
+ await ExecuteQueryAsync(context, operationPlan, executionCts.Token);
+ break;
- default:
- throw new InvalidOperationException("Only queries and mutations can be executed.");
- }
+ case OperationType.Mutation:
+ await ExecuteMutationAsync(context, operationPlan, executionCts.Token);
+ break;
- if (executionCts.IsCancellationRequested)
- {
- context.SourceSchemaDispatcher.Abort();
- }
+ default:
+ throw new InvalidOperationException("Only queries and mutations can be executed.");
+ }
- // If the original CancellationToken of the request was cancelled,
- // the Execution nodes and the PlanExecutor should have been gracefully cancelled,
- // so we throw here to properly cancel the request execution.
- cancellationToken.ThrowIfCancellationRequested();
+ // If the original CancellationToken of the request was cancelled,
+ // the Execution nodes and the PlanExecutor should have been gracefully cancelled,
+ // so we throw here to properly cancel the request execution.
+ cancellationToken.ThrowIfCancellationRequested();
- return context.Complete();
- }
- catch (Exception ex)
- {
- context.SourceSchemaDispatcher.Abort(ex);
- throw;
- }
+ return context.Complete();
}
public async Task SubscribeAsync(
@@ -76,6 +63,7 @@ public async Task SubscribeAsync(
// without also cancelling the entire request pipeline.
var executionCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
OperationPlanContext? context = null;
+ CancellationTokenRegistration? cancellationRegistration = null;
try
{
@@ -83,12 +71,7 @@ public async Task SubscribeAsync(
var subscriptionResult = await subscriptionNode.SubscribeAsync(context, executionCts.Token);
var executionState = context.ExecutionState;
- executionCts.Token.Register(
- () =>
- {
- executionState.Signal.TryResetToIdle();
- context.SourceSchemaDispatcher.Abort();
- });
+ cancellationRegistration = executionCts.Token.Register(() => executionState.Signal.TryResetToIdle());
if (subscriptionResult.Status is not ExecutionStatus.Success)
{
@@ -107,11 +90,15 @@ public async Task SubscribeAsync(
stream.RegisterForCleanup(executionCts);
return stream;
}
- catch (Exception ex)
+ catch (Exception)
{
- context?.SourceSchemaDispatcher.Abort(ex);
executionCts.Dispose();
+ if (cancellationRegistration is { } r)
+ {
+ await r.DisposeAsync();
+ }
+
throw;
}
}
@@ -123,14 +110,8 @@ private static async Task ExecuteQueryAsync(
{
var executionState = context.ExecutionState;
- await using var cancellationRegistration = cancellationToken.Register(
- () =>
- {
- executionState.Signal.TryResetToIdle();
- context.SourceSchemaDispatcher.Abort();
- });
-
- RegisterBatchingGroups(context, plan);
+ await using var cancellationRegistration =
+ cancellationToken.Register(() => executionState.Signal.TryResetToIdle());
// GraphQL queries allow us to execute the plan by using full parallelism.
// We fill the backlog with all nodes from the operation plan.
@@ -147,7 +128,7 @@ private static async Task ExecuteQueryAsync(
while (executionState.TryDequeueCompletedResult(out var result))
{
var node = plan.GetNodeById(result.Id);
- executionState.CompleteNode(context, node, result);
+ executionState.CompleteNode(plan, node, result);
}
executionState.EnqueueNextNodes(context, cancellationToken);
@@ -175,14 +156,8 @@ private static async Task ExecuteMutationAsync(
{
var executionState = context.ExecutionState;
- await using var cancellationRegistration = cancellationToken.Register(
- () =>
- {
- executionState.Signal.TryResetToIdle();
- context.SourceSchemaDispatcher.Abort();
- });
-
- RegisterBatchingGroups(context, plan);
+ await using var cancellationRegistration =
+ cancellationToken.Register(() => executionState.Signal.TryResetToIdle());
// For mutations, we fill the backlog with all nodes from the operation plan just like for queries.
executionState.FillBacklog(plan);
@@ -204,7 +179,7 @@ private static async Task ExecuteMutationAsync(
while (executionState.TryDequeueCompletedResult(out var result))
{
var node = plan.GetNodeById(result.Id);
- executionState.CompleteNode(context, node, result);
+ executionState.CompleteNode(plan, node, result);
}
executionState.EnqueueNextNodes(context, cancellationToken);
@@ -243,11 +218,7 @@ private static async IAsyncEnumerable CreateSubscriptionEnumera
var stream = subscriptionResult.ReadStreamAsync()
.WithCancellation(executionCancellationToken);
await using var cancellationRegistration = executionCancellationToken.Register(
- () =>
- {
- executionState.Signal.TryResetToIdle();
- context.SourceSchemaDispatcher.Abort();
- });
+ () => executionState.Signal.TryResetToIdle());
await foreach (var eventArgs in stream)
{
@@ -264,9 +235,7 @@ private static async IAsyncEnumerable CreateSubscriptionEnumera
context.Begin(eventArgs.StartTimestamp, eventArgs.Activity?.TraceId.ToHexString());
executionState.Reset();
- context.SourceSchemaDispatcher.Reset();
- RegisterBatchingGroups(context, plan);
executionState.FillBacklog(plan);
executionState.EnqueueForCompletion(
new ExecutionNodeResult(
@@ -276,6 +245,7 @@ private static async IAsyncEnumerable CreateSubscriptionEnumera
eventArgs.Duration,
Exception: null,
DependentsToExecute: [],
+ SkippedDefinitions: [],
VariableValueSets: eventArgs.VariableValueSets));
while (!executionCancellationToken.IsCancellationRequested && executionState.IsProcessing())
@@ -283,7 +253,7 @@ private static async IAsyncEnumerable CreateSubscriptionEnumera
while (executionState.TryDequeueCompletedResult(out var nodeResult))
{
var node = plan.GetNodeById(nodeResult.Id);
- executionState.CompleteNode(context, node, nodeResult);
+ executionState.CompleteNode(plan, node, nodeResult);
}
executionState.EnqueueNextNodes(context, executionCancellationToken);
@@ -307,7 +277,6 @@ private static async IAsyncEnumerable CreateSubscriptionEnumera
}
catch (Exception ex) when (ex is not OperationCanceledException)
{
- context.SourceSchemaDispatcher.Abort(ex);
context.DiagnosticEvents.SubscriptionEventError(
context,
subscriptionNode,
@@ -325,12 +294,4 @@ private static async IAsyncEnumerable CreateSubscriptionEnumera
yield return result;
}
}
-
- private static void RegisterBatchingGroups(OperationPlanContext context, OperationPlan plan)
- {
- foreach (var group in plan.BatchingGroups)
- {
- context.SourceSchemaDispatcher.RegisterGroup(group.GroupId, group.NodeIds);
- }
- }
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs
index 0d389cc3a2b..ef118d4bfc8 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs
@@ -51,11 +51,19 @@ internal sealed partial class FetchResultStore : IDisposable
public ConcurrentStack MemoryOwners => _memory;
- public bool AddPartialResults(
+ public bool AddPartialResult(
SelectionPath sourcePath,
- ReadOnlySpan results,
- ResultSelectionSet resultSelectionSet)
- => AddPartialResults(sourcePath, results, resultSelectionSet, containsErrors: true);
+ SourceSchemaResult result,
+ ResultSelectionSet resultSelectionSet,
+ bool containsErrors)
+ {
+ ObjectDisposedException.ThrowIf(_disposed, this);
+ ArgumentNullException.ThrowIfNull(sourcePath);
+
+ return containsErrors
+ ? AddSinglePartialResult(sourcePath, result, resultSelectionSet)
+ : AddSinglePartialResultNoErrors(sourcePath, result, resultSelectionSet);
+ }
public bool AddPartialResults(
SelectionPath sourcePath,
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ThrowHelper.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ThrowHelper.cs
new file mode 100644
index 00000000000..71f4cd3a421
--- /dev/null
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ThrowHelper.cs
@@ -0,0 +1,34 @@
+using HotChocolate.Fusion.Properties;
+
+namespace HotChocolate.Fusion.Execution;
+
+internal static class ThrowHelper
+{
+ public static InvalidOperationException MissingBooleanVariable(string variableName)
+ => new(string.Format(
+ FusionExecutionResources.ExecutionNode_MissingBooleanVariable,
+ variableName));
+
+ public static KeyNotFoundException NodeNotFound(int id)
+ => new(string.Format(
+ FusionExecutionResources.OperationPlan_NodeNotFound,
+ id));
+
+ public static InvalidOperationException MissingBatchResult(int operationId)
+ => new(string.Format(
+ FusionExecutionResources.OperationBatchExecutionNode_MissingBatchResult,
+ operationId));
+
+ public static InvalidOperationException SingleOperationRequired()
+ => new(FusionExecutionResources.JsonOperationPlanParser_SingleOperationRequired);
+
+ public static InvalidOperationException RequestIndexOutOfRange(int requestIndex)
+ => new(string.Format(
+ FusionExecutionResources.SourceSchemaHttpClient_InvalidRequestIndex,
+ requestIndex));
+
+ public static InvalidOperationException VariableIndexOutOfRange(int variableIndex)
+ => new(string.Format(
+ FusionExecutionResources.SourceSchemaHttpClient_VariableIndexOutOfRange,
+ variableIndex));
+}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs
index e5f21aea176..0c1e7685182 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs
@@ -16,7 +16,8 @@ public sealed partial class OperationPlanner
private const string DynamicSchemaNameMarker = "__dynamic__";
///
- /// Builds the actual execution plan from the provided .
+ /// Converts the planner's intermediate plan steps into a final execution plan
+ /// that the executor can run against the downstream source schemas.
///
private OperationPlan BuildExecutionPlan(
Operation operation,
@@ -38,31 +39,21 @@ private OperationPlan BuildExecutionPlan(
return OperationPlan.Create(operation, nodes, nodes, searchSpace, expandedNodes);
}
- var completedSteps = new HashSet();
- var completedNodes = new Dictionary();
- var dependencyLookup = new Dictionary>();
- var branchesLookup = new Dictionary>();
- var fallbackLookup = new Dictionary();
+ var ctx = new ExecutionPlanBuildContext();
var hasVariables = operationDefinition.VariableDefinitions.Count > 0;
- planSteps = PrepareSteps(planSteps, operationDefinition, dependencyLookup, branchesLookup, fallbackLookup);
- BuildExecutionNodes(
- planSteps,
- completedSteps,
- completedNodes,
- dependencyLookup,
- _schema,
- _options.EnableRequestGrouping,
- hasVariables);
- MergeEquivalentOperationNodes(completedNodes, dependencyLookup);
- BuildDependencyStructure(completedNodes, dependencyLookup, branchesLookup, fallbackLookup);
+ planSteps = TransformPlanSteps(planSteps, operationDefinition);
+ IndexDependencies(planSteps, ctx);
+ BuildExecutionNodes(planSteps, ctx, _schema, hasVariables);
+ MergeAndBatchOperations(ctx, _options.EnableRequestGrouping);
+ WireExecutionDependencies(ctx);
var rootNodes = planSteps
- .Where(t => !dependencyLookup.ContainsKey(t.Id) && completedNodes.ContainsKey(t.Id))
- .Select(t => completedNodes[t.Id])
+ .Where(t => !ctx.DependenciesByStepId.ContainsKey(t.Id) && ctx.ExecutionNodes.ContainsKey(t.Id))
+ .Select(t => ctx.ExecutionNodes[t.Id])
.ToImmutableArray();
- var allNodes = completedNodes
+ var allNodes = ctx.ExecutionNodes
.OrderBy(t => t.Key)
.Select(t => t.Value)
.ToImmutableArray();
@@ -85,12 +76,9 @@ private OperationPlan BuildExecutionPlan(
return OperationPlan.Create(operation, rootNodes, allNodes, searchSpace, expandedNodes);
}
- private static ImmutableList PrepareSteps(
+ private static ImmutableList TransformPlanSteps(
ImmutableList planSteps,
- OperationDefinitionNode originalOperation,
- Dictionary> dependencyLookup,
- Dictionary> branchesLookup,
- Dictionary fallbackLookup)
+ OperationDefinitionNode originalOperation)
{
var updatedPlanSteps = planSteps;
var forwardVariableContext = new ForwardVariableRewriter.Context();
@@ -102,58 +90,122 @@ private static ImmutableList PrepareSteps(
foreach (var step in planSteps)
{
- if (step is OperationPlanStep operationPlanStep)
+ if (step is not OperationPlanStep operationPlanStep)
{
- // Planning may leave temporary `{}` child selections after requirement rewrites.
- // We normalize those first, then only remove the step if the root selection set
- // itself became empty.
- operationPlanStep = RemoveEmptySelectionSets(operationPlanStep);
+ continue;
+ }
- if (!ReferenceEquals(step, operationPlanStep))
- {
- updatedPlanSteps = updatedPlanSteps.Replace(step, operationPlanStep);
- }
+ // Requirement rewriting can leave behind empty child selection sets.
+ // We remove them here so later stages do not treat them as real selections.
+ operationPlanStep = RemoveEmptySelectionSets(operationPlanStep);
+
+ if (!ReferenceEquals(step, operationPlanStep))
+ {
+ updatedPlanSteps = updatedPlanSteps.Replace(step, operationPlanStep);
+ }
+
+ // Discard steps that have no meaningful selections left.
+ if (IsEmptyOperation(operationPlanStep))
+ {
+ updatedPlanSteps = updatedPlanSteps.Remove(operationPlanStep);
+ continue;
+ }
+
+ // When every root selection carries a @skip or @include directive,
+ // we promote those directives to node-level conditions. This lets
+ // the executor skip the entire network call when the condition is
+ // not met, rather than sending a request that returns nothing.
+ if (operationPlanStep.AreAllProvidedSelectionsConditional())
+ {
+ var updated = ExtractConditionsAndRewriteSelectionSet(operationPlanStep);
+ updatedPlanSteps = updatedPlanSteps.Replace(operationPlanStep, updated);
+ operationPlanStep = updated;
+ }
+
+ // Attach variable definitions so the operation is syntactically valid
+ // when sent to the downstream service.
+ updatedPlanSteps = updatedPlanSteps.Replace(
+ operationPlanStep,
+ AddVariableDefinitions(operationPlanStep, forwardVariableContext));
+ }
+
+ return updatedPlanSteps;
+
+ static bool IsEmptyOperation(OperationPlanStep step)
+ {
+ if (step.Definition.SelectionSet.Selections.Count == 0)
+ {
+ return true;
+ }
- // During the planing process we keep incomplete operation steps around
- // in order to inline requirements. If those do not materialize these
- // operation fragments need to be removed before we can build the
- // execution plan.
- if (IsEmptyOperation(operationPlanStep))
+ return step.Definition.SelectionSet.Selections is
+ [
+#pragma warning disable format
+ FieldNode
{
- updatedPlanSteps = updatedPlanSteps.Remove(operationPlanStep);
- continue;
+ Alias: null,
+ Name.Value: IntrospectionFieldNames.TypeName,
+ Directives: [{ Name.Value: "fusion__empty" }]
}
+#pragma warning restore format
+ ];
+ }
- // If all the root selections are conditional, we can pull those conditionals
- // out as conditions onto the execution node.
- // We can do the same for conditional selections below lookup fields.
- if (operationPlanStep.AreAllProvidedSelectionsConditional())
- {
- var updatedOperationPlanStep = ExtractConditionsAndRewriteSelectionSet(operationPlanStep);
+ static OperationPlanStep RemoveEmptySelectionSets(OperationPlanStep step)
+ {
+ var updatedDefinition = RemoveEmptySelections(step.Definition);
+ return ReferenceEquals(updatedDefinition, step.Definition)
+ ? step
+ : step with { Definition = updatedDefinition };
+ }
- updatedPlanSteps = updatedPlanSteps.Replace(operationPlanStep, updatedOperationPlanStep);
+ static OperationPlanStep AddVariableDefinitions(
+ OperationPlanStep step,
+ ForwardVariableRewriter.Context forwardVariableContext)
+ {
+ forwardVariableContext.Reset();
- operationPlanStep = updatedOperationPlanStep;
- }
+ foreach (var (key, requirement) in step.Requirements.OrderBy(t => t.Key))
+ {
+ forwardVariableContext.Requirements[key] =
+ new VariableDefinitionNode(
+ null,
+ new VariableNode(null, new NameNode(key)),
+ description: null,
+ requirement.Type,
+ null,
+ []);
+ }
- // The operation definition of the current OperationPlanStep do not yet
- // have variable definitions declared, so we need to traverse the operation definition
- // and look at what variables and requirements are used within the operation definition.
- updatedPlanSteps = updatedPlanSteps.Replace(
- operationPlanStep,
- AddVariableDefinitions(operationPlanStep));
-
- // Each PlanStep tracks dependant PlanSteps,
- // so PlanSteps that require data (lookup or field requirements)
- // from the current step.
- // For a simpler planing algorithm we are building a lookup in reverse,
- // that tracks the dependencies each node has.
+ var rewrittenNode = s_forwardVariableRewriter.Rewrite(step.Definition, forwardVariableContext);
+
+ if (rewrittenNode is OperationDefinitionNode rewrittenOperationNode
+ && !ReferenceEquals(rewrittenOperationNode, step.Definition))
+ {
+ return step with { Definition = rewrittenOperationNode };
+ }
+
+ return step;
+ }
+ }
+
+ private static void IndexDependencies(
+ ImmutableList planSteps,
+ ExecutionPlanBuildContext ctx)
+ {
+ foreach (var step in planSteps)
+ {
+ if (step is OperationPlanStep operationPlanStep)
+ {
+ // Plan steps store which steps they feed into ("dependents").
+ // We invert that here so each step knows which steps it
+ // depends on, which is what the executor needs for scheduling.
foreach (var dependent in operationPlanStep.Dependents)
{
- if (!dependencyLookup.TryGetValue(dependent, out var dependencies))
+ if (!ctx.DependenciesByStepId.TryGetValue(dependent, out var dependencies))
{
dependencies = [];
- dependencyLookup[dependent] = dependencies;
+ ctx.DependenciesByStepId[dependent] = dependencies;
}
dependencies.Add(step.Id);
@@ -163,198 +215,723 @@ private static ImmutableList PrepareSteps(
{
foreach (var (_, dependent) in nodePlanStep.Branches)
{
- if (!dependencyLookup.TryGetValue(dependent.Id, out var dependencies))
+ if (!ctx.DependenciesByStepId.TryGetValue(dependent.Id, out var dependencies))
{
dependencies = [];
- dependencyLookup[dependent.Id] = dependencies;
+ ctx.DependenciesByStepId[dependent.Id] = dependencies;
}
dependencies.Add(nodePlanStep.Id);
}
- if (!dependencyLookup.TryGetValue(nodePlanStep.FallbackQuery.Id, out var fallbackDependencies))
+ if (!ctx.DependenciesByStepId.TryGetValue(nodePlanStep.FallbackQuery.Id, out var fallbackDependencies))
{
fallbackDependencies = [];
- dependencyLookup[nodePlanStep.FallbackQuery.Id] = fallbackDependencies;
+ ctx.DependenciesByStepId[nodePlanStep.FallbackQuery.Id] = fallbackDependencies;
}
fallbackDependencies.Add(nodePlanStep.Id);
- branchesLookup.Add(nodePlanStep.Id, nodePlanStep.Branches.ToDictionary(x => x.Key, x => x.Value.Id));
- fallbackLookup.Add(nodePlanStep.Id, nodePlanStep.FallbackQuery.Id);
+ ctx.BranchesByNodeId.Add(
+ nodePlanStep.Id,
+ nodePlanStep.Branches.ToDictionary(x => x.Key, x => x.Value.Id));
+ ctx.FallbackByNodeId.Add(nodePlanStep.Id, nodePlanStep.FallbackQuery.Id);
}
}
+ }
- return updatedPlanSteps;
+ private static void BuildExecutionNodes(
+ ImmutableList planSteps,
+ ExecutionPlanBuildContext ctx,
+ ISchemaDefinition schema,
+ bool hasVariables)
+ {
+ var requiresUpload = schema.Types.TryGetType(UploadScalarName, out var uploadType) && uploadType.IsScalarType();
+ var readySteps = planSteps.Where(t => !ctx.DependenciesByStepId.ContainsKey(t.Id)).ToList();
+ var variableBuffer = hasVariables ? new List() : null;
- bool IsEmptyOperation(OperationPlanStep step)
+ while (ctx.ProcessedStepIds.Count < planSteps.Count)
{
- if (step.Definition.SelectionSet.Selections.Count == 0)
+ foreach (var step in readySteps)
{
- return true;
+ if (!ctx.ProcessedStepIds.Add(step.Id))
+ {
+ continue;
+ }
+
+ if (step is OperationPlanStep operationStep)
+ {
+ ctx.ExecutionNodes.Add(step.Id,
+ CreateOperationExecutionNode(operationStep, schema, requiresUpload, variableBuffer));
+ }
+ else if (step is NodeFieldPlanStep nodeStep)
+ {
+ ctx.ExecutionNodes.Add(step.Id,
+ new NodeFieldExecutionNode(nodeStep.Id, nodeStep.ResponseName, nodeStep.IdValue, nodeStep.Conditions));
+ }
}
- return step.Definition.SelectionSet.Selections is
- [
- FieldNode
-#pragma warning disable format
+ readySteps.Clear();
+
+ foreach (var step in planSteps)
+ {
+ if (ctx.DependenciesByStepId.TryGetValue(step.Id, out var stepDependencies)
+ && ctx.ProcessedStepIds.IsSupersetOf(stepDependencies))
{
- Alias: null,
- Name.Value: IntrospectionFieldNames.TypeName,
- Directives: [{ Name.Value: "fusion__empty" }]
+ readySteps.Add(step);
}
-#pragma warning restore format
- ];
+ }
+
+ if (readySteps.Count == 0)
+ {
+ break;
+ }
}
+ }
+
+ private static OperationExecutionNode CreateOperationExecutionNode(
+ OperationPlanStep operationStep,
+ ISchemaDefinition schema,
+ bool requiresUpload,
+ List? variableBuffer)
+ {
+ var requirements = operationStep.Requirements.IsEmpty
+ ? Array.Empty()
+ : operationStep.Requirements.OrderBy(t => t.Key).Select(t => t.Value).ToArray();
- OperationPlanStep RemoveEmptySelectionSets(OperationPlanStep step)
+ var forwardedVariables = Array.Empty();
+
+ if (variableBuffer is not null && operationStep.Definition.VariableDefinitions.Count > 0)
{
- var updatedDefinition = RemoveEmptySelections(step.Definition);
- return ReferenceEquals(updatedDefinition, step.Definition)
- ? step
- : step with { Definition = updatedDefinition };
+ variableBuffer.Clear();
+ var requirementKeys = new HashSet(requirements.Select(r => r.Key));
+
+ foreach (var variableDef in operationStep.Definition.VariableDefinitions)
+ {
+ var name = variableDef.Variable.Name.Value;
+
+ if (!requirementKeys.Contains(name))
+ {
+ variableBuffer.Add(name);
+ }
+ }
+
+ if (variableBuffer.Count > 0)
+ {
+ forwardedVariables = variableBuffer.ToArray();
+ }
+ }
+
+ var requiresFileUpload = requiresUpload
+ && DoVariablesContainUploadScalar(operationStep.Definition.VariableDefinitions, schema);
+
+ var operation = RemoveEmptyTypeNames(operationStep.Definition);
+ var operationSource = operation.ToSourceText();
+
+ var selectionSetNode = GetSelectionSetNodeFromPath(operationStep.Definition, operationStep.Source);
+ selectionSetNode = PruneNonValueTypeChildren(selectionSetNode, operationStep.Type, schema);
+ var resultSelectionSet = ResultSelectionSet.Create(selectionSetNode, schema);
+
+ return new OperationExecutionNode(
+ operationStep.Id,
+ operationSource,
+ operationStep.SchemaName,
+ operationStep.Target,
+ operationStep.Source,
+ requirements,
+ forwardedVariables,
+ resultSelectionSet,
+ operationStep.Conditions,
+ requiresFileUpload);
+ }
+
+ private static void MergeAndBatchOperations(
+ ExecutionPlanBuildContext ctx,
+ bool enableRequestGrouping)
+ {
+ var nodeFieldBoundCache = new Dictionary();
+ var mergeResults = MergeStructurallyIdenticalOperations(ctx, nodeFieldBoundCache);
+
+ // Capture each node's dependency identifiers now, because the batching
+ // step below will rewrite the dependency lookup as it merges nodes.
+ var originalDependencies = new Dictionary(ctx.DependenciesByStepId.Count);
+
+ foreach (var (nodeId, dependencies) in ctx.DependenciesByStepId)
+ {
+ originalDependencies[nodeId] = dependencies.ToArray();
}
- OperationPlanStep AddVariableDefinitions(OperationPlanStep step)
+ var perOperationDependencies = GroupBySchemaAndDepthIntoBatches(
+ ctx, nodeFieldBoundCache, mergeResults, originalDependencies, enableRequestGrouping);
+
+ WrapRemainingMergedOperations(ctx, mergeResults, perOperationDependencies, originalDependencies);
+ WirePerOperationDependencies(ctx, perOperationDependencies);
+ }
+
+ ///
+ /// Finds query operations that are structurally identical and merges
+ /// them into a single . This
+ /// reduces the number of network requests the executor has to send.
+ ///
+ private static Dictionary MergeStructurallyIdenticalOperations(
+ ExecutionPlanBuildContext ctx,
+ Dictionary nodeFieldBoundCache)
+ {
+ var candidates = new Dictionary>(StringComparer.Ordinal);
+
+ foreach (var node in ctx.ExecutionNodes.Values.OfType())
{
- forwardVariableContext.Reset();
+ if (node.Operation.Type != OperationType.Query)
+ {
+ continue;
+ }
- foreach (var (key, requirement) in step.Requirements.OrderBy(t => t.Key))
+ if (IsNodeFieldBound(node.Id, ctx, nodeFieldBoundCache))
{
- forwardVariableContext.Requirements[key] =
- new VariableDefinitionNode(
- null,
- new VariableNode(null, new NameNode(key)),
- description: null,
- requirement.Type,
- null,
- []);
+ continue;
}
- var rewrittenNode = s_forwardVariableRewriter.Rewrite(step.Definition, forwardVariableContext);
+ var signature = ComputeCanonicalSignature(node);
- if (rewrittenNode is OperationDefinitionNode rewrittenOperationNode
- && !ReferenceEquals(rewrittenOperationNode, step.Definition))
+ if (!candidates.TryGetValue(signature, out var list))
+ {
+ list = [];
+ candidates[signature] = list;
+ }
+
+ list.Add(node);
+ }
+
+ var mergeResults = new Dictionary();
+
+ foreach (var (_, equivalentNodes) in candidates)
+ {
+ if (equivalentNodes.Count <= 1)
+ {
+ continue;
+ }
+
+ foreach (var group in PartitionIntoMergeableGroups(equivalentNodes, ctx.DependenciesByStepId))
+ {
+ if (group.Count <= 1)
+ {
+ continue;
+ }
+
+ group.Sort((a, b) => a.Id.CompareTo(b.Id));
+
+ var primary = group[0];
+ var (canonicalOp, canonicalRequirements) = CanonicalizeOperation(primary);
+ var targets = new SelectionPath[group.Count];
+
+ for (var i = 0; i < group.Count; i++)
+ {
+ targets[i] = group[i].Target;
+ }
+
+ mergeResults[primary.Id] = new MergeResult(
+ targets, canonicalOp, canonicalRequirements, primary);
+
+ AbsorbMergedNodes(ctx, primary.Id, group);
+ }
+ }
+
+ return mergeResults;
+ }
+
+ ///
+ /// Removes merged nodes from the execution graph and folds their
+ /// dependencies into the primary node that represents them all.
+ ///
+ private static void AbsorbMergedNodes(
+ ExecutionPlanBuildContext ctx,
+ int primaryId,
+ List group)
+ {
+ var absorbedIds = new HashSet(group.Count - 1);
+
+ if (!ctx.DependenciesByStepId.TryGetValue(primaryId, out var primaryDeps))
+ {
+ primaryDeps = [];
+ }
+
+ for (var i = 1; i < group.Count; i++)
+ {
+ var otherId = group[i].Id;
+ absorbedIds.Add(otherId);
+ ctx.ExecutionNodes.Remove(otherId);
+
+ if (ctx.DependenciesByStepId.TryGetValue(otherId, out var otherDependencies))
+ {
+ foreach (var dependency in otherDependencies)
+ {
+ primaryDeps.Add(dependency);
+ }
+
+ ctx.DependenciesByStepId.Remove(otherId);
+ }
+ }
+
+ if (primaryDeps.Count > 0)
+ {
+ ctx.DependenciesByStepId[primaryId] = primaryDeps;
+ }
+ else
+ {
+ ctx.DependenciesByStepId.Remove(primaryId);
+ }
+
+ RedirectDependencyReferences(ctx.DependenciesByStepId, absorbedIds, primaryId);
+ }
+
+ ///
+ /// Groups query nodes by their target schema and dependency depth into
+ /// batch execution nodes. Nodes at the same depth targeting the same
+ /// source schema are independent of each other, so the executor can
+ /// send them together in a single batched network request.
+ ///
+ private static Dictionary>
+ GroupBySchemaAndDepthIntoBatches(
+ ExecutionPlanBuildContext ctx,
+ Dictionary nodeFieldBoundCache,
+ Dictionary mergeResults,
+ Dictionary originalDependencies,
+ bool enableRequestGrouping)
+ {
+ var consumedMergeIds = new HashSet();
+ var perOperationDependencies = new Dictionary>();
+
+ if (!enableRequestGrouping)
+ {
+ return perOperationDependencies;
+ }
+
+ var queryNodes = ctx.ExecutionNodes.Values
+ .OfType()
+ .Where(n => n.Operation.Type == OperationType.Query)
+ .Where(n => !IsNodeFieldBound(n.Id, ctx, nodeFieldBoundCache))
+ .ToList();
+
+ var depthLookup = new Dictionary();
+ var recursionStack = new HashSet();
+
+ foreach (var node in queryNodes)
+ {
+ GetDependencyDepth(node.Id, ctx.DependenciesByStepId, depthLookup, recursionStack);
+ }
+
+ var batchGroups = new Dictionary<(string schema, int depth), List>();
+
+ foreach (var node in queryNodes)
+ {
+ var schemaKey = node.SchemaName ?? DynamicSchemaNameMarker;
+ var depth = depthLookup.TryGetValue(node.Id, out var d) ? d : 0;
+ var key = (schemaKey, depth);
+
+ if (!batchGroups.TryGetValue(key, out var group))
+ {
+ group = [];
+ batchGroups[key] = group;
+ }
+
+ group.Add(node);
+ }
+
+ // Process from shallowest to deepest so that deeper groups
+ // reference the already-redirected identifiers from earlier merges.
+ foreach (var (_, groupMembers) in batchGroups.OrderBy(t => t.Key.depth))
+ {
+ if (groupMembers.Count <= 1)
+ {
+ continue;
+ }
+
+ groupMembers.Sort((a, b) => a.Id.CompareTo(b.Id));
+
+ var operations = new List();
+
+ foreach (var member in groupMembers)
+ {
+ if (mergeResults.TryGetValue(member.Id, out var merge))
+ {
+ consumedMergeIds.Add(member.Id);
+ operations.Add(CreateBatchOperationDefinition(merge));
+ }
+ else
+ {
+ operations.Add(CreateSingleOperationDefinition(member));
+ }
+ }
+
+ var lowestId = groupMembers[0].Id;
+ var batchNode = new OperationBatchExecutionNode(lowestId, operations.ToArray());
+
+ // Save each member's dependencies before replacing the individual
+ // nodes, because the replacement will remove them from the lookup.
+ var memberDependencies = new Dictionary();
+
+ foreach (var member in groupMembers)
+ {
+ if (originalDependencies.TryGetValue(member.Id, out var memberDeps))
+ {
+ memberDependencies[member.Id] = memberDeps;
+ }
+ }
+
+ ReplaceMembersWithBatchNode(ctx, groupMembers, batchNode, lowestId);
+ perOperationDependencies[batchNode] = memberDependencies;
+ }
+
+ // Remove consumed merge results so the caller knows which ones still
+ // need to be wrapped as standalone batch nodes.
+ foreach (var id in consumedMergeIds)
+ {
+ mergeResults.Remove(id);
+ }
+
+ return perOperationDependencies;
+ }
+
+ ///
+ /// Wraps merged operations that were not included in any multi-member
+ /// batch group into standalone batch execution nodes.
+ ///
+ private static void WrapRemainingMergedOperations(
+ ExecutionPlanBuildContext ctx,
+ Dictionary remainingMerges,
+ Dictionary> perOperationDependencies,
+ Dictionary originalDependencies)
+ {
+ foreach (var (primaryId, merge) in remainingMerges)
+ {
+ var operationDefinition = CreateBatchOperationDefinition(merge);
+ var standaloneBatchNode = new OperationBatchExecutionNode(primaryId, [operationDefinition]);
+ ctx.ExecutionNodes[primaryId] = standaloneBatchNode;
+
+ perOperationDependencies[standaloneBatchNode] =
+ new Dictionary
+ {
+ [operationDefinition.Id] = originalDependencies.TryGetValue(primaryId, out var primaryDeps)
+ ? primaryDeps
+ : []
+ };
+ }
+ }
+
+ ///
+ /// Connects each inner operation inside a batch node to the upstream
+ /// operation definitions it depends on. This gives the executor
+ /// fine-grained visibility into per-operation readiness.
+ ///
+ private static void WirePerOperationDependencies(
+ ExecutionPlanBuildContext ctx,
+ Dictionary> perOperationDependencies)
+ {
+ if (perOperationDependencies.Count == 0)
+ {
+ return;
+ }
+
+ var planNodeById = new Dictionary();
+
+ foreach (var node in ctx.ExecutionNodes.Values)
+ {
+ planNodeById[node.Id] = node;
+
+ if (node is OperationBatchExecutionNode batch)
+ {
+ foreach (var operation in batch.Operations)
+ {
+ planNodeById[operation.Id] = operation;
+ }
+ }
+ }
+
+ foreach (var (_, memberDependencies) in perOperationDependencies)
+ {
+ foreach (var (operationId, dependencyIds) in memberDependencies)
+ {
+ if (planNodeById.TryGetValue(operationId, out var operationNode)
+ && operationNode is OperationDefinition operationDefinition)
+ {
+ foreach (var dependencyId in dependencyIds)
+ {
+ if (planNodeById.TryGetValue(dependencyId, out var dependencyNode))
+ {
+ operationDefinition.AddDependency(dependencyNode);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ ///
+ /// Replaces individual member nodes in the execution graph with a single
+ /// batch node, merging all of their dependencies into the batch node.
+ ///
+ private static void ReplaceMembersWithBatchNode(
+ ExecutionPlanBuildContext ctx,
+ List members,
+ OperationBatchExecutionNode batchNode,
+ int batchNodeId)
+ {
+ var batchDependencies = new HashSet();
+ var memberIds = new HashSet(members.Count);
+
+ foreach (var member in members)
+ {
+ memberIds.Add(member.Id);
+ ctx.ExecutionNodes.Remove(member.Id);
+
+ if (ctx.DependenciesByStepId.TryGetValue(member.Id, out var memberDependencies))
+ {
+ foreach (var dependency in memberDependencies)
+ {
+ batchDependencies.Add(dependency);
+ }
+
+ ctx.DependenciesByStepId.Remove(member.Id);
+ }
+ }
+
+ ctx.ExecutionNodes[batchNodeId] = batchNode;
+
+ if (batchDependencies.Count > 0)
+ {
+ ctx.DependenciesByStepId[batchNodeId] = batchDependencies;
+ }
+
+ RedirectDependencyReferences(ctx.DependenciesByStepId, memberIds, batchNodeId);
+ }
+
+ private static BatchOperationDefinition CreateBatchOperationDefinition(MergeResult merge)
+ {
+ var primary = merge.Primary;
+ return new BatchOperationDefinition(
+ primary.Id,
+ merge.CanonicalOp,
+ primary.SchemaName,
+ merge.Targets,
+ primary.Source,
+ merge.CanonicalRequirements,
+ primary.ForwardedVariables.ToArray(),
+ primary.ResultSelectionSet,
+ primary.Conditions.ToArray(),
+ primary.RequiresFileUpload);
+ }
+
+ private static SingleOperationDefinition CreateSingleOperationDefinition(OperationExecutionNode member)
+ {
+ return new SingleOperationDefinition(
+ member.Id,
+ member.Operation,
+ member.SchemaName,
+ member.Target,
+ member.Source,
+ member.Requirements.ToArray(),
+ member.ForwardedVariables.ToArray(),
+ member.ResultSelectionSet,
+ member.Conditions.ToArray(),
+ member.RequiresFileUpload);
+ }
+
+ ///
+ /// Rewrites the dependency graph so that every reference to any of
+ /// points to instead.
+ ///
+ private static void RedirectDependencyReferences(
+ Dictionary> dependenciesByStepId,
+ HashSet oldIds,
+ int newId)
+ {
+ foreach (var depSet in dependenciesByStepId.Values)
+ {
+ var hadOld = false;
+
+ foreach (var oldId in oldIds)
+ {
+ if (depSet.Remove(oldId))
+ {
+ hadOld = true;
+ }
+ }
+
+ if (hadOld)
+ {
+ depSet.Add(newId);
+ }
+ }
+ }
+
+ ///
+ /// Checks whether a node is transitively dependent on a
+ /// . Operations beneath a node-field
+ /// dispatch must keep their original identifiers because the dispatch
+ /// logic references them by identifier to select the correct branch.
+ ///
+ private static bool IsNodeFieldBound(
+ int nodeId,
+ ExecutionPlanBuildContext ctx,
+ Dictionary cache)
+ {
+ if (cache.TryGetValue(nodeId, out var cached))
+ {
+ return cached;
+ }
+
+ if (!ctx.DependenciesByStepId.TryGetValue(nodeId, out var dependencies) || dependencies.Count == 0)
+ {
+ cache[nodeId] = false;
+ return false;
+ }
+
+ foreach (var dependencyId in dependencies)
+ {
+ if (ctx.ExecutionNodes.TryGetValue(dependencyId, out var dependencyNode)
+ && dependencyNode is NodeFieldExecutionNode)
{
- return step with { Definition = rewrittenOperationNode };
+ cache[nodeId] = true;
+ return true;
}
- return step;
+ if (IsNodeFieldBound(dependencyId, ctx, cache))
+ {
+ cache[nodeId] = true;
+ return true;
+ }
}
+
+ cache[nodeId] = false;
+ return false;
}
- private static void BuildExecutionNodes(
- ImmutableList planSteps,
- HashSet completedSteps,
- Dictionary completedNodes,
- Dictionary> dependencyLookup,
- ISchemaDefinition schema,
- bool enableRequestGrouping,
- bool hasVariables)
+ private static void WireExecutionDependencies(ExecutionPlanBuildContext ctx)
+ {
+ WireOperationDependencies(ctx);
+ WireNodeFieldBranchesAndFallbacks(ctx);
+ }
+
+ private static void WireOperationDependencies(ExecutionPlanBuildContext ctx)
{
- var hasUploadScalar =
- schema.Types.TryGetType(UploadScalarName, out var uploadType) && uploadType.IsScalarType();
- var batchingGroupLookup = CreateBatchingGroupLookup(
- planSteps,
- dependencyLookup,
- enableRequestGrouping);
- var readySteps = planSteps.Where(t => !dependencyLookup.ContainsKey(t.Id)).ToList();
- List? variables = null;
+ // Build a lookup from every operation identifier to its containing
+ // execution node. A batch node wraps several operations, so each
+ // inner operation identifier also maps back to the parent batch node.
+ var executionNodeById = new Dictionary();
- while (completedSteps.Count < planSteps.Count)
+ foreach (var node in ctx.ExecutionNodes.Values)
{
- foreach (var step in readySteps)
+ executionNodeById[node.Id] = node;
+
+ if (node is OperationBatchExecutionNode batch)
{
- if (!completedSteps.Add(step.Id))
+ foreach (var operation in batch.Operations)
{
- continue;
+ executionNodeById[operation.Id] = batch;
}
+ }
+ }
- if (step is OperationPlanStep operationStep)
- {
- var requirements = Array.Empty();
+ foreach (var (nodeId, stepDependencies) in ctx.DependenciesByStepId)
+ {
+ if (!ctx.ExecutionNodes.TryGetValue(nodeId, out var entry)
+ || entry is not (OperationExecutionNode or OperationBatchExecutionNode))
+ {
+ continue;
+ }
- if (!operationStep.Requirements.IsEmpty)
- {
- var temp = new List();
+ if (entry is OperationBatchExecutionNode batchEntry)
+ {
+ WireBatchNodeDependencies(batchEntry, stepDependencies, executionNodeById);
+ continue;
+ }
- foreach (var (_, requirement) in operationStep.Requirements.OrderBy(t => t.Key))
- {
- temp.Add(requirement);
- }
+ // For a standalone operation node, wire dependencies directly.
+ foreach (var dependencyId in stepDependencies)
+ {
+ if (!ctx.ExecutionNodes.TryGetValue(dependencyId, out var childEntry)
+ || childEntry is not (OperationExecutionNode or OperationBatchExecutionNode or NodeFieldExecutionNode))
+ {
+ continue;
+ }
- requirements = temp.ToArray();
- }
+ childEntry.AddDependent(entry);
+ entry.AddDependency(childEntry);
+ }
+ }
+ }
- variables?.Clear();
+ private static void WireBatchNodeDependencies(
+ OperationBatchExecutionNode batchEntry,
+ HashSet stepDependencies,
+ Dictionary executionNodeById)
+ {
+ var seenExecutionDependencies = new HashSet();
- if (hasVariables && operationStep.Definition.VariableDefinitions.Count > 0)
- {
- variables ??= [];
+ foreach (var dependencyId in stepDependencies)
+ {
+ if (dependencyId == batchEntry.Id)
+ {
+ continue;
+ }
- foreach (var variableDef in operationStep.Definition.VariableDefinitions)
- {
- if (requirements.Any(r => r.Key == variableDef.Variable.Name.Value))
- {
- continue;
- }
+ if (!executionNodeById.TryGetValue(dependencyId, out var dependencyExecutionNode)
+ || dependencyExecutionNode.Id == batchEntry.Id)
+ {
+ continue;
+ }
- variables.Add(variableDef.Variable.Name.Value);
- }
- }
+ if (!seenExecutionDependencies.Add(dependencyExecutionNode.Id))
+ {
+ continue;
+ }
- var requiresFileUpload = hasUploadScalar
- && DoVariablesContainUploadScalar(operationStep.Definition.VariableDefinitions, schema);
-
- var operation = RemoveEmptyTypeNames(operationStep.Definition);
- var operationSource = operation.ToSourceText();
- int? batchingGroupId = batchingGroupLookup.TryGetValue(step.Id, out var groupId) ? groupId : null;
-
- var selectionSetNode = GetSelectionSetNodeFromPath(operationStep.Definition, operationStep.Source);
- selectionSetNode = PruneNonValueTypeChildren(selectionSetNode, operationStep.Type, schema);
- var resultSelectionSet = ResultSelectionSet.Create(selectionSetNode, schema);
-
- var node = new OperationExecutionNode(
- operationStep.Id,
- operationSource,
- operationStep.SchemaName,
- operationStep.Target,
- operationStep.Source,
- requirements,
- variables?.Count > 0 ? variables.ToArray() : [],
- resultSelectionSet,
- operationStep.Conditions,
- batchingGroupId,
- requiresFileUpload);
-
- completedNodes.Add(step.Id, node);
- }
- else if (step is NodeFieldPlanStep nodeStep)
- {
- var node = new NodeFieldExecutionNode(
- nodeStep.Id,
- nodeStep.ResponseName,
- nodeStep.IdValue,
- nodeStep.Conditions);
+ dependencyExecutionNode.AddDependent(batchEntry);
- completedNodes.Add(step.Id, node);
- }
+ // When a batch holds multiple operations, or a single batch
+ // operation definition with multiple targets, the dependency is
+ // optional. The executor evaluates each operation individually
+ // and only waits for the specific upstream results it needs.
+ if (batchEntry.Operations.Length > 1
+ || batchEntry.Operations[0] is BatchOperationDefinition)
+ {
+ batchEntry.AddOptionalDependency(dependencyExecutionNode);
+ }
+ else
+ {
+ batchEntry.AddDependency(dependencyExecutionNode);
}
+ }
+ }
- readySteps.Clear();
+ private static void WireNodeFieldBranchesAndFallbacks(ExecutionPlanBuildContext ctx)
+ {
+ foreach (var (nodeId, branches) in ctx.BranchesByNodeId)
+ {
+ if (!ctx.ExecutionNodes.TryGetValue(nodeId, out var entry) || entry is not NodeFieldExecutionNode node)
+ {
+ continue;
+ }
- foreach (var step in planSteps)
+ foreach (var (typeName, branchNodeId) in branches)
{
- if (dependencyLookup.TryGetValue(step.Id, out var stepDependencies)
- && completedSteps.IsSupersetOf(stepDependencies))
+ if (ctx.ExecutionNodes.TryGetValue(branchNodeId, out var branchNode))
{
- readySteps.Add(step);
+ node.AddBranch(typeName, branchNode);
}
}
+ }
- if (readySteps.Count == 0)
+ foreach (var (nodeId, fallbackNodeId) in ctx.FallbackByNodeId)
+ {
+ if (!ctx.ExecutionNodes.TryGetValue(nodeId, out var entry) || entry is not NodeFieldExecutionNode node)
{
- break;
+ continue;
+ }
+
+ if (ctx.ExecutionNodes.TryGetValue(fallbackNodeId, out var fallbackNode))
+ {
+ node.AddFallbackQuery(fallbackNode);
}
}
}
@@ -453,18 +1030,18 @@ internal static Dictionary CreateBatchingGroupLookup(
private static int GetDependencyDepth(
int stepId,
- Dictionary> dependencyLookup,
- Dictionary dependencyDepthLookup,
+ Dictionary> dependenciesByStepId,
+ Dictionary depthLookup,
HashSet recursionStack)
{
- if (dependencyDepthLookup.TryGetValue(stepId, out var depth))
+ if (depthLookup.TryGetValue(stepId, out var depth))
{
return depth;
}
- if (!dependencyLookup.TryGetValue(stepId, out var directDependencies) || directDependencies.Count == 0)
+ if (!dependenciesByStepId.TryGetValue(stepId, out var directDependencies) || directDependencies.Count == 0)
{
- dependencyDepthLookup[stepId] = 0;
+ depthLookup[stepId] = 0;
return 0;
}
@@ -479,225 +1056,25 @@ private static int GetDependencyDepth(
{
var dependencyDepth = GetDependencyDepth(
dependency,
- dependencyLookup,
- dependencyDepthLookup,
+ dependenciesByStepId,
+ depthLookup,
recursionStack);
maxDepth = Math.Max(maxDepth, dependencyDepth + 1);
}
recursionStack.Remove(stepId);
- dependencyDepthLookup[stepId] = maxDepth;
+ depthLookup[stepId] = maxDepth;
return maxDepth;
}
- private static void BuildDependencyStructure(
- Dictionary completedNodes,
- Dictionary> dependencyLookup,
- Dictionary> branchesLookup,
- Dictionary fallbackLookup)
- {
- foreach (var (nodeId, stepDependencies) in dependencyLookup)
- {
- if (!completedNodes.TryGetValue(nodeId, out var entry)
- || entry is not (OperationExecutionNode or OperationBatchExecutionNode))
- {
- continue;
- }
-
- foreach (var dependencyId in stepDependencies)
- {
- if (!completedNodes.TryGetValue(dependencyId, out var childEntry)
- || childEntry is not (OperationExecutionNode or OperationBatchExecutionNode or NodeFieldExecutionNode))
- {
- continue;
- }
-
- childEntry.AddDependent(entry);
- entry.AddDependency(childEntry);
- }
- }
-
- foreach (var (nodeId, branches) in branchesLookup)
- {
- if (!completedNodes.TryGetValue(nodeId, out var entry) || entry is not NodeFieldExecutionNode node)
- {
- continue;
- }
-
- foreach (var (typeName, branchNodeId) in branches)
- {
- if (!completedNodes.TryGetValue(branchNodeId, out var branchNode))
- {
- continue;
- }
-
- node.AddBranch(typeName, branchNode);
- }
- }
-
- foreach (var (nodeId, fallbackNodeId) in fallbackLookup)
- {
- if (!completedNodes.TryGetValue(nodeId, out var entry) || entry is not NodeFieldExecutionNode node)
- {
- continue;
- }
-
- if (!completedNodes.TryGetValue(fallbackNodeId, out var fallbackNode))
- {
- continue;
- }
-
- node.AddFallbackQuery(fallbackNode);
- }
- }
-
- private static void MergeEquivalentOperationNodes(
- Dictionary completedNodes,
- Dictionary> dependencyLookup)
- {
- // We group OperationExecutionNodes by (schemaName, sortedDependencies).
- // Nodes must have identical dependency sets to be safely mergeable.
- //
- // A node with different dependencies may be gated behind a conditional branch
- // (e.g. NodeField inline-fragment dispatch) that never fires for certain entity types,
- // so merging them would create a node whose dependency union can never be fully satisfied,
- // possibly causing a deadlock.
- var candidates = new Dictionary<(string schema, string deps), List>();
-
- foreach (var node in completedNodes.Values.OfType())
- {
- var schemaKey = node.SchemaName ?? DynamicSchemaNameMarker;
- var depsKey = dependencyLookup.TryGetValue(node.Id, out var depsSet)
- ? string.Join(",", depsSet.Order())
- : string.Empty;
- var groupKey = (schemaKey, depsKey);
-
- if (!candidates.TryGetValue(groupKey, out var list))
- {
- list = [];
- candidates[groupKey] = list;
- }
-
- list.Add(node);
- }
-
- // Within each bucket, find sub-groups with identical canonical signatures and merge them.
- foreach (var (_, groupNodes) in candidates)
- {
- if (groupNodes.Count <= 1)
- {
- continue;
- }
-
- var bySignature = new Dictionary>(StringComparer.Ordinal);
-
- foreach (var node in groupNodes)
- {
- var sig = ComputeCanonicalSignature(node);
-
- if (!bySignature.TryGetValue(sig, out var sigGroup))
- {
- sigGroup = [];
- bySignature[sig] = sigGroup;
- }
-
- sigGroup.Add(node);
- }
-
- foreach (var (_, equivalentNodes) in bySignature)
- {
- if (equivalentNodes.Count <= 1)
- {
- continue;
- }
-
- // Stable order: lowest ID becomes the canonical node.
- equivalentNodes.Sort((a, b) => a.Id.CompareTo(b.Id));
-
- var primary = equivalentNodes[0];
- var otherIds = equivalentNodes.Skip(1).Select(n => n.Id).ToList();
-
- var (canonicalOp, canonicalRequirements) = CanonicalizeOperation(primary);
- var targets = equivalentNodes.Select(n => n.Target).ToArray();
-
- var mergedNode = new OperationBatchExecutionNode(
- primary.Id,
- canonicalOp,
- primary.SchemaName,
- targets,
- primary.Source,
- canonicalRequirements,
- primary.ForwardedVariables.ToArray(),
- primary.ResultSelectionSet,
- primary.Conditions.ToArray(),
- primary.BatchingGroupId,
- primary.RequiresFileUpload);
-
- completedNodes[primary.Id] = mergedNode;
-
- foreach (var otherId in otherIds)
- {
- completedNodes.Remove(otherId);
- }
-
- // Union all dependency sets under the primary ID.
- if (!dependencyLookup.TryGetValue(primary.Id, out var primaryDeps))
- {
- primaryDeps = [];
- }
-
- foreach (var otherId in otherIds)
- {
- if (dependencyLookup.TryGetValue(otherId, out var otherDeps))
- {
- foreach (var dep in otherDeps)
- {
- primaryDeps.Add(dep);
- }
-
- dependencyLookup.Remove(otherId);
- }
- }
-
- if (primaryDeps.Count > 0)
- {
- dependencyLookup[primary.Id] = primaryDeps;
- }
- else
- {
- dependencyLookup.Remove(primary.Id);
- }
-
- // Replace all references to the removed IDs with the primary ID.
- var otherIdSet = new HashSet(otherIds);
-
- foreach (var depSet in dependencyLookup.Values)
- {
- var hadOther = false;
-
- foreach (var otherId in otherIdSet)
- {
- if (depSet.Remove(otherId))
- {
- hadOther = true;
- }
- }
-
- if (hadOther)
- {
- depSet.Add(primary.Id);
- }
- }
- }
- }
- }
-
private static string ComputeCanonicalSignature(OperationExecutionNode node)
{
var replacements = BuildPrefixReplacements(node.Requirements);
var normalizedText = ApplyPrefixReplacements(node.Operation.SourceText, replacements);
- // Skip the first line — it contains the operation name which embeds the step ID.
+ // The first line contains the operation name, which embeds a
+ // step-specific identifier. We skip it so that two operations
+ // with the same structure produce the same signature.
var firstNewline = normalizedText.IndexOf('\n');
var bodyText = firstNewline >= 0 ? normalizedText[(firstNewline + 1)..] : normalizedText;
@@ -711,19 +1088,13 @@ private static string ComputeCanonicalSignature(OperationExecutionNode node)
private static (OperationSourceText operation, OperationRequirement[] requirements) CanonicalizeOperation(
OperationExecutionNode node)
{
- // Use the primary node's operation and requirements as-is.
- // The primary has the lowest ID (and therefore the lowest __fusion_{N}_ prefix numbers),
- // which preserves the globally-unique numbering assigned by the planner.
- // ComputeCanonicalSignature normalises prefixes only for equivalence comparison,
- // but the actual merged operation must keep the original numbers.
return (node.Operation, node.Requirements.ToArray());
}
///
- /// Builds a list of (original, canonical) string pairs for normalizing
- /// __fusion_{N}_ variable-name prefixes. Prefixes are sorted
- /// deterministically by the alphabetically-joined set of their argument names
- /// so that structurally identical operations always produce the same mapping.
+ /// Builds replacement pairs that normalize step-specific
+ /// __fusion_{N}_ variable name prefixes into a canonical
+ /// form, so structurally identical operations produce matching text.
///
private static (string original, string canonical)[] BuildPrefixReplacements(
ReadOnlySpan requirements)
@@ -779,6 +1150,90 @@ private static string ApplyPrefixReplacements(
return text;
}
+ ///
+ /// Partitions structurally identical operations into groups that can
+ /// each be safely merged. Two operations cannot share a group if one
+ /// transitively depends on the other, because merging them would
+ /// create a cycle in the dependency graph.
+ ///
+ private static List> PartitionIntoMergeableGroups(
+ List candidates,
+ Dictionary> dependenciesByStepId)
+ {
+ var groups = new List>();
+ var visited = new HashSet();
+
+ foreach (var candidate in candidates)
+ {
+ var placed = false;
+
+ foreach (var group in groups)
+ {
+ var canJoin = true;
+
+ foreach (var existing in group)
+ {
+ visited.Clear();
+
+ if (IsTransitivelyReachable(candidate.Id, existing.Id, dependenciesByStepId, visited))
+ {
+ canJoin = false;
+ break;
+ }
+
+ visited.Clear();
+
+ if (IsTransitivelyReachable(existing.Id, candidate.Id, dependenciesByStepId, visited))
+ {
+ canJoin = false;
+ break;
+ }
+ }
+
+ if (canJoin)
+ {
+ group.Add(candidate);
+ placed = true;
+ break;
+ }
+ }
+
+ if (!placed)
+ {
+ groups.Add([candidate]);
+ }
+ }
+
+ return groups;
+ }
+
+ private static bool IsTransitivelyReachable(
+ int fromId,
+ int targetId,
+ Dictionary> dependenciesByStepId,
+ HashSet visited)
+ {
+ if (!dependenciesByStepId.TryGetValue(fromId, out var dependencies))
+ {
+ return false;
+ }
+
+ foreach (var dependency in dependencies)
+ {
+ if (dependency == targetId)
+ {
+ return true;
+ }
+
+ if (visited.Add(dependency) && IsTransitivelyReachable(dependency, targetId, dependenciesByStepId, visited))
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
private static SelectionSetNode GetSelectionSetNodeFromPath(
OperationDefinitionNode operationDefinition,
SelectionPath path)
@@ -829,9 +1284,9 @@ private static SelectionSetNode GetSelectionSetNodeFromPath(
}
///
- /// Strips child selection sets from fields whose return type is not a value type.
- /// This allows to only build the tree along value-type paths,
- /// reducing memory for the common case where most fields are not value types.
+ /// Strips child selection sets from fields whose return type is not a
+ /// value type. Only value-type subtrees are relevant for the result
+ /// selection set; the rest are resolved by separate execution nodes.
///
private static SelectionSetNode PruneNonValueTypeChildren(
SelectionSetNode selectionSet,
@@ -862,7 +1317,6 @@ private static SelectionSetNode PruneNonValueTypeChildren(
if (fieldNamedType is FusionComplexTypeDefinition { IsValueType: true } valueType)
{
- // Recurse into value type children to prune their non-value-type descendants.
var pruned = PruneNonValueTypeChildren(field.SelectionSet, valueType, schema);
if (!ReferenceEquals(pruned, field.SelectionSet))
@@ -875,7 +1329,6 @@ private static SelectionSetNode PruneNonValueTypeChildren(
}
else
{
- // Not a value type — strip the child selection set.
selections[i] = new FieldNode(
field.Name, field.Alias, field.Directives, field.Arguments, null);
changed = true;
@@ -966,8 +1419,6 @@ private static bool DoVariablesContainUploadScalar(
private static OperationDefinitionNode RemoveEmptySelections(OperationDefinitionNode operationDefinition)
{
- // Remove fields/fragments whose selection sets collapsed to `{}` during rewriting.
- // This is local cleanup and intentionally does not remove the whole operation node.
return SyntaxRewriter.Create(
rewrite: node =>
{
@@ -1073,8 +1524,10 @@ private static OperationDefinitionNode RemoveEmptyTypeNames(OperationDefinitionN
}
///
- /// Pulls out conditions around the root selection set or the selection set below a lookup field,
- /// and adds them as conditions to .
+ /// Extracts @skip and @include directives from every selection in the
+ /// root selection set (or beneath a lookup field) and promotes them to
+ /// node-level conditions on the plan step. This allows the executor to
+ /// evaluate the conditions once and skip the entire request if needed.
///
private static OperationPlanStep ExtractConditionsAndRewriteSelectionSet(OperationPlanStep step)
{
@@ -1114,8 +1567,6 @@ private static OperationPlanStep ExtractConditionsAndRewriteSelectionSet(Operati
newOperation = step.Definition.WithSelectionSet(newRootSelectionSet);
}
- // Merge extracted conditions with any conditions already propagated from work items,
- // deduplicating by value equality.
var mergedConditions = context.Conditions;
foreach (var existing in step.Conditions)
@@ -1206,10 +1657,25 @@ private static SelectionSetNode RewriteConditionalSelectionSet(
return new SelectionSetNode(selections);
}
+ private sealed class ExecutionPlanBuildContext
+ {
+ public HashSet ProcessedStepIds { get; } = [];
+ public Dictionary ExecutionNodes { get; } = [];
+ public Dictionary> DependenciesByStepId { get; } = [];
+ public Dictionary> BranchesByNodeId { get; } = [];
+ public Dictionary FallbackByNodeId { get; } = [];
+ }
+
private sealed class ConditionalSelectionSetRewriterContext
{
public HashSet Conditions { get; } = [];
}
+
+ private readonly record struct MergeResult(
+ SelectionPath[] Targets,
+ OperationSourceText CanonicalOp,
+ OperationRequirement[] CanonicalRequirements,
+ OperationExecutionNode Primary);
}
file static class Extensions
@@ -1217,13 +1683,10 @@ file static class Extensions
private static readonly Encoding s_encoding = Encoding.UTF8;
///
- /// Checks if an entire selection set, either on the root or below
- /// a lookup field, is conditional.
+ /// Returns when every selection in the relevant
+ /// selection set carries a @skip or @include directive, meaning the
+ /// entire operation is conditional and can potentially be skipped.
///
- ///
- /// true, if all provided selections on either the root
- /// or below a lookup field are conditional, otherwise false.
- ///
public static bool AreAllProvidedSelectionsConditional(this OperationPlanStep step)
{
var selectionSetNode = step.Definition.SelectionSet;
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs
index d276e60b333..615d774f6c1 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs
@@ -123,6 +123,12 @@ internal static string SourceSchemaHttpClient_InvalidVariableIndex {
}
}
+ internal static string SourceSchemaHttpClient_VariableIndexOutOfRange {
+ get {
+ return ResourceManager.GetString("SourceSchemaHttpClient_VariableIndexOutOfRange", resourceCulture);
+ }
+ }
+
internal static string SourceSchemaHttpClient_NoResultForNode {
get {
return ResourceManager.GetString("SourceSchemaHttpClient_NoResultForNode", resourceCulture);
@@ -152,5 +158,29 @@ internal static string SourceSchemaRequestDispatcher_BatchResponseCountMismatch
return ResourceManager.GetString("SourceSchemaRequestDispatcher_BatchResponseCountMismatch", resourceCulture);
}
}
+
+ internal static string OperationPlan_NodeNotFound {
+ get {
+ return ResourceManager.GetString("OperationPlan_NodeNotFound", resourceCulture);
+ }
+ }
+
+ internal static string OperationBatchExecutionNode_MissingBatchResult {
+ get {
+ return ResourceManager.GetString("OperationBatchExecutionNode_MissingBatchResult", resourceCulture);
+ }
+ }
+
+ internal static string ExecutionNode_MissingBooleanVariable {
+ get {
+ return ResourceManager.GetString("ExecutionNode_MissingBooleanVariable", resourceCulture);
+ }
+ }
+
+ internal static string JsonOperationPlanParser_SingleOperationRequired {
+ get {
+ return ResourceManager.GetString("JsonOperationPlanParser_SingleOperationRequired", resourceCulture);
+ }
+ }
}
}
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx
index a5c276b9ef4..ce9782f6f98 100644
--- a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx
+++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx
@@ -57,6 +57,9 @@
The batch response contains an invalid variableIndex '{0}' for node '{1}'.
+
+ The batch response contains an out-of-range variableIndex '{0}'.
+
The batch response does not contain any result for node '{0}'.
@@ -72,4 +75,16 @@
The client did not return a response for each request in the batch.
+
+ No execution node with id '{0}' exists in this plan.
+
+
+ The batch response does not contain any result for operation '{0}'.
+
+
+ Expected to have a boolean value for variable '${0}'.
+
+
+ There must be exactly one operation definition in the operation document of the operation plan.
+
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/RequestGroupingExecutionTests.cs b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/RequestGroupingExecutionTests.cs
index 91a6d7f70e4..90e1555db0c 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/RequestGroupingExecutionTests.cs
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/RequestGroupingExecutionTests.cs
@@ -1,4 +1,6 @@
using System.Collections.Concurrent;
+using System.Net;
+using System.Text;
using System.Text.Json;
using HotChocolate.Transport.Http;
using HotChocolate.Types.Composite;
@@ -151,6 +153,136 @@ public async Task Execute_With_RequestGrouping_Enabled_Does_Not_Deadlock_Across_
Assert.Equal(JsonValueKind.Array, topProducts[0].GetProperty("reviews").ValueKind);
}
+ [Fact]
+ public async Task Execute_With_RequestGrouping_Enabled_When_Subgraph_Rejects_Request_Without_Indexes()
+ {
+ // arrange
+ using var serverA = CreateSourceSchema(
+ "a",
+ builder => builder.AddQueryType());
+
+ using var serverB = CreateSourceSchema(
+ "b",
+ """
+ schema {
+ query: Query
+ }
+
+ type Product @key(fields: "id") {
+ id: Int!
+ rating: Int!
+ }
+
+ type Query {
+ productById(id: Int!): Product @lookup @internal
+ }
+ """,
+ httpClient: new HttpClient(new RejectedBeforeExecutionHandler()));
+
+ using var serverC = CreateSourceSchema(
+ "c",
+ builder => builder.AddQueryType());
+
+ using var gateway = await CreateCompositeSchemaAsync(
+ [
+ ("a", serverA),
+ ("b", serverB),
+ ("c", serverC)
+ ],
+ configureGatewayBuilder: builder =>
+ builder.ModifyPlannerOptions(options => options.EnableRequestGrouping = true));
+
+ using var client = GraphQLHttpClient.Create(gateway.CreateClient());
+
+ // act
+ using var result = await client.PostAsync(
+ """
+ {
+ first {
+ id
+ rating
+ deliveryEstimate
+ }
+ second {
+ id
+ rating
+ deliveryEstimate
+ }
+ }
+ """,
+ new Uri("http://localhost:5000/graphql"));
+
+ using var response = await result.ReadAsResultAsync();
+
+ // assert
+ Assert.Equal(JsonValueKind.Array, response.Errors.ValueKind);
+ Assert.True(response.Errors.GetArrayLength() > 0);
+
+ var bInteractions = AssertSchemaInteractions(gateway.Interactions, "b");
+ Assert.Contains(
+ bInteractions.Values.SelectMany(interaction => interaction.Results),
+ result => result.Contains("Cannot query field", StringComparison.Ordinal));
+ }
+
+ [Fact]
+ public async Task Execute_When_Subgraph_Rejects_Variable_Batch_Without_VariableIndex()
+ {
+ // arrange
+ using var serverA = CreateSourceSchema(
+ "a",
+ builder => builder.AddQueryType());
+
+ using var serverB = CreateSourceSchema(
+ "b",
+ """
+ schema {
+ query: Query
+ }
+
+ type Product @key(fields: "id") {
+ id: Int!
+ rating: Int!
+ }
+
+ type Query {
+ productById(id: Int!): Product @lookup @internal
+ }
+ """,
+ httpClient: new HttpClient(new RejectedBeforeExecutionHandler()));
+
+ using var gateway = await CreateCompositeSchemaAsync(
+ [
+ ("a", serverA),
+ ("b", serverB)
+ ]);
+
+ using var client = GraphQLHttpClient.Create(gateway.CreateClient());
+
+ // act
+ using var result = await client.PostAsync(
+ """
+ {
+ products {
+ id
+ rating
+ }
+ }
+ """,
+ new Uri("http://localhost:5000/graphql"));
+
+ using var response = await result.ReadAsResultAsync();
+
+ // assert
+ Assert.Equal(JsonValueKind.Array, response.Errors.ValueKind);
+ Assert.True(response.Errors.GetArrayLength() > 0);
+
+ var bInteractions = AssertSchemaInteractions(gateway.Interactions, "b");
+ Assert.Contains(
+ bInteractions.Values.SelectMany(interaction => interaction.Results),
+ result => result.Contains("Cannot query field", StringComparison.Ordinal));
+ AssertAllRequestsAreVariableBatches(bInteractions, expectedVariablesCount: 2);
+ }
+
private static ConcurrentDictionary AssertSchemaInteractions(
ConcurrentDictionary> interactions,
string schemaName)
@@ -191,6 +323,8 @@ public sealed class Query
public Product GetFirst() => new(1);
public Product GetSecond() => new(2);
+
+ public IReadOnlyList GetProducts() => [new(1), new(2)];
}
}
@@ -289,4 +423,24 @@ public sealed class Query
public Product GetProductById(int id) => new(id);
}
}
+
+ private sealed class RejectedBeforeExecutionHandler : HttpMessageHandler
+ {
+ protected override Task SendAsync(
+ HttpRequestMessage request,
+ CancellationToken cancellationToken)
+ {
+ var response = new HttpResponseMessage(HttpStatusCode.OK)
+ {
+ Content = new StringContent(
+ """
+ {"errors":[{"message":"Cannot query field \"rating\" on type \"Product\"."}],"data":null}
+ """,
+ Encoding.UTF8,
+ "application/json")
+ };
+
+ return Task.FromResult(response);
+ }
+ }
}
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups.yaml
index a8244d7da65..78f6df6ced4 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups.yaml
@@ -149,7 +149,7 @@ operationPlan:
}
source: $.discussionById
target: $.votable
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -170,7 +170,7 @@ operationPlan:
}
source: $.commentById
target: $.votable
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups_And_Field_From_Specific_Source.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups_And_Field_From_Specific_Source.yaml
index 3a99a10b171..be9c57537de 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups_And_Field_From_Specific_Source.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Interface_Field_Without_Type_Refinements_With_Concrete_Lookups_And_Field_From_Specific_Source.yaml
@@ -174,7 +174,7 @@ operationPlan:
}
source: $.discussionById
target: $.votable
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -195,7 +195,7 @@ operationPlan:
}
source: $.commentById
target: $.votable
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups.yaml
index e41ab8a3ee0..66f52b8dfb9 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups.yaml
@@ -157,7 +157,7 @@ operationPlan:
}
source: $.productById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -177,7 +177,7 @@ operationPlan:
}
source: $.userById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups_With_Additional_Concrete_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups_With_Additional_Concrete_Dependency.yaml
index a1166f5a98d..6eb982c1eba 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups_With_Additional_Concrete_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Concrete_Lookups_With_Additional_Concrete_Dependency.yaml
@@ -198,7 +198,7 @@ operationPlan:
}
source: $.productById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -218,7 +218,7 @@ operationPlan:
}
source: $.userById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup.yaml
index 518505ae9de..e03039e9e53 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup.yaml
@@ -165,7 +165,7 @@ operationPlan:
}
source: $.searchResultById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -188,7 +188,7 @@ operationPlan:
}
source: $.searchResultById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup_With_Additional_Concrete_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup_With_Additional_Concrete_Dependency.yaml
index 869b14d6b16..5ce2aec28aa 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup_With_Additional_Concrete_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/AbstractTypeTests.Union_Field_With_Type_Refinements_And_Union_Lookup_With_Additional_Concrete_Dependency.yaml
@@ -206,7 +206,7 @@ operationPlan:
}
source: $.searchResultById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -229,7 +229,7 @@ operationPlan:
}
source: $.searchResultById
target: $.search
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_Around_Interface_Type_Refinement.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_Around_Interface_Type_Refinement.yaml
index 9a4c0902b97..e6cf609b576 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_Around_Interface_Type_Refinement.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_Around_Interface_Type_Refinement.yaml
@@ -164,7 +164,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- skip
dependencies:
@@ -189,7 +188,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- skip
dependencies:
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Selection_Type_Refinement_With_Same_Unskipped_Selection.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Selection_Type_Refinement_With_Same_Unskipped_Selection.yaml
index a68f9cbec1b..8ec9e4036d4 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Selection_Type_Refinement_With_Same_Unskipped_Selection.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Selection_Type_Refinement_With_Same_Unskipped_Selection.yaml
@@ -211,7 +211,6 @@ operationPlan:
}
source: $.authorById
target: $.node.author
- batchingGroupId: 1
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -231,7 +230,6 @@ operationPlan:
}
source: $.authorById
target: $.node.author
- batchingGroupId: 1
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Type_Refinement.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Type_Refinement.yaml
index 6d2048ee1ac..ca86abe8ba7 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Type_Refinement.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/ConditionalTests.NodeField_Skip_On_Interface_Type_Refinement.yaml
@@ -133,7 +133,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- skip
dependencies:
@@ -155,7 +154,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- skip
dependencies:
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface.yaml
index 9c20f62fcbc..7d373b2ccfb 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface.yaml
@@ -136,7 +136,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
@@ -156,7 +155,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type.yaml
index 0c1ab987ebf..e1ffe14be58 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type.yaml
@@ -146,7 +146,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
@@ -166,7 +165,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Different_Dependencies.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Different_Dependencies.yaml
index 09c8f16d104..4452339dfaf 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Different_Dependencies.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Different_Dependencies.yaml
@@ -165,22 +165,10 @@ sourceSchemas:
}
}
variables: |
- [
- {
- "__fusion_1_id": "UHJvZHVjdDo0"
- },
- {
- "__fusion_1_id": "UHJvZHVjdDox"
- },
- {
- "__fusion_1_id": "UHJvZHVjdDoy"
- },
- {
- "__fusion_1_id": "UHJvZHVjdDoz"
- }
- ]
+ {
+ "__fusion_1_id": "UHJvZHVjdDo0"
+ }
response:
- contentType: application/jsonl; charset=utf-8
results:
- |
{
@@ -191,6 +179,33 @@ sourceSchemas:
}
}
}
+ - request:
+ document: |
+ query testQuery_f7a0a31d_5(
+ $__fusion_2_id: ID!
+ ) {
+ node(id: $__fusion_2_id) {
+ __typename
+ ... on Product {
+ name
+ }
+ }
+ }
+ variables: |
+ [
+ {
+ "__fusion_2_id": "UHJvZHVjdDox"
+ },
+ {
+ "__fusion_2_id": "UHJvZHVjdDoy"
+ },
+ {
+ "__fusion_2_id": "UHJvZHVjdDoz"
+ }
+ ]
+ response:
+ contentType: application/jsonl; charset=utf-8
+ results:
- |
{
"data": {
@@ -291,13 +306,12 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
- id: 1
- id: 4
- type: OperationBatch
+ type: Operation
schema: B
operation: |
query testQuery_f7a0a31d_4(
@@ -311,16 +325,35 @@ operationPlan:
}
}
source: $.node
- targets:
- - $.node.singularProduct
- - $.node.products
- batchingGroupId: 2
+ target: $.node.singularProduct
requirements:
- name: __fusion_1_id
selectionMap: >-
id
dependencies:
- id: 3
+ - id: 5
+ type: Operation
+ schema: B
+ operation: |
+ query testQuery_f7a0a31d_5(
+ $__fusion_2_id: ID!
+ ) {
+ node(id: $__fusion_2_id) {
+ __typename
+ ... on Product {
+ name
+ }
+ }
+ }
+ source: $.node
+ target: $.node.products
+ requirements:
+ - name: __fusion_2_id
+ selectionMap: >-
+ id
+ dependencies:
+ - id: 3
- id: 6
type: Operation
schema: A
@@ -339,7 +372,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
@@ -360,7 +392,6 @@ operationPlan:
}
source: $.node
target: $.node.products
- batchingGroupId: 2
requirements:
- name: __fusion_3_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Same_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Same_Dependency.yaml
index 5fd974d44bf..b82eacf9d26 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Same_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_And_Concrete_Type_Both_Have_Same_Dependency.yaml
@@ -120,34 +120,19 @@ sourceSchemas:
}
interactions:
- request:
- kind: OperationBatch
- items:
- - document: |
- query testQuery_648537b2_4(
- $__fusion_1_id: ID!
- ) {
- authorById(id: $__fusion_1_id) {
- rating
- }
- }
- variables: |
- {
- "__fusion_1_id": "QXV0aG9yOjE="
- }
- - document: |
- query testQuery_648537b2_5(
- $__fusion_2_id: ID!
- ) {
- authorById(id: $__fusion_2_id) {
- username
- }
- }
- variables: |
- {
- "__fusion_2_id": "QXV0aG9yOjE="
- }
+ document: |
+ query testQuery_648537b2_4(
+ $__fusion_1_id: ID!
+ ) {
+ authorById(id: $__fusion_1_id) {
+ rating
+ }
+ }
+ variables: |
+ {
+ "__fusion_1_id": "QXV0aG9yOjE="
+ }
response:
- contentType: application/jsonl; charset=utf-8
results:
- |
{
@@ -157,6 +142,21 @@ sourceSchemas:
}
}
}
+ - request:
+ document: |
+ query testQuery_648537b2_5(
+ $__fusion_2_id: ID!
+ ) {
+ authorById(id: $__fusion_2_id) {
+ username
+ }
+ }
+ variables: |
+ {
+ "__fusion_2_id": "QXV0aG9yOjE="
+ }
+ response:
+ results:
- |
{
"data": {
@@ -253,7 +253,6 @@ operationPlan:
}
source: $.authorById
target: $.node.author
- batchingGroupId: 1
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -273,7 +272,6 @@ operationPlan:
}
source: $.authorById
target: $.node.author
- batchingGroupId: 1
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_Selection_Has_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_Selection_Has_Dependency.yaml
index ab68ea140ec..a1c8ae78361 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_Selection_Has_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Selections_On_Interface_Selection_Has_Dependency.yaml
@@ -250,7 +250,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
@@ -271,7 +270,6 @@ operationPlan:
}
source: $.node
target: $.node.products
- batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -296,7 +294,6 @@ operationPlan:
}
}
}
- batchingGroupId: 1
forwardedVariables:
- id
dependencies:
@@ -317,7 +314,6 @@ operationPlan:
}
source: $.node
target: $.node.products
- batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Different_Dependencies.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Different_Dependencies.yaml
index 4bf9da4c68e..5f9bc0b5e49 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Different_Dependencies.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Different_Dependencies.yaml
@@ -245,7 +245,6 @@ operationPlan:
}
source: $.node
target: $.node.product
- batchingGroupId: 1
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -289,7 +288,6 @@ operationPlan:
}
source: $.node
target: $.node.product
- batchingGroupId: 1
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Same_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Same_Dependency.yaml
index 331a20d4ec7..3f409803dd6 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Same_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Node_Field_Two_Concrete_Types_Selections_Have_Same_Dependency.yaml
@@ -242,7 +242,6 @@ operationPlan:
}
source: $.node
target: $.node.product
- batchingGroupId: 1
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -286,7 +285,6 @@ operationPlan:
}
source: $.node
target: $.node.product
- batchingGroupId: 1
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Two_Node_Fields_With_Alias.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Two_Node_Fields_With_Alias.yaml
index 4d8ab7dfbd8..e349c26ce55 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Two_Node_Fields_With_Alias.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/GlobalObjectIdentificationTests.Two_Node_Fields_With_Alias.yaml
@@ -49,25 +49,15 @@ sourceSchemas:
}
interactions:
- request:
- kind: OperationBatch
- items:
- - document: |
- query Op_a361113f_3 {
- b: discussionById(discussionId: "RGlzY3Vzc2lvbjoy") {
- __typename
- title
- id
- }
- }
- - document: |
- query Op_a361113f_7 {
- a: discussionById(discussionId: "RGlzY3Vzc2lvbjox") {
- __typename
- title
- }
- }
+ document: |
+ query Op_a361113f_3 {
+ b: discussionById(discussionId: "RGlzY3Vzc2lvbjoy") {
+ __typename
+ title
+ id
+ }
+ }
response:
- contentType: application/jsonl; charset=utf-8
results:
- |
{
@@ -79,6 +69,16 @@ sourceSchemas:
}
}
}
+ - request:
+ document: |
+ query Op_a361113f_7 {
+ a: discussionById(discussionId: "RGlzY3Vzc2lvbjox") {
+ __typename
+ title
+ }
+ }
+ response:
+ results:
- |
{
"data": {
@@ -169,7 +169,6 @@ operationPlan:
__typename
}
}
- batchingGroupId: 2
dependencies:
- id: 1
- id: 3
@@ -183,7 +182,6 @@ operationPlan:
id
}
}
- batchingGroupId: 1
dependencies:
- id: 1
- id: 4
@@ -220,7 +218,6 @@ operationPlan:
__typename
}
}
- batchingGroupId: 2
dependencies:
- id: 5
- id: 7
@@ -233,6 +230,5 @@ operationPlan:
title
}
}
- batchingGroupId: 1
dependencies:
- id: 5
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
index 24ceef119c4..33cb7008e16 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
@@ -198,7 +198,7 @@ operationPlan:
}
source: $.authorById
target: $.authorable.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -218,7 +218,7 @@ operationPlan:
}
source: $.authorById
target: $.authorable.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
index fc446438829..3982ed5f453 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
@@ -185,7 +185,7 @@ operationPlan:
targets:
- $.authorable.author
- $.authorable.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
index 770884aa79a..a6f0436c9df 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
@@ -264,7 +264,7 @@ operationPlan:
}
source: $.authorById
target: $.authorables.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -284,7 +284,7 @@ operationPlan:
}
source: $.authorById
target: $.authorables.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
index cd60b57b6a9..668d81dc011 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.Interface_List_Field_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
@@ -246,7 +246,7 @@ operationPlan:
targets:
- $.authorables.author
- $.authorables.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
index b96002756d7..5bc662abc91 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Different_Selection_In_Concrete_Type.yaml
@@ -295,7 +295,7 @@ operationPlan:
}
source: $.authorById
target: $.wrappers.authorable.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -315,7 +315,7 @@ operationPlan:
}
source: $.authorById
target: $.wrappers.authorable.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
index 10dac429e55..4049b168618 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/InterfaceTests.List_Field_Interface_Object_Property_Linked_Field_With_Dependency_Same_Selection_In_Concrete_Type.yaml
@@ -271,7 +271,7 @@ operationPlan:
targets:
- $.wrappers.authorable.author
- $.wrappers.authorable.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/MutationTests.Multiple_Mutation.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/MutationTests.Multiple_Mutation.yaml
index eee9671db73..1c3f943c326 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/MutationTests.Multiple_Mutation.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/MutationTests.Multiple_Mutation.yaml
@@ -19,14 +19,21 @@ request:
response:
body: |
{
- "errors": [
- {
- "message": "The request exceeded the configured timeout of \u006000:00:30\u0060.",
- "extensions": {
- "code": "HC0045"
+ "data": {
+ "a": {
+ "book": {
+ "id": 1,
+ "author": null
+ }
+ },
+ "b": {
+ "book": {
+ "id": 2,
+ "title": "Book2",
+ "author": null
}
}
- ]
+ }
}
sourceSchemas:
- name: A
@@ -78,6 +85,29 @@ sourceSchemas:
}
}
}
+ - request:
+ document: |
+ mutation Op_192dc5f8_3 {
+ b: createBook(input: { title: "Book2" }) {
+ book {
+ id
+ title
+ }
+ }
+ }
+ response:
+ results:
+ - |
+ {
+ "data": {
+ "b": {
+ "book": {
+ "id": 2,
+ "title": "Book2"
+ }
+ }
+ }
+ }
- name: B
schema: |
schema {
@@ -92,3 +122,73 @@ sourceSchemas:
type Query {
bookById(id: Int!): Book! @internal @lookup
}
+operationPlan:
+ operation:
+ - document: |
+ mutation {
+ a: createBook(input: { title: "Book1" }) {
+ book {
+ id
+ id @fusion__requirement
+ author
+ }
+ }
+ b: createBook(input: { title: "Book2" }) {
+ book {
+ id
+ id @fusion__requirement
+ title
+ author
+ }
+ }
+ }
+ hash: 192dc5f8a8f00336bac2094122ae7902
+ searchSpace: 1
+ expandedNodes: 2
+ nodes:
+ - id: 1
+ type: Operation
+ schema: A
+ operation: |
+ mutation Op_192dc5f8_1 {
+ a: createBook(input: { title: "Book1" }) {
+ book {
+ id
+ }
+ }
+ }
+ - id: 2
+ type: OperationBatch
+ schema: B
+ operation: |
+ query Op_192dc5f8_2(
+ $__fusion_1_id: Int!
+ ) {
+ bookById(id: $__fusion_1_id) {
+ author
+ }
+ }
+ source: $.bookById
+ targets:
+ - $.a.book
+ - $.b.book
+ batchingGroupId: 2
+ requirements:
+ - name: __fusion_1_id
+ selectionMap: >-
+ id
+ dependencies:
+ - id: 1
+ - id: 3
+ - id: 3
+ type: Operation
+ schema: A
+ operation: |
+ mutation Op_192dc5f8_3 {
+ b: createBook(input: { title: "Book2" }) {
+ book {
+ id
+ title
+ }
+ }
+ }
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Leaf_Field.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Leaf_Field.yaml
index a17dac9cf10..23cb4019c2b 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Leaf_Field.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Leaf_Field.yaml
@@ -180,7 +180,6 @@ operationPlan:
id
dependencies:
- id: 3
- - id: 4
- id: 3
type: Operation
schema: A
@@ -194,7 +193,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_3_id
selectionMap: >-
@@ -214,7 +213,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_4_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Nullable_Leaf_Field_Returning_Null.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Nullable_Leaf_Field_Returning_Null.yaml
index c2425823c4a..69569be3ba2 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Nullable_Leaf_Field_Returning_Null.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Nullable_Leaf_Field_Returning_Null.yaml
@@ -180,7 +180,6 @@ operationPlan:
id
dependencies:
- id: 3
- - id: 4
- id: 3
type: Operation
schema: A
@@ -194,7 +193,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_3_id
selectionMap: >-
@@ -214,7 +213,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_4_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object.yaml
index 246e10dd2f3..87760ce1609 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object.yaml
@@ -190,7 +190,6 @@ operationPlan:
id
dependencies:
- id: 3
- - id: 4
- id: 3
type: Operation
schema: A
@@ -206,7 +205,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_3_id
selectionMap: >-
@@ -226,7 +225,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_4_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object_Returning_Null.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object_Returning_Null.yaml
index 0bb3bfb2db0..fb1c3f3c5df 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object_Returning_Null.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/RequireTests.Requirement_On_Property_Within_Nullable_Object_Returning_Null.yaml
@@ -188,7 +188,6 @@ operationPlan:
id
dependencies:
- id: 3
- - id: 4
- id: 3
type: Operation
schema: A
@@ -204,7 +203,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_3_id
selectionMap: >-
@@ -224,7 +223,7 @@ operationPlan:
}
source: $.productById
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 3
requirements:
- name: __fusion_4_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml
index 3a73537a9e3..7030d1f23c2 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml
@@ -268,7 +268,7 @@ operationPlan:
}
source: $.product
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -292,7 +292,7 @@ operationPlan:
}
source: $.product
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Levels.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Levels.yaml
index 91ebea2b545..64661e82d53 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Levels.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Parent_Fields_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Levels.yaml
@@ -295,7 +295,7 @@ operationPlan:
}
source: $.product
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -317,7 +317,7 @@ operationPlan:
}
source: $.product
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
@@ -341,7 +341,7 @@ operationPlan:
}
source: $.product
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_3_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Root_Fields_With_Extra_Fields_On_Shared_Level.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Root_Fields_With_Extra_Fields_On_Shared_Level.yaml
index b465a46143d..d6f9c9dc514 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Root_Fields_With_Extra_Fields_On_Shared_Level.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Hierarchy_Of_Shared_Root_Fields_With_Extra_Fields_On_Shared_Level.yaml
@@ -260,16 +260,7 @@ operationPlan:
schema2
}
}
- batchingGroupId: 1
- - id: 3
- type: Operation
- schema: C
- operation: |
- query Op_2deb87e6_3 {
- viewer {
- schema3
- }
- }
+ batchingGroupId: 2
- id: 4
type: Operation
schema: B
@@ -281,4 +272,13 @@ operationPlan:
}
}
}
- batchingGroupId: 1
+ batchingGroupId: 2
+ - id: 3
+ type: Operation
+ schema: C
+ operation: |
+ query Op_2deb87e6_3 {
+ viewer {
+ schema3
+ }
+ }
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Shared_Parent_Field_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Shared_Parent_Field_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml
index 25c03986d22..29e9fe95ed5 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Shared_Parent_Field_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Shared_Parent_Field_Below_Type_With_Lookup_With_Extra_Fields_On_Shared_Level.yaml
@@ -246,7 +246,7 @@ operationPlan:
}
source: $.product
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -268,7 +268,7 @@ operationPlan:
}
source: $.product
target: $.productById
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Single_Shared_Root_Field_With_Extra_Fields_On_Root.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Single_Shared_Root_Field_With_Extra_Fields_On_Root.yaml
index 159878a0a52..c27269f636d 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Single_Shared_Root_Field_With_Extra_Fields_On_Root.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SharedPathTests.Single_Shared_Root_Field_With_Extra_Fields_On_Root.yaml
@@ -204,7 +204,7 @@ operationPlan:
query Op_4a75abf3_2 {
schema2
}
- batchingGroupId: 1
+ batchingGroupId: 2
- id: 3
type: Operation
schema: B
@@ -214,4 +214,4 @@ operationPlan:
schema2
}
}
- batchingGroupId: 1
+ batchingGroupId: 2
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
index 16adc13f87f..28dd5a13f3c 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
@@ -263,7 +263,7 @@ operationPlan:
}
source: $.authorById
target: $.postEdges.node.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -283,7 +283,7 @@ operationPlan:
}
source: $.productById
target: $.postEdges.node.product
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml
index 74f5ed35e9c..d155adc92aa 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml
@@ -255,7 +255,7 @@ operationPlan:
targets:
- $.postEdges.node.product
- $.postEdges.node.product
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
index 9b22ade7012..581ce9f0618 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
@@ -420,7 +420,7 @@ operationPlan:
}
source: $.authorById
target: $.users.posts.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -440,7 +440,7 @@ operationPlan:
}
source: $.productById
target: $.users.posts.product
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Same_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Same_Dependency.yaml
index 9157d884e5e..0b9ee1c4bbc 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Same_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Object_List_Union_List_Concrete_Type_Selections_Have_Same_Dependency.yaml
@@ -399,7 +399,7 @@ operationPlan:
targets:
- $.users.posts.product
- $.users.posts.product
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
index 0a575e5c520..c1d19a2bc2a 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
@@ -188,7 +188,7 @@ operationPlan:
}
source: $.authorById
target: $.post.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -208,7 +208,7 @@ operationPlan:
}
source: $.productById
target: $.post.product
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_2_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml
index d2ea32cd0a4..32f95b14a48 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_Field_Concrete_Type_Selections_Have_Same_Dependency.yaml
@@ -180,7 +180,7 @@ operationPlan:
targets:
- $.post.product
- $.post.product
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
index d555d8e7b58..ebb1dfa0fff 100644
--- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
+++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/UnionTests.Union_List_Concrete_Type_Selections_Have_Dependency_To_Same_Subgraph.yaml
@@ -250,7 +250,7 @@ operationPlan:
}
source: $.authorById
target: $.posts.author
- batchingGroupId: 1
+ batchingGroupId: 2
requirements:
- name: __fusion_1_id
selectionMap: >-
@@ -270,7 +270,7 @@ operationPlan:
}
source: $.productById
target: $.posts