diff --git a/AGENTS.md b/AGENTS.md index a43d5e101e1..12bf1b73fcc 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -56,6 +56,7 @@ dotnet test src/HotChocolate/Fusion - Use test naming format: `Method_Should_Outcome_When_Condition`. - Do not write vacuous assertions (`Assert.NotNull` alone is not a complete test). - If a test requires excessive stubs and reflection, use a more appropriate test tier. +- Do not use em dash style sentences in docs, comments, or XML documentation. Use commas, periods, parentheses, or colons instead. ### Testing diff --git a/CLAUDE.md b/CLAUDE.md index f2a1c6394d3..4e29fe1e529 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -47,20 +47,36 @@ dotnet test src/HotChocolate/Fusion ### C# / .NET -- Always use curly braces for loops and conditionals — no exceptions +- Always use curly braces for loops and conditionals, no exceptions - File-scoped namespaces, 4-space indent - Test naming: `Method_Should_Outcome_When_Condition` - No vacuous assertions (`Assert.NotNull` alone is not a test) - If you need 8 stubs + reflection, you're at the wrong test tier +- Do not use em dash style sentences in docs, comments, or XML documentation. Use commas, periods, parentheses, or colons instead. ### Testing -- Prefer snapshot tests over manual `Assert` calls — use **CookieCrumble** for snapshots +- Prefer snapshot tests over manual `Assert` calls, use **CookieCrumble** for snapshots - CookieCrumble has native snapshot support for `IExecutionResult`, `GraphQLHttpResponse`, and other core types - For smaller snapshots, prefer **inline snapshots** (`MatchInlineSnapshot`) over snapshot files - For tests with multiple assertions, use **Markdown snapshots** (`MatchMarkdownSnapshot`) +- Hard limit: a single test method must contain at most 5 `Assert.*` calls. Anything beyond that is too hard to reason about in review, switch to a snapshot (Markdown for multi-shape state, inline or file for a single output) +- Use the AAA section marker style. 
Each section starts with a single-line comment, the test name documents intent, no paragraph-style block comments above sections: + + ```csharp + // arrange + // optional one-line description, only when the next code is non-obvious + ... arrange code ... + + // act + ... act code ... + + // assert + ... assert code ... + ``` + - Snapshot tests: update from `__mismatch__/` directory, understand ordering issues before updating -- Filter tests during iteration — never run the full suite unnecessarily +- Filter tests during iteration, never run the full suite unnecessarily - Real databases in integration tests, not mocks (unless explicitly instructed otherwise) ## Performance diff --git a/dotnet-install.sh b/dotnet-install.sh new file mode 100755 index 00000000000..c44294628d1 --- /dev/null +++ b/dotnet-install.sh @@ -0,0 +1,1887 @@ +#!/usr/bin/env bash +# Copyright (c) .NET Foundation and contributors. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for full license information. +# + +# Stop script on NZEC +set -e +# Stop script if unbound variable found (use ${var:-} if intentional) +set -u +# By default cmd1 | cmd2 returns exit code of cmd2 regardless of cmd1 success +# This is causing it to fail +set -o pipefail + +# Use in the the functions: eval $invocation +invocation='say_verbose "Calling: ${yellow:-}${FUNCNAME[0]} ${green:-}$*${normal:-}"' + +# standard output may be used as a return value in the functions +# we need a way to write text on the screen in the functions so that +# it won't interfere with the return value. +# Exposing stream 3 as a pipe to standard output of the script itself +exec 3>&1 + +# Setup some colors to use. These need to work in fairly limited shells, like the Ubuntu Docker container where there are only 8 colors. 
+# See if stdout is a terminal +if [ -t 1 ] && command -v tput > /dev/null; then + # see if it supports colors + ncolors=$(tput colors || echo 0) + if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then + bold="$(tput bold || echo)" + normal="$(tput sgr0 || echo)" + black="$(tput setaf 0 || echo)" + red="$(tput setaf 1 || echo)" + green="$(tput setaf 2 || echo)" + yellow="$(tput setaf 3 || echo)" + blue="$(tput setaf 4 || echo)" + magenta="$(tput setaf 5 || echo)" + cyan="$(tput setaf 6 || echo)" + white="$(tput setaf 7 || echo)" + fi +fi + +say_warning() { + printf "%b\n" "${yellow:-}dotnet_install: Warning: $1${normal:-}" >&3 +} + +say_err() { + printf "%b\n" "${red:-}dotnet_install: Error: $1${normal:-}" >&2 +} + +say() { + # using stream 3 (defined in the beginning) to not interfere with stdout of functions + # which may be used as return value + printf "%b\n" "${cyan:-}dotnet-install:${normal:-} $1" >&3 +} + +say_verbose() { + if [ "$verbose" = true ]; then + say "$1" + fi +} + +# This platform list is finite - if the SDK/Runtime has supported Linux distribution-specific assets, +# then and only then should the Linux distribution appear in this list. +# Adding a Linux distribution to this list does not imply distribution-specific support. 
+get_legacy_os_name_from_platform() { + eval $invocation + + platform="$1" + case "$platform" in + "centos.7") + echo "centos" + return 0 + ;; + "debian.8") + echo "debian" + return 0 + ;; + "debian.9") + echo "debian.9" + return 0 + ;; + "fedora.23") + echo "fedora.23" + return 0 + ;; + "fedora.24") + echo "fedora.24" + return 0 + ;; + "fedora.27") + echo "fedora.27" + return 0 + ;; + "fedora.28") + echo "fedora.28" + return 0 + ;; + "opensuse.13.2") + echo "opensuse.13.2" + return 0 + ;; + "opensuse.42.1") + echo "opensuse.42.1" + return 0 + ;; + "opensuse.42.3") + echo "opensuse.42.3" + return 0 + ;; + "rhel.7"*) + echo "rhel" + return 0 + ;; + "ubuntu.14.04") + echo "ubuntu" + return 0 + ;; + "ubuntu.16.04") + echo "ubuntu.16.04" + return 0 + ;; + "ubuntu.16.10") + echo "ubuntu.16.10" + return 0 + ;; + "ubuntu.18.04") + echo "ubuntu.18.04" + return 0 + ;; + "alpine.3.4.3") + echo "alpine" + return 0 + ;; + esac + return 1 +} + +get_legacy_os_name() { + eval $invocation + + local uname=$(uname) + if [ "$uname" = "Darwin" ]; then + echo "osx" + return 0 + elif [ -n "$runtime_id" ]; then + echo $(get_legacy_os_name_from_platform "${runtime_id%-*}" || echo "${runtime_id%-*}") + return 0 + else + if [ -e /etc/os-release ]; then + . /etc/os-release + os=$(get_legacy_os_name_from_platform "$ID${VERSION_ID:+.${VERSION_ID}}" || echo "") + if [ -n "$os" ]; then + echo "$os" + return 0 + fi + fi + fi + + say_verbose "Distribution specific OS name and version could not be detected: UName = $uname" + return 1 +} + +get_linux_platform_name() { + eval $invocation + + if [ -n "$runtime_id" ]; then + echo "${runtime_id%-*}" + return 0 + else + if [ -e /etc/os-release ]; then + . 
+ /etc/os-release + echo "$ID${VERSION_ID:+.${VERSION_ID}}" + return 0 + elif [ -e /etc/redhat-release ]; then + local redhatRelease=$(</etc/redhat-release) + if [[ $redhatRelease == "CentOS release 6."* || $redhatRelease == "Red Hat Enterprise Linux Server release 6."* ]]; then + echo "rhel.6" + return 0 + fi + fi + fi + + say_verbose "Linux specific platform name and version could not be detected: UName = $uname" + return 1 +} + +is_musl_based_distro() { + (ldd --version 2>&1 || true) | grep -q musl +} + +get_current_os_name() { + eval $invocation + + local uname=$(uname) + if [ "$uname" = "Darwin" ]; then + echo "osx" + return 0 + elif [ "$uname" = "FreeBSD" ]; then + echo "freebsd" + return 0 + elif [ "$uname" = "Linux" ]; then + local linux_platform_name="" + linux_platform_name="$(get_linux_platform_name)" || true + + if [ "$linux_platform_name" = "rhel.6" ]; then + echo $linux_platform_name + return 0 + elif is_musl_based_distro; then + echo "linux-musl" + return 0 + elif [ "$linux_platform_name" = "linux-musl" ]; then + echo "linux-musl" + return 0 + else + echo "linux" + return 0 + fi + fi + + say_err "OS name could not be detected: UName = $uname" + return 1 +} + +machine_has() { + eval $invocation + + command -v "$1" > /dev/null 2>&1 + return $? +} + +check_min_reqs() { + local hasMinimum=false + if machine_has "curl"; then + hasMinimum=true + elif machine_has "wget"; then + hasMinimum=true + fi + + if [ "$hasMinimum" = "false" ]; then + say_err "curl (recommended) or wget are required to download dotnet. Install missing prerequisite to proceed." + return 1 + fi + return 0 +} + +# args: +# input - $1 +to_lowercase() { + #eval $invocation + + echo "$1" | tr '[:upper:]' '[:lower:]' + return 0 +} + +# args: +# input - $1 +remove_trailing_slash() { + #eval $invocation + + local input="${1:-}" + echo "${input%/}" + return 0 +} + +# args: +# input - $1 +remove_beginning_slash() { + #eval $invocation + + local input="${1:-}" + echo "${input#/}" + return 0 +} + +# args: +# root_path - $1 +# child_path - $2 - this parameter can be empty +combine_paths() { + eval $invocation + + # TODO: Consider making it work with any number of paths. For now: + if [ ! -z "${3:-}" ]; then + say_err "combine_paths: Function takes two parameters." 
+ return 1 + fi + + local root_path="$(remove_trailing_slash "$1")" + local child_path="$(remove_beginning_slash "${2:-}")" + say_verbose "combine_paths: root_path=$root_path" + say_verbose "combine_paths: child_path=$child_path" + echo "$root_path/$child_path" + return 0 +} + +get_machine_architecture() { + eval $invocation + + if command -v uname > /dev/null; then + CPUName=$(uname -m) + case $CPUName in + armv1*|armv2*|armv3*|armv4*|armv5*|armv6*) + echo "armv6-or-below" + return 0 + ;; + armv*l) + echo "arm" + return 0 + ;; + aarch64|arm64) + if [ "$(getconf LONG_BIT)" -lt 64 ]; then + # This is 32-bit OS running on 64-bit CPU (for example Raspberry Pi OS) + echo "arm" + return 0 + fi + echo "arm64" + return 0 + ;; + s390x) + echo "s390x" + return 0 + ;; + ppc64le) + echo "ppc64le" + return 0 + ;; + loongarch64) + echo "loongarch64" + return 0 + ;; + riscv64) + echo "riscv64" + return 0 + ;; + powerpc|ppc) + echo "ppc" + return 0 + ;; + esac + fi + + # Always default to 'x64' + echo "x64" + return 0 +} + +# args: +# architecture - $1 +get_normalized_architecture_from_architecture() { + eval $invocation + + local architecture="$(to_lowercase "$1")" + + if [[ $architecture == \<auto\> ]]; then + machine_architecture="$(get_machine_architecture)" + if [[ "$machine_architecture" == "armv6-or-below" ]]; then + say_err "Architecture \`$machine_architecture\` not supported. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues" + return 1 + fi + + echo $machine_architecture + return 0 + fi + + case "$architecture" in + amd64|x64) + echo "x64" + return 0 + ;; + arm) + echo "arm" + return 0 + ;; + arm64) + echo "arm64" + return 0 + ;; + s390x) + echo "s390x" + return 0 + ;; + ppc64le) + echo "ppc64le" + return 0 + ;; + loongarch64) + echo "loongarch64" + return 0 + ;; + esac + + say_err "Architecture \`$architecture\` not supported. 
If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues" + return 1 +} + +# args: +# version - $1 +# channel - $2 +# architecture - $3 +get_normalized_architecture_for_specific_sdk_version() { + eval $invocation + + local is_version_support_arm64="$(is_arm64_supported "$1")" + local is_channel_support_arm64="$(is_arm64_supported "$2")" + local architecture="$3"; + local osname="$(get_current_os_name)" + + if [ "$osname" == "osx" ] && [ "$architecture" == "arm64" ] && { [ "$is_version_support_arm64" = false ] || [ "$is_channel_support_arm64" = false ]; }; then + #check if rosetta is installed + if [ "$(/usr/bin/pgrep oahd >/dev/null 2>&1;echo $?)" -eq 0 ]; then + say_verbose "Changing user architecture from '$architecture' to 'x64' because .NET SDKs prior to version 6.0 do not support arm64." + echo "x64" + return 0; + else + say_err "Architecture \`$architecture\` is not supported for .NET SDK version \`$version\`. Please install Rosetta to allow emulation of the \`$architecture\` .NET SDK on this platform" + return 1 + fi + fi + + echo "$architecture" + return 0 +} + +# args: +# version or channel - $1 +is_arm64_supported() { + # Extract the major version by splitting on the dot + major_version="${1%%.*}" + + # Check if the major version is a valid number and less than 6 + case "$major_version" in + [0-9]*) + if [ "$major_version" -lt 6 ]; then + echo false + return 0 + fi + ;; + esac + + echo true + return 0 +} + +# args: +# user_defined_os - $1 +get_normalized_os() { + eval $invocation + + local osname="$(to_lowercase "$1")" + if [ ! -z "$osname" ]; then + case "$osname" in + osx | freebsd | rhel.6 | linux-musl | linux) + echo "$osname" + return 0 + ;; + macos) + osname='osx' + echo "$osname" + return 0 + ;; + *) + say_err "'$user_defined_os' is not a supported value for --os option, supported values are: osx, macos, linux, linux-musl, freebsd, rhel.6. 
If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." + return 1 + ;; + esac + else + osname="$(get_current_os_name)" || return 1 + fi + echo "$osname" + return 0 +} + +# args: +# quality - $1 +get_normalized_quality() { + eval $invocation + + local quality="$(to_lowercase "$1")" + if [ ! -z "$quality" ]; then + case "$quality" in + daily | preview) + echo "$quality" + return 0 + ;; + ga) + #ga quality is available without specifying quality, so normalizing it to empty + return 0 + ;; + *) + say_err "'$quality' is not a supported value for --quality option. Supported values are: daily, preview, ga. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." + return 1 + ;; + esac + fi + return 0 +} + +# args: +# channel - $1 +get_normalized_channel() { + eval $invocation + + local channel="$(to_lowercase "$1")" + + if [[ $channel == current ]]; then + say_warning 'Value "Current" is deprecated for -Channel option. Use "STS" instead.' + fi + + if [[ $channel == release/* ]]; then + say_warning 'Using branch name with -Channel option is no longer supported with newer releases. Use -Quality option with a channel in X.Y format instead.'; + fi + + if [ ! 
-z "$channel" ]; then + case "$channel" in + lts) + echo "LTS" + return 0 + ;; + sts) + echo "STS" + return 0 + ;; + current) + echo "STS" + return 0 + ;; + *) + echo "$channel" + return 0 + ;; + esac + fi + + return 0 +} + +# args: +# runtime - $1 +get_normalized_product() { + eval $invocation + + local product="" + local runtime="$(to_lowercase "$1")" + if [[ "$runtime" == "dotnet" ]]; then + product="dotnet-runtime" + elif [[ "$runtime" == "aspnetcore" ]]; then + product="aspnetcore-runtime" + elif [ -z "$runtime" ]; then + product="dotnet-sdk" + fi + echo "$product" + return 0 +} + +# The version text returned from the feeds is a 1-line or 2-line string: +# For the SDK and the dotnet runtime (2 lines): +# Line 1: # commit_hash +# Line 2: # 4-part version +# For the aspnetcore runtime (1 line): +# Line 1: # 4-part version + +# args: +# version_text - stdin +get_version_from_latestversion_file_content() { + eval $invocation + + cat | tail -n 1 | sed 's/\r$//' + return 0 +} + +# args: +# install_root - $1 +# relative_path_to_package - $2 +# specific_version - $3 +is_dotnet_package_installed() { + eval $invocation + + local install_root="$1" + local relative_path_to_package="$2" + local specific_version="${3//[$'\t\r\n']}" + + local dotnet_package_path="$(combine_paths "$(combine_paths "$install_root" "$relative_path_to_package")" "$specific_version")" + say_verbose "is_dotnet_package_installed: dotnet_package_path=$dotnet_package_path" + + if [ -d "$dotnet_package_path" ]; then + return 0 + else + return 1 + fi +} + +# args: +# downloaded file - $1 +# remote_file_size - $2 +validate_remote_local_file_sizes() +{ + eval $invocation + + local downloaded_file="$1" + local remote_file_size="$2" + local file_size='' + + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + file_size="$(stat -c '%s' "$downloaded_file")" + elif [[ "$OSTYPE" == "darwin"* ]]; then + # hardcode in order to avoid conflicts with GNU stat + file_size="$(/usr/bin/stat -f '%z' "$downloaded_file")" + fi + + 
if [ -n "$file_size" ]; then + say "Downloaded file size is $file_size bytes." + + if [ -n "$remote_file_size" ] && [ -n "$file_size" ]; then + if [ "$remote_file_size" -ne "$file_size" ]; then + say "The remote and local file sizes are not equal. The remote file size is $remote_file_size bytes and the local size is $file_size bytes. The local package may be corrupted." + else + say "The remote and local file sizes are equal." + fi + fi + + else + say "Either downloaded or local package size can not be measured. One of them may be corrupted." + fi +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +get_version_from_latestversion_file() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + + local version_file_url=null + if [[ "$runtime" == "dotnet" ]]; then + version_file_url="$azure_feed/Runtime/$channel/latest.version" + elif [[ "$runtime" == "aspnetcore" ]]; then + version_file_url="$azure_feed/aspnetcore/Runtime/$channel/latest.version" + elif [ -z "$runtime" ]; then + version_file_url="$azure_feed/Sdk/$channel/latest.version" + else + say_err "Invalid value for \$runtime" + return 1 + fi + say_verbose "get_version_from_latestversion_file: latest url: $version_file_url" + + download "$version_file_url" || return $? + return 0 +} + +# args: +# json_file - $1 +parse_globaljson_file_for_version() { + eval $invocation + + local json_file="$1" + if [ ! 
-f "$json_file" ]; then + say_err "Unable to find \`$json_file\`" + return 1 + fi + + sdk_section=$(cat "$json_file" | tr -d "\r" | awk '/"sdk"/,/}/') + if [ -z "$sdk_section" ]; then + say_err "Unable to parse the SDK node in \`$json_file\`" + return 1 + fi + + sdk_list=$(echo $sdk_section | awk -F"[{}]" '{print $2}') + sdk_list=${sdk_list//[\" ]/} + sdk_list=${sdk_list//,/$'\n'} + + local version_info="" + while read -r line; do + IFS=: + while read -r key value; do + if [[ "$key" == "version" ]]; then + version_info=$value + fi + done <<< "$line" + done <<< "$sdk_list" + if [ -z "$version_info" ]; then + say_err "Unable to find the SDK:version node in \`$json_file\`" + return 1 + fi + + unset IFS; + echo "$version_info" + return 0 +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +# version - $4 +# json_file - $5 +get_specific_version_from_version() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + local version="$(to_lowercase "$4")" + local json_file="$5" + + if [ -z "$json_file" ]; then + if [[ "$version" == "latest" ]]; then + local version_info + version_info="$(get_version_from_latestversion_file "$azure_feed" "$channel" "$normalized_architecture" false)" || return 1 + say_verbose "get_specific_version_from_version: version_info=$version_info" + echo "$version_info" | get_version_from_latestversion_file_content + return 0 + else + echo "$version" + return 0 + fi + else + local version_info + version_info="$(parse_globaljson_file_for_version "$json_file")" || return 1 + echo "$version_info" + return 0 + fi +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +# specific_version - $4 +# normalized_os - $5 +construct_download_link() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + local specific_version="${4//[$'\t\r\n']}" + local specific_product_version="$(get_specific_product_version 
"$1" "$4")" + local osname="$5" + + local download_link=null + if [[ "$runtime" == "dotnet" ]]; then + download_link="$azure_feed/Runtime/$specific_version/dotnet-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz" + elif [[ "$runtime" == "aspnetcore" ]]; then + download_link="$azure_feed/aspnetcore/Runtime/$specific_version/aspnetcore-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz" + elif [ -z "$runtime" ]; then + download_link="$azure_feed/Sdk/$specific_version/dotnet-sdk-$specific_product_version-$osname-$normalized_architecture.tar.gz" + else + return 1 + fi + + echo "$download_link" + return 0 +} + +# args: +# azure_feed - $1 +# specific_version - $2 +# download link - $3 (optional) +get_specific_product_version() { + # If we find a 'productVersion.txt' at the root of any folder, we'll use its contents + # to resolve the version of what's in the folder, superseding the specified version. + # if 'productVersion.txt' is missing but download link is already available, product version will be taken from download link + eval $invocation + + local azure_feed="$1" + local specific_version="${2//[$'\t\r\n']}" + local package_download_link="" + if [ $# -gt 2 ]; then + local package_download_link="$3" + fi + local specific_product_version=null + + # Try to get the version number, using the productVersion.txt file located next to the installer file. + local download_links=($(get_specific_product_version_url "$azure_feed" "$specific_version" true "$package_download_link") + $(get_specific_product_version_url "$azure_feed" "$specific_version" false "$package_download_link")) + + for download_link in "${download_links[@]}" + do + say_verbose "Checking for the existence of $download_link" + + if machine_has "curl" + then + if ! 
specific_product_version=$(curl -sL --fail "${download_link}${feed_credential}" 2>&1); then + continue + else + echo "${specific_product_version//[$'\t\r\n']}" + return 0 + fi + + elif machine_has "wget" + then + specific_product_version=$(wget -qO- "${download_link}${feed_credential}" 2>&1) + if [ $? = 0 ]; then + echo "${specific_product_version//[$'\t\r\n']}" + return 0 + fi + fi + done + + # Getting the version number with productVersion.txt has failed. Try parsing the download link for a version number. + say_verbose "Failed to get the version using productVersion.txt file. Download link will be parsed instead." + specific_product_version="$(get_product_specific_version_from_download_link "$package_download_link" "$specific_version")" + echo "${specific_product_version//[$'\t\r\n']}" + return 0 +} + +# args: +# azure_feed - $1 +# specific_version - $2 +# is_flattened - $3 +# download link - $4 (optional) +get_specific_product_version_url() { + eval $invocation + + local azure_feed="$1" + local specific_version="$2" + local is_flattened="$3" + local package_download_link="" + if [ $# -gt 3 ]; then + local package_download_link="$4" + fi + + local pvFileName="productVersion.txt" + if [ "$is_flattened" = true ]; then + if [ -z "$runtime" ]; then + pvFileName="sdk-productVersion.txt" + elif [[ "$runtime" == "dotnet" ]]; then + pvFileName="runtime-productVersion.txt" + else + pvFileName="$runtime-productVersion.txt" + fi + fi + + local download_link=null + + if [ -z "$package_download_link" ]; then + if [[ "$runtime" == "dotnet" ]]; then + download_link="$azure_feed/Runtime/$specific_version/${pvFileName}" + elif [[ "$runtime" == "aspnetcore" ]]; then + download_link="$azure_feed/aspnetcore/Runtime/$specific_version/${pvFileName}" + elif [ -z "$runtime" ]; then + download_link="$azure_feed/Sdk/$specific_version/${pvFileName}" + else + return 1 + fi + else + download_link="${package_download_link%/*}/${pvFileName}" + fi + + say_verbose "Constructed productVersion 
link: $download_link" + echo "$download_link" + return 0 +} + +# args: +# download link - $1 +# specific version - $2 +get_product_specific_version_from_download_link() +{ + eval $invocation + + local download_link="$1" + local specific_version="$2" + local specific_product_version="" + + if [ -z "$download_link" ]; then + echo "$specific_version" + return 0 + fi + + #get filename + filename="${download_link##*/}" + + #product specific version follows the product name + #for filename 'dotnet-sdk-3.1.404-linux-x64.tar.gz': the product version is 3.1.404 + IFS='-' + read -ra filename_elems <<< "$filename" + count=${#filename_elems[@]} + if [[ "$count" -gt 2 ]]; then + specific_product_version="${filename_elems[2]}" + else + specific_product_version=$specific_version + fi + unset IFS; + echo "$specific_product_version" + return 0 +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +# specific_version - $4 +construct_legacy_download_link() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + local specific_version="${4//[$'\t\r\n']}" + + local distro_specific_osname + distro_specific_osname="$(get_legacy_os_name)" || return 1 + + local legacy_download_link=null + if [[ "$runtime" == "dotnet" ]]; then + legacy_download_link="$azure_feed/Runtime/$specific_version/dotnet-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz" + elif [ -z "$runtime" ]; then + legacy_download_link="$azure_feed/Sdk/$specific_version/dotnet-dev-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz" + else + return 1 + fi + + echo "$legacy_download_link" + return 0 +} + +get_user_install_path() { + eval $invocation + + if [ ! 
-z "${DOTNET_INSTALL_DIR:-}" ]; then + echo "$DOTNET_INSTALL_DIR" + else + echo "$HOME/.dotnet" + fi + return 0 +} + +# args: +# install_dir - $1 +resolve_installation_path() { + eval $invocation + + local install_dir=$1 + if [ "$install_dir" = "" ]; then + local user_install_path="$(get_user_install_path)" + say_verbose "resolve_installation_path: user_install_path=$user_install_path" + echo "$user_install_path" + return 0 + fi + + echo "$install_dir" + return 0 +} + +# args: +# relative_or_absolute_path - $1 +get_absolute_path() { + eval $invocation + + local relative_or_absolute_path=$1 + echo "$(cd "$(dirname "$1")" && pwd -P)/$(basename "$1")" + return 0 +} + +# args: +# override - $1 (boolean, true or false) +get_cp_options() { + eval $invocation + + local override="$1" + local override_switch="" + + if [ "$override" = false ]; then + override_switch="-n" + + # create temporary files to check if 'cp -u' is supported + tmp_dir="$(mktemp -d)" + tmp_file="$tmp_dir/testfile" + tmp_file2="$tmp_dir/testfile2" + + touch "$tmp_file" + + # use -u instead of -n if it's available + if cp -u "$tmp_file" "$tmp_file2" 2>/dev/null; then + override_switch="-u" + fi + + # clean up + rm -f "$tmp_file" "$tmp_file2" + rm -rf "$tmp_dir" + fi + + echo "$override_switch" +} + +# args: +# input_files - stdin +# root_path - $1 +# out_path - $2 +# override - $3 +copy_files_or_dirs_from_list() { + eval $invocation + + local root_path="$(remove_trailing_slash "$1")" + local out_path="$(remove_trailing_slash "$2")" + local override="$3" + local override_switch="$(get_cp_options "$override")" + + cat | uniq | while read -r file_path; do + local path="$(remove_beginning_slash "${file_path#$root_path}")" + local target="$out_path/$path" + if [ "$override" = true ] || (! 
([ -d "$target" ] || [ -e "$target" ])); then + mkdir -p "$out_path/$(dirname "$path")" + if [ -d "$target" ]; then + rm -rf "$target" + fi + cp -R $override_switch "$root_path/$path" "$target" + fi + done +} + +# args: +# zip_uri - $1 +get_remote_file_size() { + local zip_uri="$1" + + if machine_has "curl"; then + file_size=$(curl -sI "$zip_uri" | grep -i content-length | awk '{ num = $2 + 0; print num }') + elif machine_has "wget"; then + file_size=$(wget --spider --server-response -O /dev/null "$zip_uri" 2>&1 | grep -i 'Content-Length:' | awk '{ num = $2 + 0; print num }') + else + say "Neither curl nor wget is available on this system." + return + fi + + if [ -n "$file_size" ]; then + say "Remote file $zip_uri size is $file_size bytes." + echo "$file_size" + else + say_verbose "Content-Length header was not extracted for $zip_uri." + echo "" + fi +} + +# args: +# zip_path - $1 +# out_path - $2 +# remote_file_size - $3 +extract_dotnet_package() { + eval $invocation + + local zip_path="$1" + local out_path="$2" + local remote_file_size="$3" + + local temp_out_path="$(mktemp -d "$temporary_file_template")" + + local failed=false + tar -xzf "$zip_path" -C "$temp_out_path" > /dev/null || failed=true + + local folders_with_version_regex='^.*/[0-9]+\.[0-9]+[^/]+/' + find "$temp_out_path" -type f | grep -Eo "$folders_with_version_regex" | sort | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" false + find "$temp_out_path" -type f | grep -Ev "$folders_with_version_regex" | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" "$override_non_versioned_files" + + validate_remote_local_file_sizes "$zip_path" "$remote_file_size" + + rm -rf "$temp_out_path" + if [ -z ${keep_zip+x} ]; then + rm -f "$zip_path" && say_verbose "Temporary archive file $zip_path was removed" + fi + + if [ "$failed" = true ]; then + say_err "Extraction failed" + return 1 + fi + return 0 +} + +# args: +# remote_path - $1 +# disable_feed_credential - $2 +get_http_header() +{ + eval 
$invocation + local remote_path="$1" + local disable_feed_credential="$2" + + local failed=false + local response + if machine_has "curl"; then + get_http_header_curl $remote_path $disable_feed_credential || failed=true + elif machine_has "wget"; then + get_http_header_wget $remote_path $disable_feed_credential || failed=true + else + failed=true + fi + if [ "$failed" = true ]; then + say_verbose "Failed to get HTTP header: '$remote_path'." + return 1 + fi + return 0 +} + +# args: +# remote_path - $1 +# disable_feed_credential - $2 +get_http_header_curl() { + eval $invocation + local remote_path="$1" + local disable_feed_credential="$2" + + remote_path_with_credential="$remote_path" + if [ "$disable_feed_credential" = false ]; then + remote_path_with_credential+="$feed_credential" + fi + + curl_options="-I -sSL --retry 5 --retry-delay 2 --connect-timeout 15 " + curl $curl_options "$remote_path_with_credential" 2>&1 || return 1 + return 0 +} + +# args: +# remote_path - $1 +# disable_feed_credential - $2 +get_http_header_wget() { + eval $invocation + local remote_path="$1" + local disable_feed_credential="$2" + local wget_options="-q -S --spider --tries 5 " + + local wget_options_extra='' + + # Test for options that aren't supported on all wget implementations. + if [[ $(wget -h 2>&1 | grep -E 'waitretry|connect-timeout') ]]; then + wget_options_extra="--waitretry 2 --connect-timeout 15 " + else + say "wget extra options are unavailable for this environment" + fi + + remote_path_with_credential="$remote_path" + if [ "$disable_feed_credential" = false ]; then + remote_path_with_credential+="$feed_credential" + fi + + wget $wget_options $wget_options_extra "$remote_path_with_credential" 2>&1 + + return $? +} + +# args: +# remote_path - $1 +# [out_path] - $2 - stdout if not provided +download() { + eval $invocation + + local remote_path="$1" + local out_path="${2:-}" + + if [[ "$remote_path" != "http"* ]]; then + cp "$remote_path" "$out_path" + return $? 
+ fi + + local failed=false + local attempts=0 + while [ $attempts -lt 3 ]; do + attempts=$((attempts+1)) + failed=false + if machine_has "curl"; then + downloadcurl "$remote_path" "$out_path" || failed=true + elif machine_has "wget"; then + downloadwget "$remote_path" "$out_path" || failed=true + else + say_err "Missing dependency: neither curl nor wget was found." + exit 1 + fi + + if [ "$failed" = false ] || [ $attempts -ge 3 ] || { [ -n "${http_code-}" ] && [ "${http_code}" = "404" ]; }; then + break + fi + + say "Download attempt #$attempts has failed: ${http_code-} ${download_error_msg-}" + say "Attempt #$((attempts+1)) will start in $((attempts*10)) seconds." + sleep $((attempts*10)) + done + + if [ "$failed" = true ]; then + say_verbose "Download failed: $remote_path" + return 1 + fi + return 0 +} + +# Updates global variables $http_code and $download_error_msg +downloadcurl() { + eval $invocation + unset http_code + unset download_error_msg + local remote_path="$1" + local out_path="${2:-}" + # Append feed_credential as late as possible before calling curl to avoid logging feed_credential + # Avoid passing URI with credentials to functions: note, most of them echoing parameters of invocation in verbose output. + local remote_path_with_credential="${remote_path}${feed_credential}" + local curl_options="--retry 20 --retry-delay 2 --connect-timeout 15 -sSL -f --create-dirs " + local curl_exit_code=0; + if [ -z "$out_path" ]; then + curl_output=$(curl $curl_options "$remote_path_with_credential" 2>&1) + curl_exit_code=$? + echo "$curl_output" + else + curl_output=$(curl $curl_options -o "$out_path" "$remote_path_with_credential" 2>&1) + curl_exit_code=$? 
+ fi + + # Regression in curl causes curl with --retry to return a 0 exit code even when it fails to download a file - https://github.com/curl/curl/issues/17554 + if [ $curl_exit_code -eq 0 ] && echo "$curl_output" | grep -q "^curl: ([0-9]*) "; then + curl_exit_code=$(echo "$curl_output" | sed 's/curl: (\([0-9]*\)).*/\1/') + fi + + if [ $curl_exit_code -gt 0 ]; then + download_error_msg="Unable to download $remote_path." + # Check for curl timeout codes + if [[ $curl_exit_code == 7 || $curl_exit_code == 28 ]]; then + download_error_msg+=" Failed to reach the server: connection timeout." + else + local disable_feed_credential=false + local response=$(get_http_header_curl $remote_path $disable_feed_credential) + http_code=$( echo "$response" | awk '/^HTTP/{print $2}' | tail -1 ) + if [[ ! -z $http_code && $http_code != 2* ]]; then + download_error_msg+=" Returned HTTP status code: $http_code." + fi + fi + say_verbose "$download_error_msg" + return 1 + fi + return 0 +} + + +# Updates global variables $http_code and $download_error_msg +downloadwget() { + eval $invocation + unset http_code + unset download_error_msg + local remote_path="$1" + local out_path="${2:-}" + # Append feed_credential as late as possible before calling wget to avoid logging feed_credential + local remote_path_with_credential="${remote_path}${feed_credential}" + local wget_options="--tries 20 " + + local wget_options_extra='' + local wget_result='' + + # Test for options that aren't supported on all wget implementations. + if [[ $(wget -h 2>&1 | grep -E 'waitretry|connect-timeout') ]]; then + wget_options_extra="--waitretry 2 --connect-timeout 15 " + else + say "wget extra options are unavailable for this environment" + fi + + if [ -z "$out_path" ]; then + wget -q $wget_options $wget_options_extra -O - "$remote_path_with_credential" 2>&1 + wget_result=$? + else + wget $wget_options $wget_options_extra -O "$out_path" "$remote_path_with_credential" 2>&1 + wget_result=$? 
+    fi
+
+    if [[ $wget_result != 0 ]]; then
+        local disable_feed_credential=false
+        local response=$(get_http_header_wget $remote_path $disable_feed_credential)
+        http_code=$( echo "$response" | awk '/^ HTTP/{print $2}' | tail -1 )
+        download_error_msg="Unable to download $remote_path."
+        if [[ ! -z $http_code && $http_code != 2* ]]; then
+            download_error_msg+=" Returned HTTP status code: $http_code."
+        # wget exit code 4 indicates a network failure
+        elif [[ $wget_result == 4 ]]; then
+            download_error_msg+=" Failed to reach the server: connection timeout."
+        fi
+        say_verbose "$download_error_msg"
+        return 1
+    fi
+
+    return 0
+}
+
+get_download_link_from_aka_ms() {
+    eval $invocation
+
+    # quality is not supported for the LTS or STS channels
+    # STS maps to current
+    if [[ ! -z "$normalized_quality" && ("$normalized_channel" == "LTS" || "$normalized_channel" == "STS") ]]; then
+        normalized_quality=""
+        say_warning "Specifying quality for STS or LTS channel is not supported, the quality will be ignored."
+    fi
+
+    say_verbose "Retrieving primary payload URL from aka.ms for channel: '$normalized_channel', quality: '$normalized_quality', product: '$normalized_product', os: '$normalized_os', architecture: '$normalized_architecture'."
+
+    # construct the aka.ms link
+    aka_ms_link="https://aka.ms/dotnet"
+    if [ "$internal" = true ]; then
+        aka_ms_link="$aka_ms_link/internal"
+    fi
+    aka_ms_link="$aka_ms_link/$normalized_channel"
+    if [[ ! -z "$normalized_quality" ]]; then
+        aka_ms_link="$aka_ms_link/$normalized_quality"
+    fi
+    aka_ms_link="$aka_ms_link/$normalized_product-$normalized_os-$normalized_architecture.tar.gz"
+    say_verbose "Constructed aka.ms link: '$aka_ms_link'."
+
+    # get the HTTP response
+    # do not pass credentials as a part of the $aka_ms_link and do not apply credentials in the get_http_header function
+    # otherwise the redirect link would have credentials as well
+    # it would result in applying credentials twice to the resulting link and thus breaking it, and in echoing credentials to the output as part of the redirect link
+    disable_feed_credential=true
+    response="$(get_http_header $aka_ms_link $disable_feed_credential)"
+
+    say_verbose "Received response: $response"
+    # Get results of all the redirects.
+    http_codes=$( echo "$response" | awk '$1 ~ /^HTTP/ {print $2}' )
+    # Allow intermediate 301 redirects and tolerate proxy-injected 200s
+    broken_redirects=$( echo "$http_codes" | sed '$d' | grep -vE '^(301|200)$' )
+    # The response may end without a final 2xx/4xx/5xx code, e.g. network restrictions on www.bing.com can cause the redirect to bing.com to fail with connection refused.
+    # In this case the last code should not be excluded from the check.
+    last_http_code=$( echo "$http_codes" | tail -n 1 )
+    if ! [[ $last_http_code =~ ^(2|4|5)[0-9][0-9]$ ]]; then
+        broken_redirects=$( echo "$http_codes" | grep -vE '^(301|200)$' )
+    fi
+
+    # Every redirect hop returned an allowed code (301, or a proxy-injected 200), so the redirect link exists.
+    if [[ -z "$broken_redirects" ]]; then
+        aka_ms_download_link=$( echo "$response" | awk '$1 ~ /^Location/{print $2}' | tail -1 | tr -d '\r')
+
+        if [[ -z "$aka_ms_download_link" ]]; then
+            say_verbose "The aka.ms link '$aka_ms_link' is not valid: failed to get redirect location."
+            return 1
+        fi
+
+        say_verbose "The redirect location retrieved: '$aka_ms_download_link'."
+        return 0
+    else
+        say_verbose "The aka.ms link '$aka_ms_link' is not valid: received HTTP code: $(echo "$broken_redirects" | paste -sd "," -)."
+ return 1 + fi +} + +get_feeds_to_use() +{ + feeds=( + "https://builds.dotnet.microsoft.com/dotnet" + "https://ci.dot.net/public" + ) + + if [[ -n "$azure_feed" ]]; then + feeds=("$azure_feed") + fi + + if [[ -n "$uncached_feed" ]]; then + feeds=("$uncached_feed") + fi +} + +# THIS FUNCTION MAY EXIT (if the determined version is already installed). +generate_download_links() { + + download_links=() + specific_versions=() + effective_versions=() + link_types=() + + # If generate_akams_links returns false, no fallback to old links. Just terminate. + # This function may also 'exit' (if the determined version is already installed). + generate_akams_links || return + + # Check other feeds only if we haven't been able to find an aka.ms link. + if [[ "${#download_links[@]}" -lt 1 ]]; then + for feed in ${feeds[@]} + do + # generate_regular_links may also 'exit' (if the determined version is already installed). + generate_regular_links $feed || return + done + fi + + if [[ "${#download_links[@]}" -eq 0 ]]; then + say_err "Failed to resolve the exact version number." + return 1 + fi + + say_verbose "Generated ${#download_links[@]} links." + for link_index in ${!download_links[@]} + do + say_verbose "Link $link_index: ${link_types[$link_index]}, ${effective_versions[$link_index]}, ${download_links[$link_index]}" + done +} + +# THIS FUNCTION MAY EXIT (if the determined version is already installed). +generate_akams_links() { + local valid_aka_ms_link=true; + + normalized_version="$(to_lowercase "$version")" + if [[ "$normalized_version" != "latest" ]] && [ -n "$normalized_quality" ]; then + say_err "Quality and Version options are not allowed to be specified simultaneously. See https://learn.microsoft.com/dotnet/core/tools/dotnet-install-script#options for details." 
+        return 1
+    fi
+
+    if [[ -n "$json_file" || "$normalized_version" != "latest" ]]; then
+        # aka.ms links are not needed when an exact version is specified via the command line or a json file
+        return
+    fi
+
+    get_download_link_from_aka_ms || valid_aka_ms_link=false
+
+    if [[ "$valid_aka_ms_link" == true ]]; then
+        say_verbose "Retrieved primary payload URL from aka.ms link: '$aka_ms_download_link'."
+        say_verbose "Downloading using legacy url will not be attempted."
+
+        download_link=$aka_ms_download_link
+
+        # get the version from the URL path
+        IFS='/'
+        read -ra pathElems <<< "$download_link"
+        count=${#pathElems[@]}
+        specific_version="${pathElems[count-2]}"
+        unset IFS;
+        say_verbose "Version: '$specific_version'."
+
+        # Retrieve the effective version
+        effective_version="$(get_specific_product_version "$azure_feed" "$specific_version" "$download_link")"
+
+        # Add link info to arrays
+        download_links+=($download_link)
+        specific_versions+=($specific_version)
+        effective_versions+=($effective_version)
+        link_types+=("aka.ms")
+
+        # Check if the SDK version is already installed.
+        if [[ "$dry_run" != true ]] && is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then
+            say "$asset_name with version '$effective_version' is already installed."
+            exit 0
+        fi
+
+        return 0
+    fi
+
+    # If quality is specified, exit with an error: there is no fallback approach.
+    if [ ! -z "$normalized_quality" ]; then
+        say_err "Failed to locate the latest version in the channel '$normalized_channel' with '$normalized_quality' quality for '$normalized_product', os: '$normalized_os', architecture: '$normalized_architecture'."
+        say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support."
+        return 1
+    fi
+    say_verbose "Falling back to latest.version file approach."
+} + +# THIS FUNCTION MAY EXIT (if the determined version is already installed) +# args: +# feed - $1 +generate_regular_links() { + local feed="$1" + local valid_legacy_download_link=true + + specific_version=$(get_specific_version_from_version "$feed" "$channel" "$normalized_architecture" "$version" "$json_file") || specific_version='0' + + if [[ "$specific_version" == '0' ]]; then + say_verbose "Failed to resolve the specific version number using feed '$feed'" + return + fi + + effective_version="$(get_specific_product_version "$feed" "$specific_version")" + say_verbose "specific_version=$specific_version" + + download_link="$(construct_download_link "$feed" "$channel" "$normalized_architecture" "$specific_version" "$normalized_os")" + say_verbose "Constructed primary named payload URL: $download_link" + + # Add link info to arrays + download_links+=($download_link) + specific_versions+=($specific_version) + effective_versions+=($effective_version) + link_types+=("primary") + + legacy_download_link="$(construct_legacy_download_link "$feed" "$channel" "$normalized_architecture" "$specific_version")" || valid_legacy_download_link=false + + if [ "$valid_legacy_download_link" = true ]; then + say_verbose "Constructed legacy named payload URL: $legacy_download_link" + + download_links+=($legacy_download_link) + specific_versions+=($specific_version) + effective_versions+=($effective_version) + link_types+=("legacy") + else + legacy_download_link="" + say_verbose "Could not construct a legacy_download_link; omitting..." + fi + + # Check if the SDK version is already installed. + if [[ "$dry_run" != true ]] && is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then + say "$asset_name with version '$effective_version' is already installed." 
+ exit 0 + fi +} + +print_dry_run() { + + say "Payload URLs:" + + for link_index in "${!download_links[@]}" + do + say "URL #$link_index - ${link_types[$link_index]}: ${download_links[$link_index]}" + done + + resolved_version=${specific_versions[0]} + repeatable_command="./$script_name --version "\""$resolved_version"\"" --install-dir "\""$install_root"\"" --architecture "\""$normalized_architecture"\"" --os "\""$normalized_os"\""" + + if [ ! -z "$normalized_quality" ]; then + repeatable_command+=" --quality "\""$normalized_quality"\""" + fi + + if [[ "$runtime" == "dotnet" ]]; then + repeatable_command+=" --runtime "\""dotnet"\""" + elif [[ "$runtime" == "aspnetcore" ]]; then + repeatable_command+=" --runtime "\""aspnetcore"\""" + fi + + repeatable_command+="$non_dynamic_parameters" + + if [ -n "$feed_credential" ]; then + repeatable_command+=" --feed-credential "\"""\""" + fi + + say "Repeatable invocation: $repeatable_command" +} + +calculate_vars() { + eval $invocation + + script_name=$(basename "$0") + normalized_architecture="$(get_normalized_architecture_from_architecture "$architecture")" + say_verbose "Normalized architecture: '$normalized_architecture'." + normalized_os="$(get_normalized_os "$user_defined_os")" + say_verbose "Normalized OS: '$normalized_os'." + normalized_quality="$(get_normalized_quality "$quality")" + say_verbose "Normalized quality: '$normalized_quality'." + normalized_channel="$(get_normalized_channel "$channel")" + say_verbose "Normalized channel: '$normalized_channel'." + normalized_product="$(get_normalized_product "$runtime")" + say_verbose "Normalized product: '$normalized_product'." + install_root="$(resolve_installation_path "$install_dir")" + say_verbose "InstallRoot: '$install_root'." 
+ + normalized_architecture="$(get_normalized_architecture_for_specific_sdk_version "$version" "$normalized_channel" "$normalized_architecture")" + + if [[ "$runtime" == "dotnet" ]]; then + asset_relative_path="shared/Microsoft.NETCore.App" + asset_name=".NET Core Runtime" + elif [[ "$runtime" == "aspnetcore" ]]; then + asset_relative_path="shared/Microsoft.AspNetCore.App" + asset_name="ASP.NET Core Runtime" + elif [ -z "$runtime" ]; then + asset_relative_path="sdk" + asset_name=".NET Core SDK" + fi + + get_feeds_to_use +} + +install_dotnet() { + eval $invocation + local download_failed=false + local download_completed=false + local remote_file_size=0 + + mkdir -p "$install_root" + zip_path="${zip_path:-$(mktemp "$temporary_file_template")}" + say_verbose "Archive path: $zip_path" + + for link_index in "${!download_links[@]}" + do + download_link="${download_links[$link_index]}" + specific_version="${specific_versions[$link_index]}" + effective_version="${effective_versions[$link_index]}" + link_type="${link_types[$link_index]}" + + say "Attempting to download using $link_type link $download_link" + + # The download function will set variables $http_code and $download_error_msg in case of failure. + download_failed=false + download "$download_link" "$zip_path" 2>&1 || download_failed=true + + if [ "$download_failed" = true ]; then + case ${http_code-} in + 404) + say "The resource at $link_type link '$download_link' is not available." 
+ ;; + *) + say "Failed to download $link_type link '$download_link': ${http_code-} ${download_error_msg-}" + ;; + esac + rm -f "$zip_path" 2>&1 && say_verbose "Temporary archive file $zip_path was removed" + else + download_completed=true + break + fi + done + + if [[ "$download_completed" == false ]]; then + say_err "Could not find \`$asset_name\` with version = $specific_version" + say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support" + return 1 + fi + + remote_file_size="$(get_remote_file_size "$download_link")" + + say "Extracting archive from $download_link" + extract_dotnet_package "$zip_path" "$install_root" "$remote_file_size" || return 1 + + # Check if the SDK version is installed; if not, fail the installation. + # if the version contains "RTM" or "servicing"; check if a 'release-type' SDK version is installed. + if [[ $specific_version == *"rtm"* || $specific_version == *"servicing"* ]]; then + IFS='-' + read -ra verArr <<< "$specific_version" + release_version="${verArr[0]}" + unset IFS; + say_verbose "Checking installation: version = $release_version" + if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$release_version"; then + say "Installed version is $effective_version" + return 0 + fi + fi + + # Check if the standard SDK version is installed. + say_verbose "Checking installation: version = $effective_version" + if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then + say "Installed version is $effective_version" + return 0 + fi + + # Version verification failed. More likely something is wrong either with the downloaded content or with the verification algorithm. + say_err "Failed to verify the version of installed \`$asset_name\`.\nInstallation source: $download_link.\nInstallation location: $install_root.\nReport the bug at https://github.com/dotnet/install-scripts/issues." 
+ say_err "\`$asset_name\` with version = $effective_version failed to install with an error." + return 1 +} + +args=("$@") + +local_version_file_relative_path="/.version" +bin_folder_relative_path="" +temporary_file_template="${TMPDIR:-/tmp}/dotnet.XXXXXXXXX" + +channel="LTS" +version="Latest" +json_file="" +install_dir="" +architecture="" +dry_run=false +no_path=false +azure_feed="" +uncached_feed="" +feed_credential="" +verbose=false +runtime="" +runtime_id="" +quality="" +internal=false +override_non_versioned_files=true +non_dynamic_parameters="" +user_defined_os="" + +while [ $# -ne 0 ] +do + name="$1" + case "$name" in + -c|--channel|-[Cc]hannel) + shift + channel="$1" + ;; + -v|--version|-[Vv]ersion) + shift + version="$1" + ;; + -q|--quality|-[Qq]uality) + shift + quality="$1" + ;; + --internal|-[Ii]nternal) + internal=true + non_dynamic_parameters+=" $name" + ;; + -i|--install-dir|-[Ii]nstall[Dd]ir) + shift + install_dir="$1" + ;; + --arch|--architecture|-[Aa]rch|-[Aa]rchitecture) + shift + architecture="$1" + ;; + --os|-[Oo][SS]) + shift + user_defined_os="$1" + ;; + --shared-runtime|-[Ss]hared[Rr]untime) + say_warning "The --shared-runtime flag is obsolete and may be removed in a future version of this script. The recommended usage is to specify '--runtime dotnet'." + if [ -z "$runtime" ]; then + runtime="dotnet" + fi + ;; + --runtime|-[Rr]untime) + shift + runtime="$1" + if [[ "$runtime" != "dotnet" ]] && [[ "$runtime" != "aspnetcore" ]]; then + say_err "Unsupported value for --runtime: '$1'. Valid values are 'dotnet' and 'aspnetcore'." + if [[ "$runtime" == "windowsdesktop" ]]; then + say_err "WindowsDesktop archives are manufactured for Windows platforms only." 
+ fi + exit 1 + fi + ;; + --dry-run|-[Dd]ry[Rr]un) + dry_run=true + ;; + --no-path|-[Nn]o[Pp]ath) + no_path=true + non_dynamic_parameters+=" $name" + ;; + --verbose|-[Vv]erbose) + verbose=true + non_dynamic_parameters+=" $name" + ;; + --azure-feed|-[Aa]zure[Ff]eed) + shift + azure_feed="$1" + non_dynamic_parameters+=" $name "\""$1"\""" + ;; + --uncached-feed|-[Uu]ncached[Ff]eed) + shift + uncached_feed="$1" + non_dynamic_parameters+=" $name "\""$1"\""" + ;; + --feed-credential|-[Ff]eed[Cc]redential) + shift + feed_credential="$1" + #feed_credential should start with "?", for it to be added to the end of the link. + #adding "?" at the beginning of the feed_credential if needed. + [[ -z "$(echo $feed_credential)" ]] || [[ $feed_credential == \?* ]] || feed_credential="?$feed_credential" + ;; + --runtime-id|-[Rr]untime[Ii]d) + shift + runtime_id="$1" + non_dynamic_parameters+=" $name "\""$1"\""" + say_warning "Use of --runtime-id is obsolete and should be limited to the versions below 2.1. To override architecture, use --architecture option instead. To override OS, use --os option instead." + ;; + --jsonfile|-[Jj][Ss]on[Ff]ile) + shift + json_file="$1" + ;; + --skip-non-versioned-files|-[Ss]kip[Nn]on[Vv]ersioned[Ff]iles) + override_non_versioned_files=false + non_dynamic_parameters+=" $name" + ;; + --keep-zip|-[Kk]eep[Zz]ip) + keep_zip=true + non_dynamic_parameters+=" $name" + ;; + --zip-path|-[Zz]ip[Pp]ath) + shift + zip_path="$1" + ;; + -?|--?|-h|--help|-[Hh]elp) + script_name="dotnet-install.sh" + echo ".NET Tools Installer" + echo "Usage:" + echo " # Install a .NET SDK of a given Quality from a given Channel" + echo " $script_name [-c|--channel ] [-q|--quality ]" + echo " # Install a .NET SDK of a specific public version" + echo " $script_name [-v|--version ]" + echo " $script_name -h|-?|--help" + echo "" + echo "$script_name is a simple command line interface for obtaining dotnet cli." 
+ echo " Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:" + echo " - The SDK needs to be installed without user interaction and without admin rights." + echo " - The SDK installation doesn't need to persist across multiple CI runs." + echo " To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer." + echo "" + echo "Options:" + echo " -c,--channel Download from the channel specified, Defaults to \`$channel\`." + echo " -Channel" + echo " Possible values:" + echo " - STS - the most recent Standard Term Support release" + echo " - LTS - the most recent Long Term Support release" + echo " - 2-part version in a format A.B - represents a specific release" + echo " examples: 2.0; 1.0" + echo " - 3-part version in a format A.B.Cxx - represents a specific SDK release" + echo " examples: 5.0.1xx, 5.0.2xx." + echo " Supported since 5.0 release" + echo " Warning: Value 'Current' is deprecated for the Channel parameter. Use 'STS' instead." + echo " Note: The version parameter overrides the channel parameter when any version other than 'latest' is used." + echo " -v,--version Use specific VERSION, Defaults to \`$version\`." + echo " -Version" + echo " Possible values:" + echo " - latest - the latest build on specific channel" + echo " - 3-part version in a format A.B.C - represents specific version of build" + echo " examples: 2.0.0-preview2-006120; 1.1.0" + echo " -q,--quality Download the latest build of specified quality in the channel." + echo " -Quality" + echo " The possible values are: daily, preview, GA." + echo " Works only in combination with channel. Not applicable for STS and LTS channels and will be ignored if those channels are used." + echo " Supported since 5.0 release." 
+ echo " Note: The version parameter overrides the channel parameter when any version other than 'latest' is used, and therefore overrides the quality." + echo " --internal,-Internal Download internal builds. Requires providing credentials via --feed-credential parameter." + echo " --feed-credential Token to access Azure feed. Used as a query string to append to the Azure feed." + echo " -FeedCredential This parameter typically is not specified." + echo " -i,--install-dir Install under specified location (see Install Location below)" + echo " -InstallDir" + echo " --architecture Architecture of dotnet binaries to be installed, Defaults to \`$architecture\`." + echo " --arch,-Architecture,-Arch" + echo " Possible values: x64, arm, arm64, s390x, ppc64le and loongarch64" + echo " --os Specifies operating system to be used when selecting the installer." + echo " Overrides the OS determination approach used by the script. Supported values: osx, linux, linux-musl, freebsd, rhel.6." + echo " In case any other value is provided, the platform will be determined by the script based on machine configuration." + echo " Not supported for legacy links. Use --runtime-id to specify platform for legacy links." + echo " Refer to: https://aka.ms/dotnet-os-lifecycle for more information." + echo " --runtime Installs a shared runtime only, without the SDK." + echo " -Runtime" + echo " Possible values:" + echo " - dotnet - the Microsoft.NETCore.App shared runtime" + echo " - aspnetcore - the Microsoft.AspNetCore.App shared runtime" + echo " --dry-run,-DryRun Do not perform installation. Display download link." + echo " --no-path, -NoPath Do not set PATH for the current process." + echo " --verbose,-Verbose Display diagnostics information." + echo " --azure-feed,-AzureFeed For internal use only." + echo " Allows using a different storage to download SDK archives from." + echo " --uncached-feed,-UncachedFeed For internal use only." 
+ echo " Allows using a different storage to download SDK archives from." + echo " --skip-non-versioned-files Skips non-versioned files if they already exist, such as the dotnet executable." + echo " -SkipNonVersionedFiles" + echo " --jsonfile Determines the SDK version from a user specified global.json file." + echo " Note: global.json must have a value for 'SDK:Version'" + echo " --keep-zip,-KeepZip If set, downloaded file is kept." + echo " --zip-path, -ZipPath If set, downloaded file is stored at the specified path." + echo " -?,--?,-h,--help,-Help Shows this help message" + echo "" + echo "Install Location:" + echo " Location is chosen in following order:" + echo " - --install-dir option" + echo " - Environmental variable DOTNET_INSTALL_DIR" + echo " - $HOME/.dotnet" + exit 0 + ;; + *) + say_err "Unknown argument \`$name\`" + exit 1 + ;; + esac + + shift +done + +say_verbose "Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:" +say_verbose "- The SDK needs to be installed without user interaction and without admin rights." +say_verbose "- The SDK installation doesn't need to persist across multiple CI runs." +say_verbose "To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer.\n" + +if [ "$internal" = true ] && [ -z "$(echo $feed_credential)" ]; then + message="Provide credentials via --feed-credential parameter." + if [ "$dry_run" = true ]; then + say_warning "$message" + else + say_err "$message" + exit 1 + fi +fi + +check_min_reqs +calculate_vars +# generate_regular_links call below will 'exit' if the determined version is already installed. 
+generate_download_links + +if [[ "$dry_run" = true ]]; then + print_dry_run + exit 0 +fi + +install_dotnet + +bin_path="$(get_absolute_path "$(combine_paths "$install_root" "$bin_folder_relative_path")")" +if [ "$no_path" = false ]; then + say "Adding to current process PATH: \`$bin_path\`. Note: This change will be visible only when sourcing script." + export PATH="$bin_path":"$PATH" +else + say "Binaries of dotnet can be found in $bin_path" +fi + +say "Note that the script does not resolve dependencies during installation." +say "To check the list of dependencies, go to https://learn.microsoft.com/dotnet/core/install, select your operating system and check the \"Dependencies\" section." +say "Installation finished successfully." diff --git a/global.json b/global.json index bb62055921f..06316314750 100644 --- a/global.json +++ b/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "10.0.102", + "version": "10.0.201", "rollForward": "latestMinor" } } diff --git a/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/SnapshotValueFormatters.cs b/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/SnapshotValueFormatters.cs index d49e5c17482..cdea4bbc282 100644 --- a/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/SnapshotValueFormatters.cs +++ b/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/SnapshotValueFormatters.cs @@ -10,9 +10,15 @@ public static class SnapshotValueFormatters public static ISnapshotValueFormatter ExecutionResult { get; } = new ExecutionResultSnapshotValueFormatter(); + public static ISnapshotValueFormatter ExecutionResultStable { get; } = + new StableExecutionResultSnapshotValueFormatter(); + public static ISnapshotValueFormatter GraphQLHttp { get; } = new GraphQLHttpResponseFormatter(); + public static ISnapshotValueFormatter GraphQLHttpStable { get; } = + new StableGraphQLHttpResponseFormatter(); + public static ISnapshotValueFormatter OperationResult { get; } = new 
OperationResultSnapshotValueFormatter(); diff --git a/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/StableExecutionResultSnapshotValueFormatter.cs b/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/StableExecutionResultSnapshotValueFormatter.cs new file mode 100644 index 00000000000..7fa4c4797e4 --- /dev/null +++ b/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/StableExecutionResultSnapshotValueFormatter.cs @@ -0,0 +1,94 @@ +using System.Buffers; +using System.Text.Json; +using CookieCrumble.Formatters; +using HotChocolate; +using HotChocolate.Execution; +using static CookieCrumble.HotChocolate.Formatters.StableSnapshotHelpers; + +namespace CookieCrumble.HotChocolate.Formatters; + +/// +/// Produces a deterministic snapshot representation for incremental execution results. +/// The formatter normalizes payload timing/chunking by aggregating pending/incremental/completed +/// entries and writing a stable summary plus the merged final result. +/// +internal sealed class StableExecutionResultSnapshotValueFormatter + : SnapshotValueFormatter +{ + public StableExecutionResultSnapshotValueFormatter() + : base("json") + { + } + + protected override void Format(IBufferWriter snapshot, IExecutionResult value) + { + if (value.Kind is ExecutionResultKind.SingleResult) + { + using var resultDoc = JsonDocument.Parse(value.ToJson()); + using var writer = new Utf8JsonWriter(snapshot, IndentedWriterOptions); + WriteCanonicalResponseObject(writer, resultDoc.RootElement); + writer.Flush(); + snapshot.AppendLine(); + return; + } + + FormatStreamAsync(snapshot, (IResponseStream)value).GetAwaiter().GetResult(); + } + + private static async Task FormatStreamAsync( + IBufferWriter snapshot, + IResponseStream stream) + { + var docs = new List(); + JsonResultPatcher? 
patcher = null; + var acc = new StreamAccumulator(); + + try + { + await foreach (var queryResult in stream.ReadResultsAsync().ConfigureAwait(false)) + { + var doc = JsonDocument.Parse(queryResult.ToJson()); + docs.Add(doc); + + var root = doc.RootElement; + acc.AddPayload(root); + + if (patcher is null) + { + patcher = new JsonResultPatcher(); + patcher.SetResponse(doc); + } + else + { + patcher.ApplyPatch(doc); + } + } + + await using var writer = new Utf8JsonWriter(snapshot, IndentedWriterOptions); + + if (patcher is null) + { + writer.WriteStartObject(); + writer.WriteEndObject(); + writer.Flush(); + snapshot.AppendLine(); + return; + } + + var mergedBuffer = new ArrayBufferWriter(); + patcher.WriteResponse(mergedBuffer); + using var mergedDoc = JsonDocument.Parse(mergedBuffer.WrittenMemory); + + WriteStableStreamSnapshot(writer, acc, mergedDoc.RootElement); + writer.Flush(); + snapshot.AppendLine(); + } + finally + { + foreach (var doc in docs) + { + doc.Dispose(); + } + } + } +} diff --git a/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/StableGraphQLHttpResponseFormatter.cs b/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/StableGraphQLHttpResponseFormatter.cs new file mode 100644 index 00000000000..f3049720ddc --- /dev/null +++ b/src/CookieCrumble/src/CookieCrumble.HotChocolate/Formatters/StableGraphQLHttpResponseFormatter.cs @@ -0,0 +1,197 @@ +using System.Buffers; +using System.Text.Json; +using CookieCrumble.Formatters; +using HotChocolate.Transport.Http; +using Microsoft.AspNetCore.WebUtilities; +using static CookieCrumble.HotChocolate.Formatters.StableSnapshotHelpers; + +namespace CookieCrumble.HotChocolate.Formatters; + +/// +/// Produces a deterministic snapshot representation for incremental HTTP responses. +/// Reads multipart HTTP response bodies and normalizes payload timing/chunking +/// by aggregating pending/incremental/completed entries and writing a stable summary +/// plus the merged final result. 
/// <summary>
/// Produces a deterministic snapshot representation for incremental HTTP responses.
/// Reads multipart HTTP response bodies and normalizes payload timing/chunking
/// by aggregating pending/incremental/completed entries and writing a stable summary
/// plus the merged final result.
/// </summary>
internal sealed class StableGraphQLHttpResponseFormatter
    : SnapshotValueFormatter<GraphQLHttpResponse>
{
    public StableGraphQLHttpResponseFormatter()
        : base("json")
    {
    }

    protected override void Format(IBufferWriter<byte> snapshot, GraphQLHttpResponse value)
    {
        var contentType = value.ContentHeaders.ContentType;

        if (contentType is null)
        {
            return;
        }

        // Media types and header parameter names are case-insensitive
        // (RFC 2045 / RFC 9110), so compare with OrdinalIgnoreCase. An Ordinal
        // comparison would misroute "Multipart/Mixed" to the single-response path.
        if (string.Equals(contentType.MediaType, "multipart/mixed", StringComparison.OrdinalIgnoreCase))
        {
            var boundary = contentType.Parameters.First(
                t => string.Equals(t.Name, "boundary", StringComparison.OrdinalIgnoreCase));

            // The base Format contract is synchronous, so we have to bridge to the
            // async readers here; the content stream is fully buffered by the test client.
            FormatMultipartStreamAsync(
                snapshot,
                boundary.Value!.Trim('"'),
                value.HttpResponseMessage.Content.ReadAsStream())
                .GetAwaiter().GetResult();
            return;
        }

        if (IsJsonLinesMediaType(contentType.MediaType))
        {
            FormatJsonLinesStreamAsync(snapshot, value.HttpResponseMessage.Content.ReadAsStream())
                .GetAwaiter().GetResult();
            return;
        }

        // Single response, format as canonical JSON.
        FormatSingleResponseAsync(snapshot, value.HttpResponseMessage.Content.ReadAsStream())
            .GetAwaiter().GetResult();
    }

    private static bool IsJsonLinesMediaType(string? mediaType)
        => string.Equals(mediaType, "application/graphql-response+jsonl", StringComparison.OrdinalIgnoreCase)
            || string.Equals(mediaType, "application/jsonl", StringComparison.OrdinalIgnoreCase);

    private static async Task FormatSingleResponseAsync(
        IBufferWriter<byte> snapshot,
        Stream body)
    {
        using var doc = await JsonDocument.ParseAsync(body).ConfigureAwait(false);
        await using var writer = new Utf8JsonWriter(snapshot, IndentedWriterOptions);
        WriteCanonicalResponseObject(writer, doc.RootElement);
        writer.Flush();
        snapshot.AppendLine();
    }

    private static Task FormatJsonLinesStreamAsync(
        IBufferWriter<byte> snapshot,
        Stream body)
        => AccumulateAndWriteAsync(snapshot, ReadJsonLinesAsync(body));

    private static Task FormatMultipartStreamAsync(
        IBufferWriter<byte> snapshot,
        string boundary,
        Stream body)
        => AccumulateAndWriteAsync(snapshot, ReadMultipartAsync(boundary, body));

    /// <summary>
    /// Reads one JSON document per non-empty line of a JSON-lines body.
    /// </summary>
    private static async IAsyncEnumerable<JsonDocument> ReadJsonLinesAsync(Stream body)
    {
        using var reader = new StreamReader(body);

        while (await reader.ReadLineAsync().ConfigureAwait(false) is { } line)
        {
            if (string.IsNullOrWhiteSpace(line))
            {
                continue;
            }

            yield return JsonDocument.Parse(line);
        }
    }

    /// <summary>
    /// Reads one JSON document per section of a multipart/mixed body.
    /// </summary>
    private static async IAsyncEnumerable<JsonDocument> ReadMultipartAsync(string boundary, Stream body)
    {
        var reader = new MultipartReader(boundary, body);
        var section = await reader.ReadNextSectionAsync().ConfigureAwait(false);

        while (section is not null)
        {
            await using var sectionBody = section.Body;
            yield return await JsonDocument.ParseAsync(sectionBody).ConfigureAwait(false);
            section = await reader.ReadNextSectionAsync().ConfigureAwait(false);
        }
    }

    /// <summary>
    /// Shared pipeline for both stream transports: feeds every payload into the
    /// accumulator and the result patcher, then writes either an empty object
    /// (no payloads at all) or the stable stream snapshot plus merged final result.
    /// Previously duplicated verbatim in the JSON-lines and multipart paths.
    /// </summary>
    private static async Task AccumulateAndWriteAsync(
        IBufferWriter<byte> snapshot,
        IAsyncEnumerable<JsonDocument> payloads)
    {
        var docs = new List<JsonDocument>();
        JsonResultPatcher? patcher = null;
        var acc = new StreamAccumulator();

        try
        {
            await foreach (var doc in payloads.ConfigureAwait(false))
            {
                docs.Add(doc);
                acc.AddPayload(doc.RootElement);

                if (patcher is null)
                {
                    patcher = new JsonResultPatcher();
                    patcher.SetResponse(doc);
                }
                else
                {
                    patcher.ApplyPatch(doc);
                }
            }

            await using var writer = new Utf8JsonWriter(snapshot, IndentedWriterOptions);

            if (patcher is null)
            {
                // No payloads: still emit valid JSON so the snapshot is well-formed.
                writer.WriteStartObject();
                writer.WriteEndObject();
                writer.Flush();
                snapshot.AppendLine();
                return;
            }

            var mergedBuffer = new ArrayBufferWriter<byte>();
            patcher.WriteResponse(mergedBuffer);
            using var mergedDoc = JsonDocument.Parse(mergedBuffer.WrittenMemory);

            WriteStableStreamSnapshot(writer, acc, mergedDoc.RootElement);
            writer.Flush();
            snapshot.AppendLine();
        }
        finally
        {
            // The patcher references the parsed documents, so they must stay
            // alive until the merged result has been written.
            foreach (var doc in docs)
            {
                doc.Dispose();
            }
        }
    }
}
/// <summary>
/// Shared helpers for stable (deterministic) snapshot formatters.
/// Provides canonical JSON writing, stream accumulation, and sort-key logic
/// used by the stable GraphQL response-stream and HTTP-response formatters.
/// </summary>
internal static class StableSnapshotHelpers
{
    /// <summary>Writer options for the human-readable, indented snapshot output.</summary>
    public static readonly JsonWriterOptions IndentedWriterOptions = new()
    {
        Indented = true,
        SkipValidation = true,
        Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping
    };

    /// <summary>Writer options for compact canonical JSON used to build sort keys.</summary>
    public static readonly JsonWriterOptions CompactWriterOptions = new()
    {
        Indented = false,
        SkipValidation = true,
        Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping
    };

    /// <summary>
    /// Reads the "id" property of an incremental-delivery entry as a string.
    /// String ids are used as-is; numeric ids are normalized to their invariant
    /// text form so they compare consistently across payloads.
    /// </summary>
    internal static bool TryReadId(JsonElement element, out string id)
    {
        if (!element.TryGetProperty("id", out var idElement))
        {
            id = string.Empty;
            return false;
        }

        switch (idElement.ValueKind)
        {
            case JsonValueKind.String:
                id = idElement.GetString() ?? string.Empty;
                return !string.IsNullOrEmpty(id);

            case JsonValueKind.Number:
                if (idElement.TryGetInt64(out var int64Id))
                {
                    id = int64Id.ToString(CultureInfo.InvariantCulture);
                    return true;
                }

                // Not representable as Int64 (for example a decimal), fall back to raw text.
                id = idElement.GetRawText();
                return !string.IsNullOrEmpty(id);

            default:
                id = idElement.GetRawText();
                return !string.IsNullOrEmpty(id);
        }
    }

    /// <summary>
    /// Writes the full stable snapshot object: payload count, initial payload,
    /// sorted pending/incremental/completed sections, root errors, diagnostics,
    /// and the merged final result.
    /// </summary>
    public static void WriteStableStreamSnapshot(
        Utf8JsonWriter writer,
        StreamAccumulator acc,
        JsonElement mergedResult)
    {
        writer.WriteStartObject();
        writer.WriteString("kind", "stable-stream");
        writer.WriteNumber("payloadCount", acc.PayloadCount);

        if (acc.InitialPayload is { } initial)
        {
            writer.WritePropertyName("initial");
            WriteCanonicalResponseObject(writer, initial);
        }

        WritePending(writer, acc.PendingById);
        WriteIncremental(writer, acc.IncrementalEntries, acc.PendingById);
        WriteCompleted(writer, acc.CompletedEntries);
        WriteRootErrors(writer, acc.RootErrors);

        WriteDiagnostics(writer, acc);

        writer.WritePropertyName("final");
        WriteCanonicalResponseObject(writer, mergedResult);

        writer.WriteEndObject();
    }

    /// <summary>
    /// Writes the pending entries sorted by id so their order is independent
    /// of delivery timing.
    /// </summary>
    public static void WritePending(
        Utf8JsonWriter writer,
        Dictionary<string, PendingEntry> pendingById)
    {
        if (pendingById.Count == 0)
        {
            return;
        }

        var pending = pendingById.Values.ToList();
        pending.Sort(static (x, y) => CompareIds(x.Id, y.Id));

        writer.WritePropertyName("pending");
        writer.WriteStartArray();

        foreach (var entry in pending)
        {
            writer.WriteStartObject();
            writer.WriteString("id", entry.Id);
            writer.WritePropertyName("path");
            WriteCanonicalJson(writer, entry.Path);

            if (!string.IsNullOrEmpty(entry.Label))
            {
                writer.WriteString("label", entry.Label);
            }

            writer.WriteEndObject();
        }

        writer.WriteEndArray();
    }

    /// <summary>
    /// Writes the incremental entries in a deterministic order. Ties are broken
    /// successively by id, pending path, sub-path, payload kind (data vs items),
    /// payload value, and errors, all via canonical-JSON sort keys.
    /// </summary>
    public static void WriteIncremental(
        Utf8JsonWriter writer,
        List<IncrementalEntry> incrementalEntries,
        Dictionary<string, PendingEntry> pendingById)
    {
        if (incrementalEntries.Count == 0)
        {
            return;
        }

        var incremental = incrementalEntries.ToList();
        incremental.Sort(
            (x, y) =>
            {
                var c = CompareIds(x.Id, y.Id);
                if (c != 0)
                {
                    return c;
                }

                c = string.CompareOrdinal(GetPathSortKey(x, pendingById), GetPathSortKey(y, pendingById));
                if (c != 0)
                {
                    return c;
                }

                c = string.CompareOrdinal(GetSubPathSortKey(x), GetSubPathSortKey(y));
                if (c != 0)
                {
                    return c;
                }

                c = string.CompareOrdinal(GetPayloadKindSortKey(x), GetPayloadKindSortKey(y));
                if (c != 0)
                {
                    return c;
                }

                c = string.CompareOrdinal(GetPayloadValueSortKey(x), GetPayloadValueSortKey(y));
                if (c != 0)
                {
                    return c;
                }

                return string.CompareOrdinal(GetErrorsSortKey(x.Errors), GetErrorsSortKey(y.Errors));
            });

        writer.WritePropertyName("incremental");
        writer.WriteStartArray();

        foreach (var entry in incremental)
        {
            writer.WriteStartObject();
            writer.WriteString("id", entry.Id);

            if (pendingById.TryGetValue(entry.Id, out var pending))
            {
                writer.WritePropertyName("path");
                WriteCanonicalJson(writer, pending.Path);
            }

            if (entry.SubPath is { } subPath)
            {
                writer.WritePropertyName("subPath");
                WriteCanonicalJson(writer, subPath);
            }

            if (entry.Data is { } data)
            {
                writer.WritePropertyName("data");
                WriteCanonicalJson(writer, data);
            }

            if (entry.Items is { } items)
            {
                writer.WritePropertyName("items");
                WriteCanonicalJson(writer, items);
            }

            if (entry.Errors is { } errors)
            {
                writer.WritePropertyName("errors");
                WriteCanonicalJson(writer, errors);
            }

            writer.WriteEndObject();
        }

        writer.WriteEndArray();
    }

    /// <summary>
    /// Writes the completed entries sorted by id, then by their errors sort key.
    /// </summary>
    public static void WriteCompleted(
        Utf8JsonWriter writer,
        List<CompletedEntry> completedEntries)
    {
        if (completedEntries.Count == 0)
        {
            return;
        }

        var completed = completedEntries.ToList();
        completed.Sort(
            (x, y) =>
            {
                var c = CompareIds(x.Id, y.Id);
                if (c != 0)
                {
                    return c;
                }

                return string.CompareOrdinal(GetErrorsSortKey(x.Errors), GetErrorsSortKey(y.Errors));
            });

        writer.WritePropertyName("completed");
        writer.WriteStartArray();

        foreach (var entry in completed)
        {
            writer.WriteStartObject();
            writer.WriteString("id", entry.Id);

            if (entry.Errors is { } errors)
            {
                writer.WritePropertyName("errors");
                WriteCanonicalJson(writer, errors);
            }

            writer.WriteEndObject();
        }

        writer.WriteEndArray();
    }

    /// <summary>
    /// Writes a diagnostics section listing ids that were announced as pending
    /// but never completed, and ids that completed without a pending entry.
    /// Omitted entirely when neither anomaly occurred.
    /// </summary>
    public static void WriteDiagnostics(
        Utf8JsonWriter writer,
        StreamAccumulator acc)
    {
        var pendingIds = new HashSet<string>(acc.PendingById.Keys);
        var completedIds = new HashSet<string>(acc.CompletedEntries.Select(c => c.Id));

        var neverCompleted = pendingIds.Except(completedIds).Order().ToList();
        var completedWithoutPending = completedIds.Except(pendingIds).Order().ToList();

        if (neverCompleted.Count == 0 && completedWithoutPending.Count == 0)
        {
            return;
        }

        writer.WritePropertyName("diagnostics");
        writer.WriteStartObject();

        if (neverCompleted.Count > 0)
        {
            writer.WritePropertyName("pendingNeverCompleted");
            writer.WriteStartArray();
            foreach (var id in neverCompleted)
            {
                writer.WriteStringValue(id);
            }
            writer.WriteEndArray();
        }

        if (completedWithoutPending.Count > 0)
        {
            writer.WritePropertyName("completedWithoutPending");
            writer.WriteStartArray();
            foreach (var id in completedWithoutPending)
            {
                writer.WriteStringValue(id);
            }
            writer.WriteEndArray();
        }

        writer.WriteEndObject();
    }

    /// <summary>
    /// Writes root-level errors sorted by their canonical-JSON representation.
    /// </summary>
    public static void WriteRootErrors(
        Utf8JsonWriter writer,
        List<JsonElement> rootErrors)
    {
        if (rootErrors.Count == 0)
        {
            return;
        }

        var errors = rootErrors.ToList();
        errors.Sort(
            static (x, y) =>
                string.CompareOrdinal(
                    BuildCanonicalJsonSortKey(x),
                    BuildCanonicalJsonSortKey(y)));

        writer.WritePropertyName("rootErrors");
        writer.WriteStartArray();

        foreach (var error in errors)
        {
            WriteCanonicalJson(writer, error);
        }

        writer.WriteEndArray();
    }

    /// <summary>
    /// Writes the element as canonical JSON: object properties are sorted
    /// ordinally by name (recursively); arrays keep their order; scalars are
    /// written through unchanged.
    /// </summary>
    public static void WriteCanonicalJson(Utf8JsonWriter writer, JsonElement element)
    {
        switch (element.ValueKind)
        {
            case JsonValueKind.Object:
                var properties = element.EnumerateObject().ToList();
                properties.Sort(static (x, y) => string.CompareOrdinal(x.Name, y.Name));

                writer.WriteStartObject();

                foreach (var property in properties)
                {
                    writer.WritePropertyName(property.Name);
                    WriteCanonicalJson(writer, property.Value);
                }

                writer.WriteEndObject();
                break;

            case JsonValueKind.Array:
                writer.WriteStartArray();

                foreach (var item in element.EnumerateArray())
                {
                    WriteCanonicalJson(writer, item);
                }

                writer.WriteEndArray();
                break;

            case JsonValueKind.String:
                writer.WriteStringValue(element.GetString());
                break;

            case JsonValueKind.Number:
            case JsonValueKind.True:
            case JsonValueKind.False:
            case JsonValueKind.Null:
                element.WriteTo(writer);
                break;

            default:
                // Undefined value kinds cannot occur in a parsed document;
                // write null so the output is still valid JSON.
                writer.WriteNullValue();
                break;
        }
    }

    /// <summary>
    /// Writes a response-shape object (initial or final merged result) with a
    /// canonical property order. Strips incremental-delivery protocol fields
    /// (captured separately) and the fusion extension (captured
    /// separately by the test harness as the rendered operation plan). The
    /// extensions object is omitted entirely when it contains nothing
    /// beyond fusion; otherwise the non-fusion extensions are kept.
    /// </summary>
    public static void WriteCanonicalResponseObject(
        Utf8JsonWriter writer,
        JsonElement element)
    {
        if (element.ValueKind is not JsonValueKind.Object)
        {
            WriteCanonicalJson(writer, element);
            return;
        }

        var properties = element.EnumerateObject().ToList();
        properties.Sort(static (x, y) => string.CompareOrdinal(x.Name, y.Name));

        writer.WriteStartObject();

        foreach (var property in properties)
        {
            if (IsStreamField(property.Name))
            {
                continue;
            }

            if (string.Equals(property.Name, "extensions", StringComparison.Ordinal))
            {
                WriteExtensionsWithoutFusion(writer, property.Value);
                continue;
            }

            writer.WritePropertyName(property.Name);
            WriteCanonicalJson(writer, property.Value);
        }

        writer.WriteEndObject();
    }

    /// <summary>
    /// Writes the extensions object minus the "fusion" entry; omits the
    /// property entirely when nothing else remains.
    /// </summary>
    private static void WriteExtensionsWithoutFusion(
        Utf8JsonWriter writer,
        JsonElement extensions)
    {
        if (extensions.ValueKind is not JsonValueKind.Object)
        {
            // Non-object extensions are unexpected; keep them as-is.
            writer.WritePropertyName("extensions");
            WriteCanonicalJson(writer, extensions);
            return;
        }

        var remaining = new List<JsonProperty>();

        foreach (var extension in extensions.EnumerateObject())
        {
            if (string.Equals(extension.Name, "fusion", StringComparison.Ordinal))
            {
                continue;
            }

            remaining.Add(extension);
        }

        if (remaining.Count == 0)
        {
            return;
        }

        remaining.Sort(static (x, y) => string.CompareOrdinal(x.Name, y.Name));

        writer.WritePropertyName("extensions");
        writer.WriteStartObject();

        foreach (var extension in remaining)
        {
            writer.WritePropertyName(extension.Name);
            WriteCanonicalJson(writer, extension.Value);
        }

        writer.WriteEndObject();
    }

    /// <summary>
    /// Renders the element as compact canonical JSON for use as an ordinal
    /// sort key, so structurally equal values always produce equal keys.
    /// </summary>
    public static string BuildCanonicalJsonSortKey(JsonElement element)
    {
        var buffer = new ArrayBufferWriter<byte>();
        using var writer = new Utf8JsonWriter(buffer, CompactWriterOptions);
        WriteCanonicalJson(writer, element);
        writer.Flush();
        return Encoding.UTF8.GetString(buffer.WrittenSpan);
    }

    /// <summary>
    /// Orders ids numerically when both parse as integers, places numeric ids
    /// before non-numeric ones, and falls back to ordinal comparison otherwise.
    /// Parses as Int64 because <see cref="TryReadId"/> normalizes numeric ids
    /// via TryGetInt64; with Int32 parsing, ids beyond Int32.MaxValue
    /// would silently fall back to lexical ordering (for example
    /// "9999999999" sorting after "10000000000").
    /// </summary>
    public static int CompareIds(string x, string y)
    {
        var xIsNumeric = long.TryParse(x, NumberStyles.Integer, CultureInfo.InvariantCulture, out var xId);
        var yIsNumeric = long.TryParse(y, NumberStyles.Integer, CultureInfo.InvariantCulture, out var yId);

        if (xIsNumeric && yIsNumeric)
        {
            return xId.CompareTo(yId);
        }

        if (xIsNumeric)
        {
            return -1;
        }

        if (yIsNumeric)
        {
            return 1;
        }

        return string.CompareOrdinal(x, y);
    }

    /// <summary>
    /// Identifies incremental-delivery protocol fields that are stripped from
    /// the response shape and captured separately.
    /// </summary>
    public static bool IsStreamField(string fieldName)
        => fieldName is "hasNext" or "pending" or "incremental" or "completed";

    private static string GetPathSortKey(
        IncrementalEntry entry,
        Dictionary<string, PendingEntry> pendingById)
        => pendingById.TryGetValue(entry.Id, out var pending)
            ? BuildCanonicalJsonSortKey(pending.Path)
            : string.Empty;

    private static string GetSubPathSortKey(IncrementalEntry entry)
        => entry.SubPath is { } subPath
            ? BuildCanonicalJsonSortKey(subPath)
            : string.Empty;

    private static string GetPayloadKindSortKey(IncrementalEntry entry)
    {
        if (entry.Data is not null)
        {
            return "data";
        }

        if (entry.Items is not null)
        {
            return "items";
        }

        return string.Empty;
    }

    private static string GetPayloadValueSortKey(IncrementalEntry entry)
    {
        if (entry.Data is { } data)
        {
            return BuildCanonicalJsonSortKey(data);
        }

        if (entry.Items is { } items)
        {
            return BuildCanonicalJsonSortKey(items);
        }

        return string.Empty;
    }

    private static string GetErrorsSortKey(JsonElement? errors)
        => errors is { } e
            ? BuildCanonicalJsonSortKey(e)
            : string.Empty;

    /// <summary>
    /// Collects pending, incremental, completed, and root-error entries across
    /// all payloads of an incremental-delivery stream. Elements are cloned so
    /// they remain valid after the source documents are disposed.
    /// </summary>
    internal sealed class StreamAccumulator
    {
        // Total number of payloads observed.
        public int PayloadCount { get; private set; }

        // Clone of the first payload's root element, if any payload arrived.
        public JsonElement? InitialPayload { get; private set; }

        public Dictionary<string, PendingEntry> PendingById { get; } = [];

        public List<IncrementalEntry> IncrementalEntries { get; } = [];

        public List<CompletedEntry> CompletedEntries { get; } = [];

        public List<JsonElement> RootErrors { get; } = [];

        /// <summary>
        /// Records one payload: captures the first payload, and extracts the
        /// pending, incremental, completed, and errors sections when present.
        /// Entries without an id or (for pending) without a path are skipped.
        /// </summary>
        public void AddPayload(JsonElement root)
        {
            PayloadCount++;

            InitialPayload ??= root.Clone();

            if (root.TryGetProperty("pending", out var pending))
            {
                foreach (var entry in pending.EnumerateArray())
                {
                    if (!TryReadId(entry, out var id))
                    {
                        continue;
                    }

                    var label = entry.TryGetProperty("label", out var labelElement)
                        && labelElement.ValueKind is JsonValueKind.String
                        ? labelElement.GetString()
                        : null;

                    if (!entry.TryGetProperty("path", out var path))
                    {
                        continue;
                    }

                    PendingById[id] = new PendingEntry(id, path.Clone(), label);
                }
            }

            if (root.TryGetProperty("incremental", out var incremental))
            {
                foreach (var entry in incremental.EnumerateArray())
                {
                    if (!TryReadId(entry, out var id))
                    {
                        continue;
                    }

                    JsonElement? data = null;
                    JsonElement? items = null;
                    JsonElement? subPath = null;
                    JsonElement? errors = null;

                    if (entry.TryGetProperty("data", out var dataElement))
                    {
                        data = dataElement.Clone();
                    }

                    if (entry.TryGetProperty("items", out var itemsElement))
                    {
                        items = itemsElement.Clone();
                    }

                    if (entry.TryGetProperty("subPath", out var subPathElement))
                    {
                        subPath = subPathElement.Clone();
                    }

                    if (entry.TryGetProperty("errors", out var errorsElement))
                    {
                        errors = errorsElement.Clone();
                    }

                    IncrementalEntries.Add(new IncrementalEntry(id, subPath, data, items, errors));
                }
            }

            if (root.TryGetProperty("completed", out var completed))
            {
                foreach (var entry in completed.EnumerateArray())
                {
                    if (!TryReadId(entry, out var id))
                    {
                        continue;
                    }

                    var errors = entry.TryGetProperty("errors", out var errorsElement)
                        ? errorsElement.Clone()
                        : (JsonElement?)null;

                    CompletedEntries.Add(new CompletedEntry(id, errors));
                }
            }

            if (root.TryGetProperty("errors", out var rootErrors)
                && rootErrors.ValueKind is JsonValueKind.Array)
            {
                foreach (var error in rootErrors.EnumerateArray())
                {
                    RootErrors.Add(error.Clone());
                }
            }
        }
    }

    internal sealed record PendingEntry(string Id, JsonElement Path, string? Label);

    internal sealed record IncrementalEntry(
        string Id,
        JsonElement? SubPath,
        JsonElement? Data,
        JsonElement? Items,
        JsonElement? Errors);

    internal sealed record CompletedEntry(string Id, JsonElement? Errors);
}
GetString(Cursor cursor, ElementTokenType expectedType) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); var tokenType = row.TokenType; @@ -52,7 +52,7 @@ internal string GetNameOfPropertyValue(Cursor valueCursor) internal ReadOnlySpan GetPropertyNameRaw(Cursor valueCursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); // The property name is stored one row before the value var nameCursor = valueCursor + (-1); @@ -70,7 +70,7 @@ internal string GetRawValueAsString(Cursor cursor) internal ReadOnlySpan GetRawValue(Cursor cursor, bool includeQuotes) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); @@ -101,7 +101,7 @@ internal string GetPropertyRawValueAsString(Cursor valueCursor) private ReadOnlySpan GetPropertyRawValue(Cursor valueCursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); // The property name is stored one row before the value Debug.Assert(_metaDb.GetElementTokenType(valueCursor - 1) == ElementTokenType.PropertyName); @@ -121,7 +121,7 @@ private ReadOnlySpan GetPropertyRawValue(Cursor valueCursor) internal bool TextEquals(Cursor cursor, ReadOnlySpan otherText, bool isPropertyName) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); byte[]? otherUtf8TextArray = null; @@ -161,7 +161,7 @@ OperationStatus.DestinationTooSmall or internal bool TextEquals(Cursor cursor, ReadOnlySpan otherUtf8Text, bool isPropertyName, bool shouldUnescape) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var matchCursor = isPropertyName ? 
cursor + (-1) : cursor; var row = _metaDb.Get(matchCursor); diff --git a/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetProperty.cs b/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetProperty.cs index 01b41ac815e..535dbb5f8eb 100644 --- a/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetProperty.cs +++ b/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetProperty.cs @@ -10,7 +10,7 @@ internal bool TryGetNamedPropertyValue( string propertyName, out ResultElement value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); (startCursor, var tokenType) = _metaDb.GetStartCursor(startCursor); CheckExpectedType(ElementTokenType.StartObject, tokenType); @@ -118,7 +118,7 @@ internal bool TryGetNamedPropertyValue( ReadOnlySpan propertyName, out ResultElement value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); (startCursor, var tokenType) = _metaDb.GetStartCursor(startCursor); CheckExpectedType(ElementTokenType.StartObject, tokenType); @@ -238,14 +238,14 @@ private bool TryGetNamedPropertyValue( internal Cursor GetStartCursor(Cursor cursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); (cursor, _) = _metaDb.GetStartCursor(cursor); return cursor; } internal Cursor GetEndCursor(Cursor cursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); return cursor + _metaDb.GetNumberOfRows(cursor); } } diff --git a/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetValue.cs b/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetValue.cs index 2794eb02500..704468199c5 100644 --- a/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetValue.cs +++ b/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.TryGetValue.cs @@ -6,7 +6,7 @@ public sealed partial 
class ResultDocument { internal bool TryGetValue(Cursor cursor, out sbyte value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -26,7 +26,7 @@ internal bool TryGetValue(Cursor cursor, out sbyte value) internal bool TryGetValue(Cursor cursor, out byte value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -46,7 +46,7 @@ internal bool TryGetValue(Cursor cursor, out byte value) internal bool TryGetValue(Cursor cursor, out short value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -66,7 +66,7 @@ internal bool TryGetValue(Cursor cursor, out short value) internal bool TryGetValue(Cursor cursor, out ushort value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -86,7 +86,7 @@ internal bool TryGetValue(Cursor cursor, out ushort value) internal bool TryGetValue(Cursor cursor, out int value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -106,7 +106,7 @@ internal bool TryGetValue(Cursor cursor, out int value) internal bool TryGetValue(Cursor cursor, out uint value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -126,7 +126,7 @@ internal bool 
TryGetValue(Cursor cursor, out uint value) internal bool TryGetValue(Cursor cursor, out long value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -146,7 +146,7 @@ internal bool TryGetValue(Cursor cursor, out long value) internal bool TryGetValue(Cursor cursor, out ulong value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -166,7 +166,7 @@ internal bool TryGetValue(Cursor cursor, out ulong value) internal bool TryGetValue(Cursor cursor, out double value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -186,7 +186,7 @@ internal bool TryGetValue(Cursor cursor, out double value) internal bool TryGetValue(Cursor cursor, out float value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -206,7 +206,7 @@ internal bool TryGetValue(Cursor cursor, out float value) internal bool TryGetValue(Cursor cursor, out decimal value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); diff --git a/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.cs b/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.cs index e3c1135b637..173988b8e67 100644 --- a/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.cs +++ b/src/HotChocolate/Core/src/Types/Text/Json/ResultDocument.cs @@ -25,7 +25,7 @@ public sealed partial 
class ResultDocument : IDisposable #else private readonly object _dataChunkLock = new(); #endif - private bool _disposed; + private int _disposed; public ResultDocument(Operation operation, ulong includeFlags) { @@ -105,7 +105,7 @@ internal Operation GetOperation() internal ResultElement GetArrayIndexElement(Cursor current, int arrayIndex) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var (start, tokenType) = _metaDb.GetStartCursor(current); @@ -124,7 +124,7 @@ internal ResultElement GetArrayIndexElement(Cursor current, int arrayIndex) internal int GetArrayLength(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); (current, var tokenType) = _metaDb.GetStartCursor(current); @@ -135,7 +135,7 @@ internal int GetArrayLength(Cursor current) internal int GetPropertyCount(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); (current, var tokenType) = _metaDb.GetStartCursor(current); @@ -245,7 +245,7 @@ internal ResultElement GetParent(Cursor current) internal bool IsInvalidated(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var tokenType = _metaDb.GetElementTokenType(current, resolveReferences: false); @@ -272,7 +272,7 @@ internal bool IsInvalidated(Cursor current) internal bool IsNullOrInvalidated(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var tokenType = _metaDb.GetElementTokenType(current); @@ -304,7 +304,7 @@ internal bool IsNullOrInvalidated(Cursor current) internal bool IsInternalProperty(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); // The flag sits on the property row (one before value) var propertyCursor = 
current.AddRows(-1); @@ -314,7 +314,7 @@ internal bool IsInternalProperty(Cursor current) internal void Invalidate(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var tokenType = _metaDb.GetElementTokenType(current, resolveReferences: false); @@ -883,16 +883,24 @@ private static void CheckExpectedType(ElementTokenType expected, ElementTokenTyp public void Dispose() { - if (!_disposed) + ReturnRentedMemory(); + GC.SuppressFinalize(this); + } + + private void ReturnRentedMemory() + { + if (Interlocked.Exchange(ref _disposed, 1) != 0) { - _metaDb.Dispose(); + return; + } - if (_data.Count > 0) - { - JsonMemory.Return(JsonMemoryKind.Json, _data); - } + _metaDb.Dispose(); - _disposed = true; + if (_data.Count > 0) + { + JsonMemory.Return(JsonMemoryKind.Json, _data); } } + + ~ResultDocument() => ReturnRentedMemory(); } diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution.Types/Completion/CompositeSchemaBuilder.cs b/src/HotChocolate/Fusion/src/Fusion.Execution.Types/Completion/CompositeSchemaBuilder.cs index 9618e26ea61..0ff38fd3f93 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution.Types/Completion/CompositeSchemaBuilder.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution.Types/Completion/CompositeSchemaBuilder.cs @@ -136,6 +136,38 @@ private static CompositeSchemaBuilderContext CreateTypes( } } + // Register the @defer directive so the gateway's validation accepts it. + // The gateway manages @defer itself (it does not pass it to subgraphs). 
+ if (options.EnableDefer && !directiveDefinitions.ContainsKey(Defer.Name)) + { + var deferDirectiveNode = new DirectiveDefinitionNode( + null, + new HotChocolate.Language.NameNode(Defer.Name), + null, + false, + new[] + { + new InputValueDefinitionNode( + null, + new HotChocolate.Language.NameNode(Defer.Arguments.If), + null, + new NamedTypeNode("Boolean"), + new BooleanValueNode(true), + []), + new InputValueDefinitionNode( + null, + new HotChocolate.Language.NameNode(Defer.Arguments.Label), + null, + new NamedTypeNode("String"), + null, + []) + }, + new HotChocolate.Language.NameNode[] { new("INLINE_FRAGMENT"), new("FRAGMENT_SPREAD") }); + + directiveTypes.Add(CreateDirectiveType(deferDirectiveNode)); + directiveDefinitions.Add(Defer.Name, deferDirectiveNode); + } + features ??= new FeatureCollection(); return new CompositeSchemaBuilderContext( diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution.Types/FusionSchemaOptions.cs b/src/HotChocolate/Fusion/src/Fusion.Execution.Types/FusionSchemaOptions.cs index 2fd15320f7b..ea5210a1af0 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution.Types/FusionSchemaOptions.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution.Types/FusionSchemaOptions.cs @@ -4,8 +4,12 @@ internal struct FusionSchemaOptions : IFusionSchemaOptions { public bool ApplySerializeAsToScalars { get; private set; } + public bool EnableDefer { get; private set; } = true; + public bool EnableSemanticIntrospection { get; private set; } + public FusionSchemaOptions() { } + public static FusionSchemaOptions From(IFusionSchemaOptions? options) { var copy = new FusionSchemaOptions(); @@ -13,6 +17,7 @@ public static FusionSchemaOptions From(IFusionSchemaOptions? 
options) if (options is not null) { copy.ApplySerializeAsToScalars = options.ApplySerializeAsToScalars; + copy.EnableDefer = options.EnableDefer; copy.EnableSemanticIntrospection = options.EnableSemanticIntrospection; } diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution.Types/IFusionSchemaOptions.cs b/src/HotChocolate/Fusion/src/Fusion.Execution.Types/IFusionSchemaOptions.cs index fbc4d6af671..5416f8b304b 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution.Types/IFusionSchemaOptions.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution.Types/IFusionSchemaOptions.cs @@ -10,6 +10,13 @@ public interface IFusionSchemaOptions /// bool ApplySerializeAsToScalars { get; } + /// + /// Gets whether @defer is enabled. + /// When false, the @defer directive is not exposed in the schema + /// and deferred execution is disabled. + /// + bool EnableDefer { get; } + /// /// Enables the __search and __definitions introspection fields /// for semantic schema discovery. diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/DeferredPayloadDataFormatter.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/DeferredPayloadDataFormatter.cs new file mode 100644 index 00000000000..bfcd4572567 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/DeferredPayloadDataFormatter.cs @@ -0,0 +1,18 @@ +using HotChocolate.Execution; +using HotChocolate.Fusion.Text.Json; +using HotChocolate.Text.Json; + +namespace HotChocolate.Fusion.Execution; + +/// +/// Adapts a nested so that it can be written +/// as the "data" payload of an . The incremental +/// delivery contract requires the data value to be the delta to merge at the +/// pending path rather than the fully rooted result, so this formatter writes the +/// subtree element directly instead of the composite result document's root. 
+/// +internal sealed class DeferredPayloadDataFormatter(CompositeResultElement element) : IRawJsonFormatter +{ + public void WriteDataTo(JsonWriter jsonWriter) + => element.WriteTo(jsonWriter); +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs index 03944446a06..dd563a65411 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ExecutionState.cs @@ -548,7 +548,7 @@ private static bool ContainsDependent( }; } - private void AddToBacklog(ExecutionNode node) + internal void AddToBacklog(ExecutionNode node) { var nodeId = node.Id; diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOperationInfo.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOperationInfo.cs index 010222210f1..779219f6da4 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOperationInfo.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOperationInfo.cs @@ -9,7 +9,7 @@ internal sealed class FusionOperationInfo : RequestFeature public OperationPlan? OperationPlan { get; set; } - protected override void Reset() + protected internal override void Reset() { OperationId = null; OperationPlan = null; diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOptions.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOptions.cs index 72c9e7f00cf..89b065b36ac 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOptions.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionOptions.cs @@ -167,6 +167,23 @@ public bool ApplySerializeAsToScalars } } + /// + /// Gets or sets whether @defer is enabled. + /// When false, the @defer directive is not exposed in the schema + /// and deferred execution is disabled. + /// true by default. 
+ /// + public bool EnableDefer + { + get; + set + { + ExpectMutableOptions(); + + field = value; + } + } = true; + /// /// Enables the __search and __definitions introspection fields /// for semantic schema discovery. @@ -201,6 +218,7 @@ public FusionOptions Clone() LazyInitialization = LazyInitialization, NodeIdSerializerFormat = NodeIdSerializerFormat, ApplySerializeAsToScalars = ApplySerializeAsToScalars, + EnableDefer = EnableDefer, EnableSemanticIntrospection = EnableSemanticIntrospection }; } diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionRequestExecutorManager.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionRequestExecutorManager.cs index 777c3069448..1614b1f1ebe 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionRequestExecutorManager.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/FusionRequestExecutorManager.cs @@ -177,7 +177,7 @@ private FusionRequestExecutor CreateRequestExecutor( var options = CreateOptions(setup); var requestOptions = CreateRequestOptions(setup); - var plannerOptions = CreatePlannerOptions(setup); + var plannerOptions = CreatePlannerOptions(setup, options); var parserOptions = CreateParserOptions(setup); var clientConfigurations = CreateClientConfigurations(setup, configuration.Settings.Document); var features = CreateSchemaFeatures( @@ -264,18 +264,21 @@ private static FusionRequestOptions CreateRequestOptions(FusionGatewaySetup setu return options; } - private static OperationPlannerOptions CreatePlannerOptions(FusionGatewaySetup setup) + private static OperationPlannerOptions CreatePlannerOptions(FusionGatewaySetup setup, FusionOptions options) { - var options = new OperationPlannerOptions(); + var plannerOptions = new OperationPlannerOptions + { + EnableDefer = options.EnableDefer + }; foreach (var configure in setup.PlannerOptionsModifiers) { - configure.Invoke(options); + configure.Invoke(plannerOptions); } - options.MakeReadOnly(); + 
plannerOptions.MakeReadOnly(); - return options; + return plannerOptions; } private static ParserOptions CreateParserOptions(FusionGatewaySetup setup) diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferCondition.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferCondition.cs new file mode 100644 index 00000000000..445a3e3dd28 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferCondition.cs @@ -0,0 +1,105 @@ +using System.Diagnostics.CodeAnalysis; +using HotChocolate.Execution; +using HotChocolate.Language; +using HotChocolate.Types; + +namespace HotChocolate.Fusion.Execution.Nodes; + +internal readonly struct DeferCondition(string? ifVariableName) : IEquatable +{ + public string? IfVariableName => ifVariableName; + + public bool IsDeferred(IVariableValueCollection variableValues) + { + if (ifVariableName is not null) + { + if (!variableValues.TryGetValue(ifVariableName, out var value)) + { + throw new InvalidOperationException($"The variable {ifVariableName} has an invalid value."); + } + + if (!value.Value) + { + return false; + } + } + + return true; + } + + public bool Equals(DeferCondition other) + => string.Equals(ifVariableName, other.IfVariableName, StringComparison.Ordinal); + + public override bool Equals([NotNullWhen(true)] object? 
obj) + => obj is DeferCondition other && Equals(other); + + public override int GetHashCode() + => HashCode.Combine(ifVariableName); + + public static bool TryCreate(InlineFragmentNode inlineFragment, out DeferCondition deferCondition) + => TryCreate(inlineFragment.Directives, out deferCondition); + + public static bool TryCreate(FragmentSpreadNode fragmentSpread, out DeferCondition deferCondition) + => TryCreate(fragmentSpread.Directives, out deferCondition); + + private static bool TryCreate(IReadOnlyList directives, out DeferCondition deferCondition) + { + if (directives.Count == 0) + { + deferCondition = default; + return false; + } + + for (var i = 0; i < directives.Count; i++) + { + var directive = directives[i]; + + if (!directive.Name.Value.Equals(DirectiveNames.Defer.Name, StringComparison.Ordinal)) + { + continue; + } + + // @defer with no arguments is unconditionally deferred. + if (directive.Arguments.Count == 0) + { + deferCondition = new DeferCondition(null); + return true; + } + + for (var j = 0; j < directive.Arguments.Count; j++) + { + var argument = directive.Arguments[j]; + + if (!argument.Name.Value.Equals(DirectiveNames.Defer.Arguments.If, StringComparison.Ordinal)) + { + continue; + } + + switch (argument.Value) + { + // @defer(if: $variable) - conditionally deferred at runtime. + case VariableNode variable: + deferCondition = new DeferCondition(variable.Name.Value); + return true; + + // @defer(if: true) - unconditionally deferred. + case BooleanValueNode { Value: true }: + deferCondition = new DeferCondition(null); + return true; + + // @defer(if: false) - statically not deferred, no condition needed. + case BooleanValueNode { Value: false }: + deferCondition = default; + return false; + } + } + + // @defer directive found but no `if` argument matched - unconditionally deferred. 
+ deferCondition = new DeferCondition(null); + return true; + } + + deferCondition = default; + return false; + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferConditionCollection.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferConditionCollection.cs new file mode 100644 index 00000000000..01bf8689c2a --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferConditionCollection.cs @@ -0,0 +1,50 @@ +using System.Collections; + +namespace HotChocolate.Fusion.Execution.Nodes; + +internal sealed class DeferConditionCollection : ICollection +{ + private readonly OrderedDictionary _dictionary = []; + + public DeferCondition this[int index] + => _dictionary.GetAt(index).Key; + + public int Count => _dictionary.Count; + + public bool IsReadOnly => false; + + public bool Add(DeferCondition item) + { + if (_dictionary.Count == 64) + { + throw new InvalidOperationException( + "The maximum number of defer conditions has been reached."); + } + + return _dictionary.TryAdd(item, _dictionary.Count); + } + + void ICollection.Add(DeferCondition item) + => Add(item); + + public bool Remove(DeferCondition item) + => throw new InvalidOperationException("This is an add only collection."); + + void ICollection.Clear() + => throw new InvalidOperationException("This is an add only collection."); + + public bool Contains(DeferCondition item) + => _dictionary.ContainsKey(item); + + public int IndexOf(DeferCondition item) + => _dictionary.GetValueOrDefault(item, -1); + + public void CopyTo(DeferCondition[] array, int arrayIndex) + => _dictionary.Keys.CopyTo(array, arrayIndex); + + public IEnumerator GetEnumerator() + => _dictionary.Keys.GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() + => GetEnumerator(); +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferUsage.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferUsage.cs new file mode 100644 
index 00000000000..3e5b908c22c --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/DeferUsage.cs @@ -0,0 +1,51 @@ +using HotChocolate.Execution; + +namespace HotChocolate.Fusion.Execution.Nodes; + +/// +/// Represents a usage of the @defer directive. This is the plan-level identity +/// of a single deferred fragment (what the GraphQL Incremental Delivery spec calls a +/// "delivery group"). One instance per @defer occurrence in an operation; +/// instances form a parent chain to model nested defer scopes and are referenced +/// by identity from 's active defer-usage set. +/// +/// +/// The optional label from @defer(label: "..."), used to identify the deferred +/// payload in the incremental delivery response. +/// +/// +/// The parent defer usage when this @defer is nested inside another deferred fragment, +/// or null if this is a top-level defer. +/// +/// +/// The index into the for the if condition +/// associated with this defer directive. This index maps to a bit position in the +/// runtime defer flags bitmask. +/// +public sealed record DeferUsage( + string? Label, + DeferUsage? Parent, + byte DeferConditionIndex) +{ + /// + /// A plan-stable numeric identifier for this defer usage, assigned when the + /// is built. Serialized as the delivery group's + /// identity and used as the id in pending, incremental, + /// and completed entries of the incremental delivery response. + /// + public int Id { get; init; } = -1; + + /// + /// The selection path to the object in the response tree whose selection set + /// contains this @defer. Used as the path of the corresponding + /// pending entry on the wire. + /// + public SelectionPath? Path { get; init; } + + /// + /// The variable name from @defer(if: $var), or null when this + /// defer is unconditional. Runtime activation of this defer uses this variable + /// together with . + /// + public string? 
IfVariable { get; init; } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionSubPlan.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionSubPlan.cs new file mode 100644 index 00000000000..03a0285b13b --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/ExecutionSubPlan.cs @@ -0,0 +1,77 @@ +using System.Collections.Immutable; + +namespace HotChocolate.Fusion.Execution.Nodes; + +/// +/// Represents a plan for executing the fields that belong to a specific +/// DeferUsageSet. One is emitted per +/// unique non-empty active defer usage set in the operation. Its data is +/// delivered to every in +/// when the subplan completes. +/// +public sealed class ExecutionSubPlan +{ + /// + /// Initializes a new instance of . + /// + /// + /// The compiled operation for this subplan's result mapping. + /// + /// + /// The root execution nodes that serve as entry points for this subplan. + /// + /// + /// All execution nodes belonging to this subplan. + /// + /// + /// The set that keys this subplan, sorted ascending + /// by . Every element is a delivery group that + /// receives this subplan's data on the wire when the subplan completes. + /// + public ExecutionSubPlan( + Operation operation, + ImmutableArray rootNodes, + ImmutableArray allNodes, + ImmutableArray deliveryGroups) + { + Operation = operation; + RootNodes = rootNodes; + AllNodes = allNodes; + DeliveryGroups = deliveryGroups; + } + + /// + /// Gets the compiled operation for this subplan. + /// This is a standalone operation compiled from the rewritten subplan AST, + /// used for result mapping during execution. + /// + public Operation Operation { get; } + + /// + /// Gets the root execution nodes that serve as entry points for this subplan. + /// + public ImmutableArray RootNodes { get; } + + /// + /// Gets all execution nodes belonging to this subplan. 
+ /// + public ImmutableArray AllNodes { get; } + + /// + /// Gets the set that keys this subplan, sorted + /// ascending by . When this subplan completes, + /// every in this set receives the subplan's data + /// as an incremental payload on the wire. + /// + public ImmutableArray DeliveryGroups { get; } + + /// + /// Gets the in the owning plan (the main plan + /// for top-level subplans, the parent subplan's plan for nested subplans) + /// whose fetch resolves the selection set where this subplan was anchored. + /// Always populated for a sealed plan; query plan visualizers can use this + /// to attach the subplan to the node that produces its enclosing data. + /// Set during plan construction. + /// + public int ParentNodeId { get; internal set; } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/FieldSelectionNode.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/FieldSelectionNode.cs index 8c7549306cd..29ca8d9724f 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/FieldSelectionNode.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/FieldSelectionNode.cs @@ -3,7 +3,7 @@ namespace HotChocolate.Fusion.Execution.Nodes; /// -/// Represents a field selection node with its path include flags. +/// Represents a field selection node with its path include flags and defer usage. /// /// /// The syntax node that represents the field selection. @@ -11,4 +11,11 @@ namespace HotChocolate.Fusion.Execution.Nodes; /// /// The flags that must be all set for this selection to be included. /// -public sealed record FieldSelectionNode(FieldNode Node, ulong PathIncludeFlags); +/// +/// The defer usage context this field was collected under, or null if the field +/// is not inside a deferred fragment. +/// +public sealed record FieldSelectionNode( + FieldNode Node, + ulong PathIncludeFlags, + DeferUsage? 
DeferUsage = null); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Operation.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Operation.cs index ec1196b1b0f..757287347ad 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Operation.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Operation.cs @@ -20,7 +20,10 @@ public sealed class Operation : IOperation private readonly ConcurrentDictionary<(int, string), SelectionSet> _selectionSets = []; private readonly OperationCompiler _compiler; private readonly IncludeConditionCollection _includeConditions; + private readonly DeferConditionCollection _deferConditions; + private readonly IReadOnlyDictionary _deferUsageByFragment; private readonly OperationFeatureCollection _features; + private readonly bool _hasIncrementalParts; private object[] _elementsById; private int _lastId; @@ -33,6 +36,9 @@ internal Operation( SelectionSet rootSelectionSet, OperationCompiler compiler, IncludeConditionCollection includeConditions, + DeferConditionCollection deferConditions, + IReadOnlyDictionary deferUsageByFragment, + bool hasIncrementalParts, int lastId, object[] elementsById) { @@ -44,6 +50,8 @@ internal Operation( ArgumentNullException.ThrowIfNull(rootSelectionSet); ArgumentNullException.ThrowIfNull(compiler); ArgumentNullException.ThrowIfNull(includeConditions); + ArgumentNullException.ThrowIfNull(deferConditions); + ArgumentNullException.ThrowIfNull(deferUsageByFragment); ArgumentNullException.ThrowIfNull(elementsById); Id = id; @@ -54,6 +62,9 @@ internal Operation( RootSelectionSet = rootSelectionSet; _compiler = compiler; _includeConditions = includeConditions; + _deferConditions = deferConditions; + _deferUsageByFragment = deferUsageByFragment; + _hasIncrementalParts = hasIncrementalParts; _lastId = lastId; _elementsById = elementsById; @@ -105,7 +116,7 @@ ISelectionSet IOperation.RootSelectionSet /// public IFeatureCollection Features => 
_features; - public bool HasIncrementalParts => throw new NotImplementedException(); + public bool HasIncrementalParts => _hasIncrementalParts; /// /// Gets the selection set for the specified @@ -165,6 +176,7 @@ public SelectionSet GetSelectionSet(Selection selection, IObjectTypeDefinition t selection, (FusionObjectTypeDefinition)typeContext, _includeConditions, + _deferUsageByFragment, ref _elementsById, ref _lastId); selectionSet.Seal(this); @@ -237,6 +249,33 @@ public ulong CreateIncludeFlags(IVariableValueCollection variables) return includeFlags; } + /// + /// Creates the defer flags for the specified variable values. + /// + /// + /// The variable values. + /// + /// + /// Returns the defer flags for the specified variable values. + /// + public ulong CreateDeferFlags(IVariableValueCollection variables) + { + var index = 0; + var deferFlags = 0ul; + + foreach (var deferCondition in _deferConditions) + { + if (deferCondition.IsDeferred(variables)) + { + deferFlags |= 1ul << index; + } + + index++; + } + + return deferFlags; + } + [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Selection GetSelectionById(int id) => Unsafe.As(Unsafe.Add(ref MemoryMarshal.GetArrayDataReference(_elementsById), id)); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationCompiler.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationCompiler.cs index 1970fcac583..b95692b9afe 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationCompiler.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationCompiler.cs @@ -1,4 +1,5 @@ using System.Buffers; +using HotChocolate.Fusion.Planning; using HotChocolate.Fusion.Rewriters; using HotChocolate.Fusion.Types; using HotChocolate.Language; @@ -45,7 +46,19 @@ public Operation Compile(string id, string hash, OperationDefinitionNode operati operationDefinition = (OperationDefinitionNode)document.Definitions[0]; var includeConditions = new 
IncludeConditionCollection(); + var deferConditions = new DeferConditionCollection(); IncludeConditionVisitor.Instance.Visit(operationDefinition, includeConditions); + + // Scans the operation for @defer fragments and creates one + // DeferUsage object for each. Also fills deferConditions with any + // @defer(if: ...) expressions found along the way. + // + // Important: each @defer must always map to the same DeferUsage + // instance. Runtime matching checks if two are the same object, + // not just equal in content, so handing back a fresh copy with + // equal field values would silently break it. + var partitioning = DeferPartitioner.Partition(operationDefinition, deferConditions); + var fields = _fieldsPool.Get(); var compilationContext = new CompilationContext(s_objectArrayPool.Rent(128)); @@ -61,7 +74,11 @@ public Operation Compile(string id, string hash, OperationDefinitionNode operati operationDefinition.SelectionSet.Selections, rootType, fields, - includeConditions); + includeConditions, + partitioning.ByFragment, + parentDeferUsage: null); + + var hasIncrementalParts = HasDeferDirective(operationDefinition); var selectionSet = BuildSelectionSet( fields, @@ -80,8 +97,11 @@ public Operation Compile(string id, string hash, OperationDefinitionNode operati selectionSet, this, includeConditions, + deferConditions, + partitioning.ByFragment, + hasIncrementalParts, lastId, - compilationContext.ElementsById); // Pass the populated array + compilationContext.ElementsById); } finally { @@ -93,6 +113,7 @@ internal SelectionSet CompileSelectionSet( Selection selection, FusionObjectTypeDefinition objectType, IncludeConditionCollection includeConditions, + IReadOnlyDictionary deferUsageByFragment, ref object[] elementsById, ref int lastId) { @@ -110,7 +131,9 @@ internal SelectionSet CompileSelectionSet( first.Node.SelectionSet!.Selections, objectType, fields, - includeConditions); + includeConditions, + deferUsageByFragment, + parentDeferUsage: first.DeferUsage); if 
(nodes.Length > 1) { @@ -123,7 +146,9 @@ internal SelectionSet CompileSelectionSet( node.Node.SelectionSet!.Selections, objectType, fields, - includeConditions); + includeConditions, + deferUsageByFragment, + parentDeferUsage: nodes[i].DeferUsage); } } @@ -143,7 +168,9 @@ private void CollectFields( IReadOnlyList selections, IObjectTypeDefinition typeContext, OrderedDictionary> fields, - IncludeConditionCollection includeConditions) + IncludeConditionCollection includeConditions, + IReadOnlyDictionary deferUsageByFragment, + DeferUsage? parentDeferUsage) { for (var i = 0; i < selections.Count; i++) { @@ -166,10 +193,9 @@ private void CollectFields( pathIncludeFlags |= 1ul << index; } - nodes.Add(new FieldSelectionNode(fieldNode, pathIncludeFlags)); + nodes.Add(new FieldSelectionNode(fieldNode, pathIncludeFlags, parentDeferUsage)); } - - if (selection is InlineFragmentNode inlineFragmentNode + else if (selection is InlineFragmentNode inlineFragmentNode && DoesTypeApply(inlineFragmentNode.TypeCondition, typeContext)) { var pathIncludeFlags = parentIncludeFlags; @@ -180,12 +206,22 @@ private void CollectFields( pathIncludeFlags |= 1ul << index; } + // Look up the canonical DeferUsage from the pre-computed + // partitioning. The partitioner created one instance per + // `... @defer` occurrence; using it here guarantees downstream + // set-identity comparisons work correctly. + var newDeferUsage = deferUsageByFragment.TryGetValue(inlineFragmentNode, out var canonical) + ? 
canonical + : parentDeferUsage; + CollectFields( pathIncludeFlags, inlineFragmentNode.SelectionSet.Selections, typeContext, fields, - includeConditions); + includeConditions, + deferUsageByFragment, + newDeferUsage); } } } @@ -199,21 +235,30 @@ private SelectionSet BuildSelectionSet( var i = 0; var selections = new Selection[fieldMap.Count]; var isConditional = false; + var hasIncrementalParts = false; var includeFlags = new List(); + var deferUsages = new List(); var selectionSetId = ++lastId; foreach (var (responseName, nodes) in fieldMap) { includeFlags.Clear(); + deferUsages.Clear(); var first = nodes[0]; var isInternal = IsInternal(first.Node); + var hasNonDeferredNode = first.DeferUsage is null; if (first.PathIncludeFlags > 0) { includeFlags.Add(first.PathIncludeFlags); } + if (first.DeferUsage is not null) + { + deferUsages.Add(first.DeferUsage); + } + if (nodes.Count > 1) { for (var j = 1; j < nodes.Count; j++) @@ -231,6 +276,15 @@ private SelectionSet BuildSelectionSet( includeFlags.Add(next.PathIncludeFlags); } + if (next.DeferUsage is null) + { + hasNonDeferredNode = true; + } + else if (!hasNonDeferredNode) + { + deferUsages.Add(next.DeferUsage); + } + if (isInternal) { isInternal = IsInternal(next.Node); @@ -243,6 +297,43 @@ private SelectionSet BuildSelectionSet( CollapseIncludeFlags(includeFlags); } + // If any field node is not inside a deferred fragment, the selection + // is not deferred, so it must be included in the initial response. + ulong deferMask = 0; + DeferUsage[]? selectionDeferUsages = null; + + if (!hasNonDeferredNode && deferUsages.Count > 0) + { + // Remove child defer usages when their parent is also in the set. + // A field should be delivered with the outermost (earliest) defer + // that contains it. 
+ for (var j = deferUsages.Count - 1; j >= 0; j--) + { + var parent = deferUsages[j].Parent; + while (parent is not null) + { + if (deferUsages.Contains(parent)) + { + deferUsages.RemoveAt(j); + break; + } + + parent = parent.Parent; + } + } + + foreach (var usage in deferUsages) + { + deferMask |= 1ul << usage.DeferConditionIndex; + } + + // Preserve the pruned list on the Selection so the runtime can + // answer GetActiveDeferUsages and HasActiveDeferUsage. + selectionDeferUsages = deferUsages.ToArray(); + + hasIncrementalParts = true; + } + IOutputFieldDefinition field = first.Node.Name.Value.Equals(IntrospectionFieldNames.TypeName) ? _typeNameField : typeContext.Fields.GetField(first.Node.Name.Value, allowInaccessibleFields: true); @@ -253,7 +344,9 @@ private SelectionSet BuildSelectionSet( field, nodes.ToArray(), includeFlags.ToArray(), - isInternal); + isInternal, + deferMask, + selectionDeferUsages); // Register the selection in the elements array compilationContext.Register(selection, selection.Id); @@ -265,7 +358,7 @@ private SelectionSet BuildSelectionSet( } } - return new SelectionSet(selectionSetId, typeContext, selections, isConditional); + return new SelectionSet(selectionSetId, typeContext, selections, isConditional, hasIncrementalParts); } private static void CollapseIncludeFlags(List includeFlags) @@ -396,6 +489,65 @@ private static bool IsInternal(FieldNode fieldNode) return false; } + private static bool HasDeferDirective(OperationDefinitionNode operation) + => DeferDetectionVisitor.Instance.HasDefer(operation); + + private sealed class DeferDetectionVisitor : SyntaxWalker + { + public static readonly DeferDetectionVisitor Instance = new(); + + public bool HasDefer(OperationDefinitionNode operation) + { + var context = new Context(); + Visit(operation, context); + return context.Found; + } + + protected override ISyntaxVisitorAction Enter( + InlineFragmentNode node, + Context context) + { + if (HasDeferDirectiveOnNode(node.Directives)) + { + 
context.Found = true; + return Break; + } + + return base.Enter(node, context); + } + + protected override ISyntaxVisitorAction Enter( + FragmentSpreadNode node, + Context context) + { + if (HasDeferDirectiveOnNode(node.Directives)) + { + context.Found = true; + return Break; + } + + return base.Enter(node, context); + } + + private static bool HasDeferDirectiveOnNode(IReadOnlyList directives) + { + for (var i = 0; i < directives.Count; i++) + { + if (directives[i].Name.Value.Equals(DirectiveNames.Defer.Name, StringComparison.Ordinal)) + { + return true; + } + } + + return false; + } + + internal sealed class Context + { + public bool Found; + } + } + private class IncludeConditionVisitor : SyntaxWalker { public static readonly IncludeConditionVisitor Instance = new(); @@ -425,6 +577,23 @@ protected override ISyntaxVisitorAction Enter( } } + private class DeferConditionVisitor : SyntaxWalker + { + public static readonly DeferConditionVisitor Instance = new(); + + protected override ISyntaxVisitorAction Enter( + InlineFragmentNode node, + DeferConditionCollection context) + { + if (DeferCondition.TryCreate(node, out var condition)) + { + context.Add(condition); + } + + return base.Enter(node, context); + } + } + private class CompilationContext(object[] elementsById) { private object[] _elementsById = elementsById; diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs index 63bad9f78f2..5db55ca97c2 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/OperationPlan.cs @@ -20,6 +20,8 @@ private OperationPlan( Operation operation, ImmutableArray rootNodes, ImmutableArray allNodes, + ImmutableArray deliveryGroups, + ImmutableArray deferredSubPlans, int searchSpace, int expandedNodes) { @@ -29,6 +31,8 @@ private OperationPlan( AllNodes = allNodes; 
SearchSpace = searchSpace; ExpandedNodes = expandedNodes; + DeliveryGroups = deliveryGroups; + DeferredSubPlans = deferredSubPlans; _nodesById = CreateNodeLookup(allNodes); MaxNodeId = _nodesById.Length > 0 ? _nodesById.Length - 1 : 0; } @@ -74,6 +78,24 @@ public IReadOnlyList VariableDefinitions /// public int ExpandedNodes { get; } + /// + /// Gets every (delivery group) this plan uses, in + /// ascending order. One element per @defer + /// occurrence in the operation. Empty if the operation has no @defer + /// directives. + /// + public ImmutableArray DeliveryGroups { get; } + + /// + /// Gets the deferred execution subplans for this plan. Each subplan is + /// keyed by a unique DeferUsageSet and fetches the fields whose + /// active defer usage set matches that key. The subplan's data is + /// delivered to every in its + /// when it completes. + /// Empty if the operation has no @defer directives. + /// + public ImmutableArray DeferredSubPlans { get; } + /// /// Gets the maximum node identifier across all nodes in this plan. /// @@ -131,6 +153,14 @@ public ExecutionNode GetExecutionNode(IOperationPlanNode planNode) /// The GraphQL operation. /// The root execution nodes. /// All execution nodes in the plan. + /// + /// Every (delivery group) this plan uses, in ascending + /// order. + /// + /// + /// The deferred execution subplans for @defer support, one per unique + /// DeferUsageSet. + /// /// A number specifying how many possible plans were considered during planning. /// The number of expanded nodes during planner search. /// A new instance. 
@@ -142,6 +172,8 @@ public static OperationPlan Create( Operation operation, ImmutableArray rootNodes, ImmutableArray allNodes, + ImmutableArray deliveryGroups, + ImmutableArray deferredSubPlans, int searchSpace, int expandedNodes) { @@ -150,7 +182,8 @@ public static OperationPlan Create( ArgumentOutOfRangeException.ThrowIfLessThan(rootNodes.Length, 0); ArgumentOutOfRangeException.ThrowIfLessThan(allNodes.Length, 0); - return new OperationPlan(id, operation, rootNodes, allNodes, searchSpace, expandedNodes); + return new OperationPlan( + id, operation, rootNodes, allNodes, deliveryGroups, deferredSubPlans, searchSpace, expandedNodes); } /// @@ -160,6 +193,14 @@ public static OperationPlan Create( /// The GraphQL operation. /// The root execution nodes. /// All execution nodes in the plan. + /// + /// Every (delivery group) this plan uses, in ascending + /// order. + /// + /// + /// The deferred execution subplans for @defer support, one per unique + /// DeferUsageSet. + /// /// A number specifying how many possible plans were considered during planning. /// The number of expanded nodes during planner search. /// A new instance with a content-based identifier. 
@@ -169,6 +210,8 @@ public static OperationPlan Create( Operation operation, ImmutableArray rootNodes, ImmutableArray allNodes, + ImmutableArray deliveryGroups, + ImmutableArray deferredSubPlans, int searchSpace, int expandedNodes) { @@ -192,7 +235,8 @@ public static OperationPlan Create( var id = Convert.ToHexString(buffer.WrittenSpan[^32..]).ToLowerInvariant(); #endif - return new OperationPlan(id, operation, rootNodes, allNodes, searchSpace, expandedNodes); + return new OperationPlan( + id, operation, rootNodes, allNodes, deliveryGroups, deferredSubPlans, searchSpace, expandedNodes); } private static ExecutionNode?[] CreateNodeLookup(ImmutableArray allNodes) diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Selection.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Selection.cs index 3b2e19b56b5..b8a9b9afa7f 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Selection.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Selection.cs @@ -1,3 +1,4 @@ +using System.Runtime.CompilerServices; using HotChocolate.Execution; using HotChocolate.Fusion.Text; using HotChocolate.Language; @@ -10,9 +11,13 @@ namespace HotChocolate.Fusion.Execution.Nodes; /// public sealed class Selection : ISelection { + private static readonly DeferUsage[] s_emptyDeferUsages = []; + private readonly FieldSelectionNode[] _syntaxNodes; private readonly ulong[] _includeFlags; private readonly byte[] _utf8ResponseName; + private readonly ulong _deferMask; + private readonly DeferUsage[] _deferUsages; private Flags _flags; public Selection( @@ -21,7 +26,9 @@ public Selection( IOutputFieldDefinition field, FieldSelectionNode[] syntaxNodes, ulong[] includeFlags, - bool isInternal) + bool isInternal, + ulong deferMask = 0, + DeferUsage[]? 
deferUsages = null) { ArgumentNullException.ThrowIfNull(field); @@ -37,6 +44,8 @@ public Selection( Field = field; _syntaxNodes = syntaxNodes; _includeFlags = includeFlags; + _deferMask = deferMask; + _deferUsages = deferUsages ?? s_emptyDeferUsages; _flags = isInternal ? Flags.Internal : Flags.None; if (field.Type.NamedType().IsLeafType()) @@ -162,9 +171,158 @@ internal void Seal(SelectionSet selectionSet) DeclaringSelectionSet = selectionSet; } - public bool IsDeferred(ulong deferFlags) + public bool IsDeferred(ulong deferFlags) => (_deferMask & deferFlags) != 0; + + /// + /// Returns the active defer usages for this selection given the runtime + /// , after resolving inactive defers to their + /// nearest active ancestor and applying parent-child pruning (ancestors win). + /// Returns null when any occurrence of the field falls outside an + /// active defer chain (meaning the field belongs in the initial response). + /// + public DeferUsage[]? GetActiveDeferUsages(ulong deferFlags) { - throw new NotImplementedException(); + if (_deferUsages.Length == 0) + { + return null; + } + + if (_deferUsages.Length == 1) + { + var active = ResolveActiveAncestor(_deferUsages[0], deferFlags); + return active is null ? null : [active]; + } + + DeferUsage[]? result = null; + var count = 0; + + for (var i = 0; i < _deferUsages.Length; i++) + { + var effective = ResolveActiveAncestor(_deferUsages[i], deferFlags); + + if (effective is null) + { + // One occurrence is non-deferred; the field is non-deferred overall. + return null; + } + + var duplicate = false; + if (result is not null) + { + for (var j = 0; j < count; j++) + { + if (result[j] == effective) + { + duplicate = true; + break; + } + } + } + + if (!duplicate) + { + result ??= new DeferUsage[_deferUsages.Length]; + result[count++] = effective; + } + } + + if (result is null || count == 0) + { + return null; + } + + // Parent-child pruning: if a parent and child are both in the set, + // keep only the outermost. 
+ for (var i = count - 1; i >= 0; i--) + { + var ancestor = result[i].Parent; + + while (ancestor is not null) + { + for (var j = 0; j < count; j++) + { + if (j != i && result[j] == ancestor) + { + result[i] = result[--count]; + goto nextItem; + } + } + + ancestor = ancestor.Parent; + } + +nextItem: + ; + } + + if (count == 0) + { + return null; + } + + if (count < result.Length) + { + Array.Resize(ref result, count); + } + + return result; + } + + /// + /// Determines whether is among this selection's + /// active defer usages under the runtime + /// (using the same parent-chain walk and parent-child pruning as + /// ). + /// + public bool HasActiveDeferUsage(ulong deferFlags, DeferUsage target) + { + if (_deferUsages.Length == 0) + { + return false; + } + + var found = false; + + for (var i = 0; i < _deferUsages.Length; i++) + { + var effective = ResolveActiveAncestor(_deferUsages[i], deferFlags); + + if (effective is null) + { + // Any non-deferred occurrence makes the whole field non-deferred. + return false; + } + + if (effective == target) + { + found = true; + } + } + + return found; + } + + // Walks up the @defer parent chain and returns the first one that is + // actually turned on for this request (its bit in deferFlags is set). + // A nested @defer whose own `if:` is false falls back to its enclosing + // @defer. If none on the chain are active, returns null, meaning the + // field is not deferred at this occurrence. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static DeferUsage? 
ResolveActiveAncestor(DeferUsage start, ulong deferFlags) + { + var usage = start; + + while (usage is not null) + { + if ((deferFlags & (1UL << usage.DeferConditionIndex)) != 0) + { + return usage; + } + + usage = usage.Parent; + } + + return null; } [Flags] diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/SelectionSet.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/SelectionSet.cs index 8f8e08a96e2..48f2154c27c 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/SelectionSet.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/SelectionSet.cs @@ -18,7 +18,12 @@ public sealed class SelectionSet : ISelectionSet private readonly SelectionLookup _utf8ResponseNameLookup; private bool _isSealed; - public SelectionSet(int id, IObjectTypeDefinition type, Selection[] selections, bool isConditional) + public SelectionSet( + int id, + IObjectTypeDefinition type, + Selection[] selections, + bool isConditional, + bool hasIncrementalParts) { ArgumentNullException.ThrowIfNull(selections); @@ -30,6 +35,7 @@ public SelectionSet(int id, IObjectTypeDefinition type, Selection[] selections, Id = id; Type = type; IsConditional = isConditional; + HasIncrementalParts = hasIncrementalParts; _selections = selections; _responseNameLookup = _selections.ToFrozenDictionary(t => t.ResponseName); _utf8ResponseNameLookup = SelectionLookup.Create(this); @@ -62,7 +68,10 @@ public SelectionSet(int id, IObjectTypeDefinition type, Selection[] selections, /// public ReadOnlySpan Selections => _selections; - public bool HasIncrementalParts => throw new NotImplementedException(); + /// + /// Gets a value indicating whether the selection set contains deferred selections. 
+ /// + public bool HasIncrementalParts { get; } IEnumerable ISelectionSet.GetSelections() => _selections; diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs index 03094025086..52551385c67 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanFormatter.cs @@ -4,6 +4,7 @@ using System.Text.Encodings.Web; using System.Text.Json; using HotChocolate.Buffers; +using HotChocolate.Execution; using JsonWriter = HotChocolate.Text.Json.JsonWriter; namespace HotChocolate.Fusion.Execution.Nodes.Serialization; @@ -83,6 +84,9 @@ public void Format(IBufferWriter writer, OperationPlan plan, OperationPlan jsonWriter.WritePropertyName("nodes"); WriteNodes(jsonWriter, plan.Operation, plan.AllNodes, trace); + WriteDeliveryGroups(jsonWriter, plan.DeliveryGroups); + WriteDeferredSubPlans(jsonWriter, plan.DeferredSubPlans); + jsonWriter.WriteEndObject(); } @@ -166,6 +170,110 @@ private static void WriteNodes( jsonWriter.WriteEndArray(); } + private static void WriteDeliveryGroups( + JsonWriter jsonWriter, + ImmutableArray deliveryGroups) + { + if (deliveryGroups.IsDefaultOrEmpty) + { + return; + } + + jsonWriter.WritePropertyName("deliveryGroups"); + jsonWriter.WriteStartArray(); + + foreach (var deliveryGroup in deliveryGroups) + { + WriteDeliveryGroup(jsonWriter, deliveryGroup); + } + + jsonWriter.WriteEndArray(); + } + + private static void WriteDeliveryGroup( + JsonWriter jsonWriter, + DeferUsage deliveryGroup) + { + jsonWriter.WriteStartObject(); + + jsonWriter.WritePropertyName("id"); + jsonWriter.WriteNumberValue(deliveryGroup.Id); + + jsonWriter.WritePropertyName("path"); + jsonWriter.WriteStringValue((deliveryGroup.Path ?? 
SelectionPath.Root).ToString()); + + if (deliveryGroup.Label is not null) + { + jsonWriter.WritePropertyName("label"); + jsonWriter.WriteStringValue(deliveryGroup.Label); + } + + if (deliveryGroup.IfVariable is not null) + { + jsonWriter.WritePropertyName("ifVariable"); + jsonWriter.WriteStringValue("$" + deliveryGroup.IfVariable); + } + + if (deliveryGroup.Parent is not null) + { + jsonWriter.WritePropertyName("parentId"); + jsonWriter.WriteNumberValue(deliveryGroup.Parent.Id); + } + + jsonWriter.WriteEndObject(); + } + + private static void WriteDeferredSubPlans( + JsonWriter jsonWriter, + ImmutableArray deferredSubPlans) + { + if (deferredSubPlans.IsDefaultOrEmpty) + { + return; + } + + jsonWriter.WritePropertyName("deferredSubPlans"); + jsonWriter.WriteStartArray(); + + foreach (var subPlan in deferredSubPlans) + { + WriteDeferredSubPlan(jsonWriter, subPlan); + } + + jsonWriter.WriteEndArray(); + } + + private static void WriteDeferredSubPlan( + JsonWriter jsonWriter, + ExecutionSubPlan subPlan) + { + jsonWriter.WriteStartObject(); + + jsonWriter.WritePropertyName("deliveryGroupIds"); + jsonWriter.WriteStartArray(); + + foreach (var deliveryGroup in subPlan.DeliveryGroups) + { + jsonWriter.WriteNumberValue(deliveryGroup.Id); + } + + jsonWriter.WriteEndArray(); + + jsonWriter.WritePropertyName("parentNodeId"); + jsonWriter.WriteNumberValue(subPlan.ParentNodeId); + + jsonWriter.WritePropertyName("operation"); + WriteOperation(jsonWriter, subPlan.Operation); + + if (!subPlan.AllNodes.IsDefaultOrEmpty) + { + jsonWriter.WritePropertyName("nodes"); + WriteNodes(jsonWriter, subPlan.Operation, subPlan.AllNodes, null); + } + + jsonWriter.WriteEndObject(); + } + private static void WriteOperationNode( JsonWriter jsonWriter, Operation operation, diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs index 
f00c3286ba2..cae359f0baa 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/JsonOperationPlanParser.cs @@ -52,6 +52,20 @@ public override OperationPlan Parse(ReadOnlyMemory planSourceText) var nodes = ParseNodes(rootElement.GetProperty("nodes"), operation); + var deliveryGroups = ImmutableArray.Empty; + var deferredSubPlans = ImmutableArray.Empty; + var deliveryGroupMap = new Dictionary(); + + if (rootElement.TryGetProperty("deliveryGroups", out var deliveryGroupsElement)) + { + deliveryGroups = ParseDeliveryGroups(deliveryGroupsElement, deliveryGroupMap); + } + + if (rootElement.TryGetProperty("deferredSubPlans", out var deferredSubPlansElement)) + { + deferredSubPlans = ParseDeferredSubPlans(deferredSubPlansElement, deliveryGroupMap); + } + // Root nodes are the entry points of the execution plan. A node is a // root when it has no dependencies at all, meaning the executor can // start it immediately without waiting for other nodes to finish. @@ -60,10 +74,117 @@ public override OperationPlan Parse(ReadOnlyMemory planSourceText) operation, [.. nodes.Where(n => n.Dependencies.Length == 0 && n.OptionalDependencies.Length == 0)], nodes, + deliveryGroups, + deferredSubPlans, searchSpace, expandedNodes); } + private static ImmutableArray ParseDeliveryGroups( + JsonElement deliveryGroupsElement, + Dictionary deliveryGroupMap) + { + // Phase 1: Construct every DeferUsage without resolving parent references. + // Parents are captured as numeric ids for the second pass because a parent + // may appear after its child in the serialized array. + var ordered = new List<(DeferUsage Usage, int? ParentId)>(); + + foreach (var groupElement in deliveryGroupsElement.EnumerateArray()) + { + var deferId = groupElement.GetProperty("id").GetInt32(); + var path = SelectionPath.Parse(groupElement.GetProperty("path").GetString()!); + + string? 
label = null; + if (groupElement.TryGetProperty("label", out var labelElement)) + { + label = labelElement.GetString(); + } + + string? ifVariable = null; + if (groupElement.TryGetProperty("ifVariable", out var ifVarElement) + && ifVarElement.ValueKind == JsonValueKind.String) + { + ifVariable = ifVarElement.GetString()!.TrimStart('$'); + } + + int? parentId = null; + if (groupElement.TryGetProperty("parentId", out var parentIdElement) + && parentIdElement.ValueKind == JsonValueKind.Number) + { + parentId = parentIdElement.GetInt32(); + } + + var deliveryGroup = new DeferUsage(label, Parent: null, DeferConditionIndex: 0) + { + Id = deferId, + Path = path, + IfVariable = ifVariable + }; + + ordered.Add((deliveryGroup, parentId)); + deliveryGroupMap[deferId] = deliveryGroup; + } + + // Phase 2: Resolve every parent id against the map and rebuild the records + // so their Parent references point at the final, canonical instances. + // Update the map in place so downstream subplan parsing and the returned + // array both observe the same DeferUsage instances. + var builder = ImmutableArray.CreateBuilder(ordered.Count); + + foreach (var (usage, parentId) in ordered) + { + var resolved = parentId is null + ? 
usage + : usage with { Parent = deliveryGroupMap[parentId.Value] }; + + deliveryGroupMap[usage.Id] = resolved; + builder.Add(resolved); + } + + return builder.MoveToImmutable(); + } + + private ImmutableArray ParseDeferredSubPlans( + JsonElement deferredSubPlansElement, + Dictionary deliveryGroupMap) + { + var builder = ImmutableArray.CreateBuilder(); + + foreach (var subPlanElement in deferredSubPlansElement.EnumerateArray()) + { + var deliveryGroupIdsElement = subPlanElement.GetProperty("deliveryGroupIds"); + var subPlanDeliveryGroupsBuilder = ImmutableArray.CreateBuilder(); + + foreach (var idElement in deliveryGroupIdsElement.EnumerateArray()) + { + subPlanDeliveryGroupsBuilder.Add(deliveryGroupMap[idElement.GetInt32()]); + } + + var subPlanOperation = ParseOperation(subPlanElement.GetProperty("operation")); + + var subPlanNodes = subPlanElement.TryGetProperty("nodes", out var subPlanNodesElement) + ? ParseNodes(subPlanNodesElement, subPlanOperation) + : []; + + var rootSubPlanNodes = subPlanNodes + .Where(n => n.Dependencies.Length == 0 && n.OptionalDependencies.Length == 0) + .ToImmutableArray(); + + var subPlan = new ExecutionSubPlan( + subPlanOperation, + rootSubPlanNodes, + subPlanNodes, + subPlanDeliveryGroupsBuilder.ToImmutable()) + { + ParentNodeId = subPlanElement.GetProperty("parentNodeId").GetInt32() + }; + + builder.Add(subPlan); + } + + return builder.ToImmutable(); + } + private Operation ParseOperation(JsonElement operationElement) { var sourceText = operationElement.GetProperty("document").GetString()!; diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs index d3e2d16dbf8..1e85910beba 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs +++ 
b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Nodes/Serialization/YamlOperationPlanFormatter.cs @@ -1,4 +1,5 @@ using System.Text; +using HotChocolate.Execution; namespace HotChocolate.Fusion.Execution.Nodes.Serialization; @@ -25,29 +26,118 @@ public override string Format(OperationPlan plan, OperationPlanTrace? trace = nu ExecutionNodeTrace? nodeTrace = null; trace?.Nodes.TryGetValue(node.Id, out nodeTrace); - switch (node) + WriteNode(node, nodeTrace, writer); + } + + writer.Unindent(); + + if (!plan.DeliveryGroups.IsDefaultOrEmpty) + { + writer.WriteLine("deliveryGroups:"); + writer.Indent(); + + foreach (var deliveryGroup in plan.DeliveryGroups) { - case OperationExecutionNode operationNode: - WriteOperationNode(operationNode, nodeTrace, writer); - break; + WriteDeliveryGroup(deliveryGroup, writer); + } - case OperationBatchExecutionNode batchNode: - WriteBatchExecutionNode(batchNode, nodeTrace, writer); - break; + writer.Unindent(); + } - case IntrospectionExecutionNode introspectionNode: - WriteIntrospectionNode(introspectionNode, nodeTrace, writer); - break; + if (!plan.DeferredSubPlans.IsDefaultOrEmpty) + { + writer.WriteLine("deferredSubPlans:"); + writer.Indent(); - case NodeFieldExecutionNode nodeExecutionNode: - WriteNodeFieldNode(nodeExecutionNode, nodeTrace, writer); - break; + foreach (var subPlan in plan.DeferredSubPlans) + { + WriteDeferredSubPlan(subPlan, writer); } + + writer.Unindent(); } return sb.ToString(); } + private static void WriteNode(ExecutionNode node, ExecutionNodeTrace? 
nodeTrace, CodeWriter writer) + { + switch (node) + { + case OperationExecutionNode operationNode: + WriteOperationNode(operationNode, nodeTrace, writer); + break; + + case OperationBatchExecutionNode batchNode: + WriteBatchExecutionNode(batchNode, nodeTrace, writer); + break; + + case IntrospectionExecutionNode introspectionNode: + WriteIntrospectionNode(introspectionNode, nodeTrace, writer); + break; + + case NodeFieldExecutionNode nodeExecutionNode: + WriteNodeFieldNode(nodeExecutionNode, nodeTrace, writer); + break; + } + } + + private static void WriteDeliveryGroup(DeferUsage deliveryGroup, CodeWriter writer) + { + writer.WriteLine("- id: {0}", deliveryGroup.Id); + writer.Indent(); + + writer.WriteLine("path: {0}", (deliveryGroup.Path ?? SelectionPath.Root).ToString()); + + if (deliveryGroup.Label is not null) + { + writer.WriteLine("label: {0}", deliveryGroup.Label); + } + + if (deliveryGroup.IfVariable is not null) + { + writer.WriteLine("ifVariable: ${0}", deliveryGroup.IfVariable); + } + + if (deliveryGroup.Parent is not null) + { + writer.WriteLine("parentId: {0}", deliveryGroup.Parent.Id); + } + + writer.Unindent(); + } + + private static void WriteDeferredSubPlan(ExecutionSubPlan subPlan, CodeWriter writer) + { + writer.WriteLine("- deliveryGroupIds:"); + writer.Indent(); + writer.Indent(); + + foreach (var deliveryGroup in subPlan.DeliveryGroups) + { + writer.WriteLine("- {0}", deliveryGroup.Id); + } + + writer.Unindent(); + + writer.WriteLine("parentNodeId: {0}", subPlan.ParentNodeId); + + if (!subPlan.AllNodes.IsDefaultOrEmpty) + { + writer.WriteLine("nodes:"); + writer.Indent(); + + foreach (var node in subPlan.AllNodes) + { + WriteNode(node, nodeTrace: null, writer); + } + + writer.Unindent(); + } + + writer.Unindent(); + } + private static void WriteOperation( OperationPlan plan, OperationPlanTrace? 
trace, diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.Pooling.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.Pooling.cs index b897dc63a7c..d42f2de3935 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.Pooling.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.Pooling.cs @@ -30,11 +30,12 @@ internal void Initialize( ArgumentNullException.ThrowIfNull(variables); ArgumentNullException.ThrowIfNull(operationPlan); - _disposed = false; + _disposed = 0; RequestContext = requestContext; Variables = variables; OperationPlan = operationPlan; IncludeFlags = operationPlan.Operation.CreateIncludeFlags(variables); + DeferFlags = operationPlan.Operation.CreateDeferFlags(variables); _collectTelemetry = requestContext.CollectOperationPlanTelemetry(); _clientScope = requestContext.CreateClientScope(); @@ -44,6 +45,7 @@ internal void Initialize( operationPlan.Operation, requestContext.ErrorHandlingMode(), IncludeFlags, + DeferFlags, requestContext.Schema.GetOptions().PathSegmentLocalPoolCapacity); _executionState.Initialize(_collectTelemetry, cancellationTokenSource); @@ -75,6 +77,7 @@ internal void Clean() RequestContext = default!; Variables = default!; OperationPlan = default!; + DeferFlags = 0; _clientScope = default!; Traces = #if NET10_0_OR_GREATER @@ -98,15 +101,16 @@ internal void Destroy() public async ValueTask DisposeAsync() { - if (!_disposed) + if (Interlocked.Exchange(ref _disposed, 1) != 0) { - _disposed = true; - await _clientScope.DisposeAsync(); - - var pool = _pool; - _pool = null; - pool?.Return(this); + return; } + + await _clientScope.DisposeAsync(); + + var pool = _pool; + _pool = null; + pool?.Return(this); } private void EnsureNodeArrayCapacity(int maxNodeId) diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs 
b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs index be0608a228d..b5e5d430be9 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanContext.cs @@ -40,7 +40,7 @@ public sealed partial class OperationPlanContext : IFeatureProvider, IAsyncDispo private ISourceSchemaClientScope _clientScope = default!; private string? _traceId; private long _start; - private bool _disposed; + private int _disposed; private int _nodeSlotCapacity; internal OperationPlanContextPool? _pool; @@ -79,6 +79,11 @@ internal bool IsNodeSkipped(int nodeId) /// public ulong IncludeFlags { get; private set; } + /// + /// Gets the evaluated defer flags derived from @defer directives. + /// + public ulong DeferFlags { get; private set; } + /// /// Gets a value indicating whether operation plan telemetry is being collected for this request. /// diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs index 5c3278bbfed..3e366ee9e9b 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/OperationPlanExecutor.cs @@ -1,15 +1,18 @@ using System.Collections.Immutable; using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Threading.Channels; using HotChocolate.Execution; using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Fusion.Text.Json; using HotChocolate.Language; using Microsoft.Extensions.DependencyInjection; namespace HotChocolate.Fusion.Execution; -internal sealed class OperationPlanExecutor +internal static class OperationPlanExecutor { - public async Task ExecuteAsync( + public static async Task ExecuteAsync( RequestContext requestContext, IVariableValueCollection variables, OperationPlan operationPlan, @@ 
-46,7 +49,598 @@ public async Task ExecuteAsync( return context.Complete(); } - public async Task SubscribeAsync( + public static async Task ExecuteWithDeferAsync( + RequestContext requestContext, + IVariableValueCollection variables, + OperationPlan operationPlan, + CancellationToken cancellationToken) + { + // Execute the main (non-deferred) plan nodes first. + var executionCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + OperationPlanContext? context = null; + + try + { + context = requestContext.Schema.Services.GetRequiredService().Rent(); + context.Initialize(requestContext, variables, operationPlan, executionCts); + + context.Begin(); + + switch (operationPlan.Operation.Definition.Operation) + { + case OperationType.Query: + await ExecuteQueryAsync(context, operationPlan, executionCts.Token); + break; + + case OperationType.Mutation: + await ExecuteMutationAsync(context, operationPlan, executionCts.Token); + break; + + default: + throw new InvalidOperationException("Only queries and mutations can use @defer."); + } + + cancellationToken.ThrowIfCancellationRequested(); + + // Build the initial result. + var initialResult = context.Complete(); + + // Compute the active delivery groups (one per @defer occurrence whose + // @defer(if:) evaluates to true) and the subplans that will actually run. + // A subplan is active if at least one of its delivery groups is active. + var activeDeliveryGroupIds = new HashSet(); + foreach (var deliveryGroup in operationPlan.DeliveryGroups) + { + if (IsDeliveryGroupActive(deliveryGroup, variables)) + { + activeDeliveryGroupIds.Add(deliveryGroup.Id); + } + } + + // Announce every top-level active delivery group as pending on the + // initial payload. Nested delivery groups are announced when their + // parent's subplan completes. 
+ var pendingResults = ImmutableList.CreateBuilder(); + foreach (var deliveryGroup in operationPlan.DeliveryGroups) + { + if (deliveryGroup.Parent is not null) + { + continue; + } + + if (!activeDeliveryGroupIds.Contains(deliveryGroup.Id)) + { + continue; + } + + pendingResults.Add(new PendingResult( + deliveryGroup.Id, + BuildPath(deliveryGroup.Path ?? SelectionPath.Root), + deliveryGroup.Label)); + } + + initialResult.HasNext = pendingResults.Count > 0; + initialResult.Pending = pendingResults.ToImmutable(); + + if (pendingResults.Count == 0) + { + // No active deferred subplans (all conditions were false). + executionCts.Dispose(); + await context.DisposeAsync(); + return initialResult; + } + + // Return a ResponseStream that yields the initial result then deferred results. + var stream = new ResponseStream( + () => CreateIncrementalStream( + requestContext, + variables, + operationPlan, + initialResult, + activeDeliveryGroupIds, + cancellationToken), + ExecutionResultKind.DeferredResult); + + stream.RegisterForCleanup(context); + stream.RegisterForCleanup(executionCts); + return stream; + } + catch (Exception) + { + executionCts.Dispose(); + + if (context is not null) + { + await context.DisposeAsync(); + } + + throw; + } + } + + private static async IAsyncEnumerable CreateIncrementalStream( + RequestContext requestContext, + IVariableValueCollection variables, + OperationPlan operationPlan, + OperationResult initialResult, + HashSet activeDeliveryGroupIds, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + // Yield the initial result first. + yield return initialResult; + + var deferredSubPlans = operationPlan.DeferredSubPlans; + + // Per-delivery-group completion tracking. A delivery group is considered + // complete when every subplan whose DeliveryGroups contains it has + // finished. We also track which subplans are "active" so an inactive + // @defer(if: false) group does not block completion accounting. 
+ var pendingCountByDeliveryGroup = new Dictionary(); + foreach (var subPlan in deferredSubPlans) + { + if (!IsSubPlanActive(subPlan, activeDeliveryGroupIds)) + { + continue; + } + + foreach (var deliveryGroup in subPlan.DeliveryGroups) + { + if (!activeDeliveryGroupIds.Contains(deliveryGroup.Id)) + { + continue; + } + + pendingCountByDeliveryGroup[deliveryGroup.Id] = + pendingCountByDeliveryGroup.GetValueOrDefault(deliveryGroup.Id) + 1; + } + } + + // A subplan starts running once every delivery group it depends on has + // had its parent subplan dispatched. For now we keep the simpler rule + // used previously: a subplan is top-level when its first delivery group + // has no parent. Nested subplans are launched when their parent delivery + // group's subplan completes. + var started = new HashSet(); + var channel = Channel.CreateUnbounded<(ExecutionSubPlan SubPlan, OperationResult? Result, Exception? Error)>(); + var pendingSubPlanCount = 0; + + foreach (var subPlan in deferredSubPlans) + { + if (!IsSubPlanActive(subPlan, activeDeliveryGroupIds)) + { + continue; + } + + if (subPlan.DeliveryGroups[0].Parent is not null) + { + continue; + } + + started.Add(subPlan); + pendingSubPlanCount++; + _ = ExecuteDeferredSubPlanInBackground( + requestContext, + variables, + operationPlan, + subPlan, + channel.Writer, + cancellationToken); + } + + // Track which delivery groups we have already announced as pending so + // we do not re-announce nested groups multiple times when they belong + // to more than one subplan. + var announcedDeliveryGroupIds = new HashSet(); + foreach (var deliveryGroup in operationPlan.DeliveryGroups) + { + if (deliveryGroup.Parent is null && activeDeliveryGroupIds.Contains(deliveryGroup.Id)) + { + announcedDeliveryGroupIds.Add(deliveryGroup.Id); + } + } + + // Yield results as they complete. 
+ while (pendingSubPlanCount > 0 && !cancellationToken.IsCancellationRequested) + { + var (subPlan, result, error) = await channel.Reader.ReadAsync(cancellationToken); + pendingSubPlanCount--; + + // Start nested subplans whose parent delivery group belongs to the + // just-completed subplan, then announce their delivery groups as + // pending. We collect announcements for the outgoing payload. + var childPending = ImmutableList.CreateBuilder(); + foreach (var candidate in deferredSubPlans) + { + if (!IsSubPlanActive(candidate, activeDeliveryGroupIds)) + { + continue; + } + + if (started.Contains(candidate)) + { + continue; + } + + var candidateParent = candidate.DeliveryGroups[0].Parent; + if (candidateParent is null) + { + continue; + } + + var parentBelongsToJustCompleted = false; + foreach (var deliveryGroup in subPlan.DeliveryGroups) + { + if (ReferenceEquals(deliveryGroup, candidateParent)) + { + parentBelongsToJustCompleted = true; + break; + } + } + + if (!parentBelongsToJustCompleted) + { + continue; + } + + started.Add(candidate); + pendingSubPlanCount++; + _ = ExecuteDeferredSubPlanInBackground( + requestContext, + variables, + operationPlan, + candidate, + channel.Writer, + cancellationToken); + + foreach (var deliveryGroup in candidate.DeliveryGroups) + { + if (!activeDeliveryGroupIds.Contains(deliveryGroup.Id)) + { + continue; + } + + if (!announcedDeliveryGroupIds.Add(deliveryGroup.Id)) + { + continue; + } + + childPending.Add(new PendingResult( + deliveryGroup.Id, + BuildPath(deliveryGroup.Path ?? SelectionPath.Root), + deliveryGroup.Label)); + } + } + + // Pick the best delivery group for this subplan's emission: the + // one whose Path is the longest prefix of the data's actual path + // (equivalently: produces the shortest subPath). This follows the + // graphql-js `_getBestIdAndSubPath` rule. Ties are broken by the + // smallest DeferUsage.Id for determinism, which matches the sorted + // DeliveryGroups order. 
+ var bestDeliveryGroup = PickBestDeliveryGroup(subPlan); + + // Build the incremental payload following the GraphQL incremental + // delivery spec. Deferred data goes in `incremental`; `completed` + // signals a delivery group is done; `hasNext` indicates more + // payloads follow. We compute completed entries by decrementing + // each delivery group the subplan contributed to. + var completed = ImmutableList.CreateBuilder(); + OperationResult payload; + + if (error is not null) + { + var errorObj = ErrorBuilder.New() + .SetMessage(error.Message) + .Build(); + payload = OperationResult.FromError(errorObj); + CompleteDeliveryGroupsForSubPlan( + subPlan, + activeDeliveryGroupIds, + pendingCountByDeliveryGroup, + completed, + errors: [errorObj]); + } + else if (result is not null) + { + payload = result; + + // Wrap the deferred result's data in IncrementalObjectResult + // and clear top-level data/errors (per spec, subsequent payloads + // use `incremental` array, not root `data`). + // + // The deferred plan executes against a standalone operation whose + // result is rooted at Query (e.g. `{ user: { reviews: [...] } }`), + // but the incremental delivery contract requires `incremental.data` + // to be the delta at `pending.path`. We navigate down the best + // delivery group's path and emit only the subtree at that location. + if (result.Data.HasValue + && !result.Data.Value.IsValueNull + && TryCreateIncrementalData( + result.Data.Value, + bestDeliveryGroup, + out var incrementalData)) + { + payload.Incremental = + [ + new IncrementalObjectResult( + bestDeliveryGroup.Id, + result.Errors.Count > 0 ? result.Errors : null, + data: incrementalData) + ]; + } + + CompleteDeliveryGroupsForSubPlan( + subPlan, + activeDeliveryGroupIds, + pendingCountByDeliveryGroup, + completed, + errors: result.Errors.Count > 0 && payload.Incremental.Count == 0 + ? result.Errors + : null); + } + else + { + // Empty deferred subplan: all fields may have been conditional + // and excluded. 
Report a successful completion with no data. + // We use FromError to create a valid OperationResult, then + // clear top-level errors since this is a successful completion. + var placeholder = ErrorBuilder.New() + .SetMessage("placeholder") + .Build(); + payload = OperationResult.FromError(placeholder); + CompleteDeliveryGroupsForSubPlan( + subPlan, + activeDeliveryGroupIds, + pendingCountByDeliveryGroup, + completed, + errors: null); + } + + // Set Completed first so the IncrementalDataFeature is established + // before clearing the top-level Errors (which validates against it). + if (completed.Count > 0) + { + payload.Completed = completed.ToImmutable(); + } + + if (childPending.Count > 0) + { + payload.Pending = childPending.ToImmutable(); + } + + // Per spec: subsequent payloads use `incremental` array, not root + // `data`. Clear top-level data/errors so the formatter only renders + // incremental delivery fields. + payload.Data = null; + if (payload.Errors.Count > 0) + { + payload.Errors = []; + } + + payload.HasNext = pendingSubPlanCount > 0; + yield return payload; + } + } + + private static async Task ExecuteDeferredSubPlanInBackground( + RequestContext requestContext, + IVariableValueCollection variables, + OperationPlan operationPlan, + ExecutionSubPlan subPlan, + ChannelWriter<(ExecutionSubPlan, OperationResult?, Exception?)> writer, + CancellationToken cancellationToken) + { + try + { + if (subPlan.AllNodes.IsEmpty) + { + await writer.WriteAsync((subPlan, null, null), cancellationToken); + return; + } + + var representative = subPlan.DeliveryGroups[0]; + + // Create a mini OperationPlan for the deferred subplan using the + // subplan's own compiled Operation for correct result mapping. 
+ var deferPlan = OperationPlan.Create( + operationPlan.Id + "#defer_" + representative.Id, + subPlan.Operation, + subPlan.RootNodes, + subPlan.AllNodes, + [], + [], + 0, + 0); + + using var executionCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken); + + await using var context = requestContext.Schema.Services + .GetRequiredService().Rent(); + context.Initialize(requestContext, variables, deferPlan, executionCts); + + context.Begin(); + + await ExecuteQueryAsync(context, deferPlan, executionCts.Token); + + var deferredResult = context.Complete(); + await writer.WriteAsync((subPlan, deferredResult, null), cancellationToken); + } + catch (OperationCanceledException) + { + // Write a cancellation result so the consumer doesn't hang. + await writer.WriteAsync((subPlan, null, null), CancellationToken.None); + } + catch (Exception ex) + { + await writer.WriteAsync((subPlan, null, ex), CancellationToken.None); + } + } + + private static bool IsDeliveryGroupActive(DeferUsage deliveryGroup, IVariableValueCollection variables) + { + if (deliveryGroup.IfVariable is null) + { + return true; + } + + if (!variables.TryGetValue(deliveryGroup.IfVariable, out var boolValue)) + { + throw new InvalidOperationException( + $"The variable {deliveryGroup.IfVariable} has an invalid value."); + } + + return boolValue.Value; + } + + private static bool IsSubPlanActive(ExecutionSubPlan subPlan, HashSet activeDeliveryGroupIds) + { + foreach (var deliveryGroup in subPlan.DeliveryGroups) + { + if (activeDeliveryGroupIds.Contains(deliveryGroup.Id)) + { + return true; + } + } + + return false; + } + + /// + /// Picks the best delivery group for emitting a subplan's incremental + /// payload. Per graphql-js _getBestIdAndSubPath, the best group is + /// the one whose is the longest prefix of + /// the data's actual path (equivalently, the shortest subPath). + /// Ties are broken by the smallest , which is + /// the first element in the sorted . 
+ /// + private static DeferUsage PickBestDeliveryGroup(ExecutionSubPlan subPlan) + { + var best = subPlan.DeliveryGroups[0]; + var bestLength = best.Path?.Length ?? 0; + + for (var i = 1; i < subPlan.DeliveryGroups.Length; i++) + { + var candidate = subPlan.DeliveryGroups[i]; + var candidateLength = candidate.Path?.Length ?? 0; + + if (candidateLength > bestLength) + { + best = candidate; + bestLength = candidateLength; + } + } + + return best; + } + + private static void CompleteDeliveryGroupsForSubPlan( + ExecutionSubPlan subPlan, + HashSet activeDeliveryGroupIds, + Dictionary pendingCountByDeliveryGroup, + ImmutableList.Builder completed, + IReadOnlyList? errors) + { + foreach (var deliveryGroup in subPlan.DeliveryGroups) + { + if (!activeDeliveryGroupIds.Contains(deliveryGroup.Id)) + { + continue; + } + + if (!pendingCountByDeliveryGroup.TryGetValue(deliveryGroup.Id, out var count)) + { + continue; + } + + count--; + if (count <= 0) + { + pendingCountByDeliveryGroup.Remove(deliveryGroup.Id); + completed.Add(errors is { Count: > 0 } + ? new CompletedResult(deliveryGroup.Id, errors) + : new CompletedResult(deliveryGroup.Id)); + } + else + { + pendingCountByDeliveryGroup[deliveryGroup.Id] = count; + } + } + } + + private static Path BuildPath(SelectionPath selectionPath) + { + var path = Path.Root; + + for (var i = 0; i < selectionPath.Length; i++) + { + var segment = selectionPath[i]; + + if (segment.Kind is SelectionPathSegmentKind.Field) + { + path = path.Append(segment.Name); + } + } + + return path; + } + + /// + /// Produces an whose logical root is the + /// subtree at the best delivery group's path within the deferred plan's + /// composite result. The incremental delivery contract requires + /// incremental.data to be the delta to merge at the pending path, + /// not the fully rooted result. 
+ /// + private static bool TryCreateIncrementalData( + OperationResultData rootData, + DeferUsage bestDeliveryGroup, + out OperationResultData incrementalData) + { + if (rootData.Value is not CompositeResultDocument document) + { + // Unknown backing value: fall through to the default behavior and + // emit the result as-is. + incrementalData = rootData; + return true; + } + + var element = document.Data; + var selectionPath = bestDeliveryGroup.Path ?? SelectionPath.Root; + + for (var i = 0; i < selectionPath.Length; i++) + { + var segment = selectionPath[i]; + + // Inline fragments/type-conditions do not introduce an extra level + // in the result tree, so we only walk field segments. + if (segment.Kind is not SelectionPathSegmentKind.Field) + { + continue; + } + + if (!element.TryGetProperty(segment.Name, out var next) + || next.ValueKind is JsonValueKind.Null or JsonValueKind.Undefined) + { + // The path could not be resolved or is null; nothing to merge. + incrementalData = default; + return false; + } + + element = next; + } + + // MemoryHolder is intentionally not carried over: the surrounding + // OperationResult already owns the composite document's lifetime, + // and the IncrementalObjectResult is a non-owning view over it. 
+ incrementalData = new OperationResultData( + document, + isValueNull: false, + new DeferredPayloadDataFormatter(element), + memoryHolder: null); + return true; + } + + public static async Task SubscribeAsync( RequestContext requestContext, OperationPlan operationPlan, CancellationToken cancellationToken) diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Pipeline/OperationExecutionMiddleware.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Pipeline/OperationExecutionMiddleware.cs index a6595cb8a26..c0c092dc43f 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Pipeline/OperationExecutionMiddleware.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Pipeline/OperationExecutionMiddleware.cs @@ -9,7 +9,6 @@ namespace HotChocolate.Fusion.Execution.Pipeline; internal sealed class OperationExecutionMiddleware { - private readonly OperationPlanExecutor _planExecutor = new(); private readonly IFusionExecutionDiagnosticEvents _diagnosticEvents; private OperationExecutionMiddleware(IFusionExecutionDiagnosticEvents diagnosticEvents) @@ -46,18 +45,30 @@ public async ValueTask InvokeAsync( return; } - context.Result = await _planExecutor.SubscribeAsync(context, operationPlan, cancellationToken); + context.Result = await OperationPlanExecutor.SubscribeAsync(context, operationPlan, cancellationToken); } else { if (context.VariableValues.Length > 1) { + if (!operationPlan.DeferredSubPlans.IsEmpty) + { + var error = ErrorBuilder.New() + .SetMessage("Variable batching is not supported with @defer.") + .Build(); + + _diagnosticEvents.RequestError(context, error); + + context.Result = OperationResult.FromError(error); + return; + } + var variableValues = ImmutableCollectionsMarshal.AsArray(context.VariableValues).AsSpan(); var tasks = new Task[variableValues.Length]; for (var i = 0; i < variableValues.Length; i++) { - tasks[i] = _planExecutor.ExecuteAsync( + tasks[i] = OperationPlanExecutor.ExecuteAsync( context, 
variableValues[i], operationPlan, @@ -67,9 +78,17 @@ public async ValueTask InvokeAsync( var results = ImmutableList.CreateRange(await Task.WhenAll(tasks)); context.Result = new OperationResultBatch(results); } + else if (!operationPlan.DeferredSubPlans.IsEmpty) + { + context.Result = await OperationPlanExecutor.ExecuteWithDeferAsync( + context, + context.VariableValues[0], + operationPlan, + cancellationToken); + } else { - context.Result = await _planExecutor.ExecuteAsync( + context.Result = await OperationPlanExecutor.ExecuteAsync( context, context.VariableValues[0], operationPlan, diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.Pooling.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.Pooling.cs index 8d7a62d516d..82d245c110b 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.Pooling.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.Pooling.cs @@ -17,6 +17,7 @@ public void Initialize( Operation operation, ErrorHandlingMode errorHandlingMode, ulong includeFlags, + ulong deferFlags, int pathSegmentLocalPoolCapacity) { ArgumentNullException.ThrowIfNull(schema); @@ -27,10 +28,11 @@ public void Initialize( _operation = operation; _errorHandlingMode = errorHandlingMode; _includeFlags = includeFlags; + _deferFlags = deferFlags; _disposed = false; _pathPool ??= new PathSegmentLocalPool(pathSegmentLocalPoolCapacity); - _result = new CompositeResultDocument(operation, includeFlags, _pathPool); + _result = new CompositeResultDocument(operation, includeFlags, deferFlags, _pathPool); _valueCompletion = new ValueCompletion( this, @@ -46,7 +48,7 @@ public void Reset() { ObjectDisposedException.ThrowIf(_disposed, this); - _result = new CompositeResultDocument(_operation, _includeFlags, _pathPool); + _result = new CompositeResultDocument(_operation, _includeFlags, _deferFlags, _pathPool); _errors?.Clear(); 
_pocketedErrors?.Clear(); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs index 7cf5dcb2563..cf04aac8655 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/Results/FetchResultStore.cs @@ -36,6 +36,7 @@ internal sealed partial class FetchResultStore : IDisposable private Operation _operation = default!; private ErrorHandlingMode _errorHandlingMode; private ulong _includeFlags; + private ulong _deferFlags; private CompositeResultElement[] _collectTargetA = ArrayPool.Shared.Rent(64); private CompositeResultElement[] _collectTargetB = ArrayPool.Shared.Rent(64); private CompositeResultElement[] _collectTargetCombined = ArrayPool.Shared.Rent(64); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ThrowHelper.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ThrowHelper.cs index a3663cb9e0d..8f0caacece9 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ThrowHelper.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Execution/ThrowHelper.cs @@ -1,3 +1,4 @@ +using HotChocolate.Execution; using HotChocolate.Fusion.Properties; namespace HotChocolate.Fusion.Execution; @@ -14,6 +15,11 @@ public static KeyNotFoundException NodeNotFound(int id) FusionExecutionResources.OperationPlan_NodeNotFound, id)); + public static InvalidOperationException DeferredSubPlanParentNotFound(SelectionPath path) + => new(string.Format( + FusionExecutionResources.OperationPlan_DeferredSubPlanParentNotFound, + path)); + public static InvalidOperationException MissingBatchResult(int operationId) => new(string.Format( FusionExecutionResources.OperationBatchExecutionNode_MissingBatchResult, diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferEffectiveSetResolver.cs 
b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferEffectiveSetResolver.cs new file mode 100644 index 00000000000..472b5d823f7 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferEffectiveSetResolver.cs @@ -0,0 +1,120 @@ +using System.Collections.Immutable; +using HotChocolate.Fusion.Execution.Nodes; + +namespace HotChocolate.Fusion.Planning; + +/// +/// Resolves the effective for every leaf +/// field location collected by . +/// Fields whose set is empty are non-deferred; fields sharing a non-empty +/// set form a single subplan, matching the GraphQL incremental-delivery +/// spec rule that two sibling ... @defer fragments which contribute +/// the same field collapse into one delivery group. +/// +internal static class DeferEffectiveSetResolver +{ + /// + /// Groups occurrences by and computes the + /// effective per location. + /// + public static Dictionary Resolve(List occurrences) + { + var leavesByLocation = new Dictionary(); + + foreach (var occurrence in occurrences) + { + var location = new FieldLocation(occurrence.ParentPath, occurrence.ResponseName); + + if (!leavesByLocation.TryGetValue(location, out var acc)) + { + acc = new LeavesAccumulator(); + leavesByLocation[location] = acc; + } + + acc.Add(occurrence.EnclosingDefer); + } + + var result = new Dictionary(leavesByLocation.Count); + + foreach (var (location, acc) in leavesByLocation) + { + result[location] = acc.ToEffectiveSet(); + } + + return result; + } + + private sealed class LeavesAccumulator + { + private bool _hasNonDeferred; + private readonly List _leaves = []; + + public void Add(DeferUsage? 
leaf) + { + if (leaf is null) + { + _hasNonDeferred = true; + return; + } + + foreach (var existing in _leaves) + { + if (ReferenceEquals(existing, leaf)) + { + return; + } + } + + _leaves.Add(leaf); + } + + public DeferUsageSetKey ToEffectiveSet() + { + if (_hasNonDeferred || _leaves.Count == 0) + { + return DeferUsageSetKey.Empty; + } + + // Parent-child pruning: drop any leaf whose ancestor is also + // in the set. + var pruned = new List(_leaves.Count); + for (var i = 0; i < _leaves.Count; i++) + { + var ancestor = _leaves[i].Parent; + var dropped = false; + + while (ancestor is not null) + { + for (var j = 0; j < _leaves.Count; j++) + { + if (j != i && ReferenceEquals(_leaves[j], ancestor)) + { + dropped = true; + break; + } + } + + if (dropped) + { + break; + } + + ancestor = ancestor.Parent; + } + + if (!dropped) + { + pruned.Add(_leaves[i]); + } + } + + if (pruned.Count == 0) + { + return DeferUsageSetKey.Empty; + } + + pruned.Sort(static (a, b) => a.Id.CompareTo(b.Id)); + return new DeferUsageSetKey(pruned.ToImmutableArray()); + } + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferOccurrenceCollector.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferOccurrenceCollector.cs new file mode 100644 index 00000000000..0a46e9d3216 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferOccurrenceCollector.cs @@ -0,0 +1,137 @@ +using System.Collections.Immutable; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; + +namespace HotChocolate.Fusion.Planning; + +/// +/// Walks an operation AST and records every leaf field together with the +/// enclosing @defer chain leaf. The resulting list is the single +/// source of truth for downstream passes that compute effective defer +/// usage sets and emit per-set subplans. The walk consults the +/// map so every leaf +/// reference shares object identity with the planner pipeline. 
+/// +internal static class DeferOccurrenceCollector +{ + /// + /// Collects leaf field occurrences from the given operation, optionally + /// folding unlabeled nested @defer fragments into their parent + /// when the wire output would be indistinguishable. + /// + /// The operation definition to walk. + /// + /// The canonical to + /// lookup produced by . + /// + /// + /// When true an unlabeled nested @defer whose if + /// variable matches its parent is folded into the parent's set. + /// + public static List Collect( + OperationDefinitionNode operation, + IReadOnlyDictionary byFragment, + bool inlineUnlabeledNestedDefers) + { + var occurrences = new List(); + CollectOccurrences( + operation.SelectionSet.Selections, + parentPath: [], + enclosingDefer: null, + parentTypeCondition: null, + byFragment, + inlineUnlabeledNestedDefers, + occurrences); + return occurrences; + } + + private static void CollectOccurrences( + IReadOnlyList selections, + ImmutableArray parentPath, + DeferUsage? enclosingDefer, + NamedTypeNode? parentTypeCondition, + IReadOnlyDictionary byFragment, + bool inlineUnlabeledNestedDefers, + List occurrences) + { + foreach (var selection in selections) + { + if (selection is FieldNode fieldNode) + { + if (fieldNode.SelectionSet is { } childSelectionSet) + { + // Composite field: we do NOT record an occurrence at this + // level. The wrapping is reconstructed by the subplan AST + // builder from the path tree, which looks up the original + // field (with its arguments/alias/directives) in the + // source AST. Recording an occurrence here would lead to + // the field being emitted twice: once as a wrapper and + // once as a leaf contribution. 
+ var childPath = parentPath.Add( + new FieldPathSegment( + fieldNode.Name.Value, + fieldNode.Alias?.Value)); + + CollectOccurrences( + childSelectionSet.Selections, + childPath, + enclosingDefer, + parentTypeCondition: null, + byFragment, + inlineUnlabeledNestedDefers, + occurrences); + } + else + { + // Leaf field: add as a direct contribution at the current + // path. Effective-set computation groups these by + // (parentPath, responseName) so sibling @defer fragments + // that share a leaf are unified into a single subplan. + occurrences.Add( + new FieldOccurrence( + parentPath, + fieldNode.Alias?.Value ?? fieldNode.Name.Value, + fieldNode, + enclosingDefer, + parentTypeCondition)); + } + + continue; + } + + if (selection is InlineFragmentNode inlineFragment) + { + var nestedDefer = enclosingDefer; + + if (byFragment.TryGetValue(inlineFragment, out var canonical)) + { + // Fragment is a @defer. Honor the unlabeled-inlining option: + // an unlabeled nested @defer whose condition matches its + // parent's is indistinguishable from the parent in the wire + // output, so we fold its fields into the parent's set. + if (inlineUnlabeledNestedDefers + && canonical.Label is null + && enclosingDefer is not null + && (canonical.IfVariable is null + || canonical.IfVariable == enclosingDefer.IfVariable)) + { + // Treat as non-defer: keep enclosingDefer. + } + else + { + nestedDefer = canonical; + } + } + + CollectOccurrences( + inlineFragment.SelectionSet.Selections, + parentPath, + nestedDefer, + inlineFragment.TypeCondition ?? 
parentTypeCondition, + byFragment, + inlineUnlabeledNestedDefers, + occurrences); + } + } + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferOperationRewriter.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferOperationRewriter.cs new file mode 100644 index 00000000000..aed2dd49c15 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferOperationRewriter.cs @@ -0,0 +1,577 @@ +using System.Collections.Immutable; +using HotChocolate.Execution; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; +using HotChocolate.Types; + +namespace HotChocolate.Fusion.Planning; + +/// +/// Splits an operation with @defer directives into a main operation +/// (non-deferred fields only) and one subplan operation per unique +/// set. The set keying follows the GraphQL +/// incremental-delivery spec: each field's active defer usage set is the +/// union of its per-occurrence enclosing @defer leaves (with +/// parent-child pruning). Two sibling ... @defer fragments that share +/// a field therefore produce one subplan keyed by both usages for the shared +/// field, rather than two independent subplans that both fetch it. +/// +internal sealed class DeferOperationRewriter +{ + private readonly bool _inlineUnlabeledNestedDefers; + + internal DeferOperationRewriter(bool inlineUnlabeledNestedDefers = true) + { + _inlineUnlabeledNestedDefers = inlineUnlabeledNestedDefers; + } + + /// + /// Fast check whether the operation contains any @defer directives. + /// Used to avoid the full split for non-deferred operations (the common case). 
+ /// + public static bool HasDeferDirective(OperationDefinitionNode operation) + { + return HasDeferInSelectionSet(operation.SelectionSet); + + static bool HasDeferInSelectionSet(SelectionSetNode selectionSet) + { + for (var i = 0; i < selectionSet.Selections.Count; i++) + { + var selection = selectionSet.Selections[i]; + + if (selection is InlineFragmentNode inlineFragment) + { + if (HasDeferDirective(inlineFragment)) + { + return true; + } + + if (HasDeferInSelectionSet(inlineFragment.SelectionSet)) + { + return true; + } + } + else if (selection is FragmentSpreadNode fragmentSpread) + { + if (HasDeferDirectiveOnSpread(fragmentSpread)) + { + return true; + } + } + else if (selection is FieldNode { SelectionSet: not null } field) + { + if (HasDeferInSelectionSet(field.SelectionSet)) + { + return true; + } + } + } + + return false; + } + + static bool HasDeferDirectiveOnSpread(FragmentSpreadNode node) + { + for (var i = 0; i < node.Directives.Count; i++) + { + if (node.Directives[i].Name.Value.Equals( + DirectiveNames.Defer.Name, + StringComparison.Ordinal)) + { + return true; + } + } + + return false; + } + } + + /// + /// Splits the given operation at @defer boundaries using the + /// topology produced by + /// . The output contains the stripped main + /// operation (fields whose active DeferUsageSet is empty) plus one + /// subplan per unique non-empty active set. Sibling @defer + /// fragments that share a field collapse into a single subplan keyed by + /// the union of both usages. + /// + /// The operation definition that may contain @defer directives. + /// The output for . 
+ public DeferSplitResult Split( + OperationDefinitionNode operation, + DeferPartitioningResult partitioning) + { + ArgumentNullException.ThrowIfNull(operation); + ArgumentNullException.ThrowIfNull(partitioning); + + var occurrences = DeferOccurrenceCollector.Collect( + operation, + partitioning.ByFragment, + _inlineUnlabeledNestedDefers); + + var effectiveSetByLocation = DeferEffectiveSetResolver.Resolve(occurrences); + + var mainOperation = BuildMainOperation(operation, partitioning.ByFragment); + + if (partitioning.AllDeferUsages.IsEmpty) + { + return new DeferSplitResult(mainOperation, []); + } + + var subPlanDescriptors = BuildSubPlanOps(operation, occurrences, effectiveSetByLocation); + + return new DeferSplitResult(mainOperation, subPlanDescriptors); + } + + private static ImmutableArray BuildSubPlanOps( + OperationDefinitionNode operation, + List occurrences, + Dictionary effectiveSetByLocation) + { + // Bucket occurrences by effective set. We use a canonical + // ImmutableArray (sorted by Id) as the set key so the + // resulting buckets are stable across runs and trivially comparable + // by sequence equality. + var buckets = new Dictionary(); + + foreach (var occurrence in occurrences) + { + var key = effectiveSetByLocation[new FieldLocation(occurrence.ParentPath, occurrence.ResponseName)]; + + if (key.IsEmpty) + { + continue; + } + + if (!buckets.TryGetValue(key, out var bucket)) + { + bucket = new SubPlanBucket(key); + buckets[key] = bucket; + } + + bucket.Add(occurrence); + } + + if (buckets.Count == 0) + { + return []; + } + + // Synthesize one OperationDefinitionNode per bucket. The output is a + // query operation rooted at Query, with the wrapping path fields + // looked up in the original AST so arguments, aliases and directives + // are preserved. Buckets are ordered by the smallest DeferUsage Id in + // the key so subsequent sort-by-Id consumers see a deterministic order. 
+ var ordered = buckets.Values.ToList(); + ordered.Sort(static (a, b) => a.Key.CompareTo(b.Key)); + + var subPlanDescriptors = ImmutableArray.CreateBuilder(ordered.Count); + var descriptorByKey = new Dictionary(ordered.Count); + + foreach (var bucket in ordered) + { + var subPlanOp = BuildSubPlanOperation(operation, bucket); + var path = DeterminePath(bucket.Key); + var parent = ResolveParentDescriptor(bucket.Key, descriptorByKey); + var descriptor = new DeferSubPlanDescriptor( + deferUsageSet: bucket.Key.Items, + operation: subPlanOp, + path: path, + parent: parent); + + descriptorByKey[bucket.Key] = descriptor; + subPlanDescriptors.Add(descriptor); + } + + return subPlanDescriptors.ToImmutable(); + } + + private OperationDefinitionNode BuildMainOperation( + OperationDefinitionNode operation, + IReadOnlyDictionary byFragment) + { + var newRoot = StripDeferFromSelectionSet(operation.SelectionSet, byFragment); + return operation.WithSelectionSet(newRoot); + } + + private SelectionSetNode StripDeferFromSelectionSet( + SelectionSetNode selectionSet, + IReadOnlyDictionary byFragment) + { + var selections = new List(selectionSet.Selections.Count); + var modified = false; + + for (var i = 0; i < selectionSet.Selections.Count; i++) + { + var selection = selectionSet.Selections[i]; + + if (selection is InlineFragmentNode inlineFragment + && byFragment.TryGetValue(inlineFragment, out var usage)) + { + // The main operation never keeps @defer fragments: all + // deferred fields go through their subplan. For a conditional + // @defer(if: $variable) we still keep a @skip(if: $variable) + // guarded inline copy so the variable-off runtime path + // fetches the fields eagerly. 
+ if (usage.IfVariable is not null) + { + var skipDirective = new DirectiveNode( + null, + new NameNode("skip"), + [ + new ArgumentNode( + null, + new NameNode("if"), + new VariableNode(new NameNode(usage.IfVariable))) + ]); + + var stripped = StripDeferDirective(inlineFragment); + var nested = StripDeferFromSelectionSet(stripped.SelectionSet, byFragment); + + if (!ReferenceEquals(nested, stripped.SelectionSet)) + { + stripped = stripped.WithSelectionSet(nested); + } + + selections.Add(stripped.WithDirectives([.. stripped.Directives, skipDirective])); + } + + modified = true; + continue; + } + + if (selection is InlineFragmentNode nonDeferFragment) + { + var nestedInner = StripDeferFromSelectionSet(nonDeferFragment.SelectionSet, byFragment); + + if (!ReferenceEquals(nestedInner, nonDeferFragment.SelectionSet)) + { + nonDeferFragment = nonDeferFragment.WithSelectionSet(nestedInner); + modified = true; + } + + selections.Add(nonDeferFragment); + continue; + } + + if (selection is FieldNode fieldNode && fieldNode.SelectionSet is not null) + { + var childInner = StripDeferFromSelectionSet(fieldNode.SelectionSet, byFragment); + + if (!ReferenceEquals(childInner, fieldNode.SelectionSet)) + { + fieldNode = fieldNode.WithSelectionSet(childInner); + modified = true; + } + + selections.Add(fieldNode); + continue; + } + + selections.Add(selection); + } + + if (!modified) + { + return selectionSet; + } + + if (selections.Count == 0) + { + selections.Add(new FieldNode("__typename")); + } + + return new SelectionSetNode(selections); + } + + private static OperationDefinitionNode BuildSubPlanOperation( + OperationDefinitionNode rootOperation, + SubPlanBucket bucket) + { + // Build a tree of PathNodes keyed by FieldPathSegment. Each leaf + // carries the FieldNodes (per optional type condition) contributed + // by the bucket at that path. We resolve each wrapping path field + // against the original AST so arguments and aliases survive. 
+ var root = new PathNode(); + + foreach (var occurrence in bucket.Occurrences) + { + var node = root; + for (var i = 0; i < occurrence.ParentPath.Length; i++) + { + var segment = occurrence.ParentPath[i]; + node = node.GetOrAddChild(segment); + } + + node.AddContribution(occurrence.ResponseName, occurrence.FieldNode, occurrence.TypeCondition); + } + + var rootSelectionSet = BuildSelectionSetFromPathNode( + root, + rootOperation.SelectionSet, + parentPath: []); + + return rootOperation + .WithOperation(OperationType.Query) + .WithDirectives([]) + .WithSelectionSet(rootSelectionSet); + } + + private static SelectionSetNode BuildSelectionSetFromPathNode( + PathNode node, + SelectionSetNode originalSelectionSet, + ImmutableArray parentPath) + { + var selections = new List(); + + // Leaf contributions at this path. Group by type condition name so + // each `on Type` wrapping is emitted once. The same response name + // may be contributed by multiple sibling @defer fragments; we only + // keep the first to avoid duplicate selections in the subgraph + // request. + if (node.Contributions.Count > 0) + { + var unconditional = new List(); + var byTypeCondition = new Dictionary Fields)>( + StringComparer.Ordinal); + var seen = new HashSet<(string? 
TypeCondition, string ResponseName)>(); + + foreach (var contribution in node.Contributions) + { + var discriminator = (contribution.TypeCondition?.Name.Value, contribution.ResponseName); + if (!seen.Add(discriminator)) + { + continue; + } + + if (contribution.TypeCondition is null) + { + unconditional.Add(contribution.FieldNode); + } + else + { + var typeName = contribution.TypeCondition.Name.Value; + if (!byTypeCondition.TryGetValue(typeName, out var bucketEntry)) + { + bucketEntry = (contribution.TypeCondition, []); + byTypeCondition[typeName] = bucketEntry; + } + + bucketEntry.Fields.Add(contribution.FieldNode); + } + } + + foreach (var field in unconditional) + { + selections.Add(field); + } + + foreach (var (_, bucketEntry) in byTypeCondition) + { + selections.Add(new InlineFragmentNode( + null, + bucketEntry.Node, + [], + new SelectionSetNode(bucketEntry.Fields.ToArray()))); + } + } + + // Child path nodes: wrap in the original field node (preserving + // name/alias/arguments/directives) so the subplan operation is a + // syntactically valid query against the root schema. + foreach (var (segment, childNode) in node.Children) + { + var wrappingField = ResolveWrappingField(originalSelectionSet, segment) + ?? throw new InvalidOperationException( + $"Unable to resolve wrapping field for '{segment.ResponseName}' at path '{FormatPath(parentPath)}'."); + + var childSelectionSet = wrappingField.SelectionSet + ?? 
throw new InvalidOperationException( + $"Wrapping field '{segment.ResponseName}' at path '{FormatPath(parentPath)}' has no selection set."); + + var childParentPath = parentPath.Add(segment); + var nestedSelectionSet = BuildSelectionSetFromPathNode(childNode, childSelectionSet, childParentPath); + + selections.Add(new FieldNode( + null, + wrappingField.Name, + wrappingField.Alias, + wrappingField.Directives, + wrappingField.Arguments, + nestedSelectionSet)); + } + + if (selections.Count == 0) + { + selections.Add(new FieldNode("__typename")); + } + + return new SelectionSetNode(selections); + } + + private static FieldNode? ResolveWrappingField( + SelectionSetNode selectionSet, + FieldPathSegment segment) + { + for (var i = 0; i < selectionSet.Selections.Count; i++) + { + var selection = selectionSet.Selections[i]; + + if (selection is FieldNode field) + { + var responseName = field.Alias?.Value ?? field.Name.Value; + + if (responseName.Equals(segment.ResponseName, StringComparison.Ordinal)) + { + return field; + } + } + + if (selection is InlineFragmentNode inline) + { + var nested = ResolveWrappingField(inline.SelectionSet, segment); + + if (nested is not null) + { + return nested; + } + } + } + + return null; + } + + private static string FormatPath(ImmutableArray path) + { + if (path.IsEmpty) + { + return "$"; + } + + var builder = new System.Text.StringBuilder("$"); + for (var i = 0; i < path.Length; i++) + { + builder.Append('.'); + builder.Append(path[i].ResponseName); + } + + return builder.ToString(); + } + + private static SelectionPath DeterminePath(DeferUsageSetKey key) + { + // The subplan roots at the longest shared ancestor of all usages in + // its set. For siblings (same parent), every usage has the same + // anchor path; for the general case we pick the deepest path because + // after parent pruning no two usages in the same set sit on the same + // parent chain. + SelectionPath? 
best = null; + + foreach (var usage in key.Items) + { + if (usage.Path is null) + { + continue; + } + + if (best is null || usage.Path.Length > best.Length) + { + best = usage.Path; + } + } + + return best ?? SelectionPath.Root; + } + + private static DeferSubPlanDescriptor? ResolveParentDescriptor( + DeferUsageSetKey key, + Dictionary descriptorByKey) + { + // A subplan's "parent" is the subplan whose key contains the parent + // DeferUsage of any usage in this set. We pick the first usage's + // parent chain; all other usages in the set share an equivalent + // ancestry after parent pruning. + foreach (var usage in key.Items) + { + var parent = usage.Parent; + while (parent is not null) + { + foreach (var (candidateKey, candidate) in descriptorByKey) + { + foreach (var candidateUsage in candidateKey.Items) + { + if (ReferenceEquals(candidateUsage, parent)) + { + return candidate; + } + } + } + parent = parent.Parent; + } + } + + return null; + } + + private static InlineFragmentNode StripDeferDirective(InlineFragmentNode node) + { + var directives = new List(node.Directives.Count); + + for (var i = 0; i < node.Directives.Count; i++) + { + if (!node.Directives[i].Name.Value.Equals( + DirectiveNames.Defer.Name, + StringComparison.Ordinal)) + { + directives.Add(node.Directives[i]); + } + } + + return node.WithDirectives(directives); + } + + private static bool HasDeferDirective(InlineFragmentNode node) + { + for (var i = 0; i < node.Directives.Count; i++) + { + if (node.Directives[i].Name.Value.Equals( + DirectiveNames.Defer.Name, + StringComparison.Ordinal)) + { + return true; + } + } + + return false; + } + + private sealed class SubPlanBucket(DeferUsageSetKey key) + { + public DeferUsageSetKey Key { get; } = key; + public List Occurrences { get; } = []; + + public void Add(FieldOccurrence occurrence) => Occurrences.Add(occurrence); + } + + private sealed class PathNode + { + public List<(string ResponseName, FieldNode FieldNode, NamedTypeNode? 
TypeCondition)> Contributions { get; } = []; + public Dictionary Children { get; } = []; + + public PathNode GetOrAddChild(FieldPathSegment segment) + { + if (!Children.TryGetValue(segment, out var child)) + { + child = new PathNode(); + Children[segment] = child; + } + + return child; + } + + public void AddContribution(string responseName, FieldNode fieldNode, NamedTypeNode? typeCondition) + { + Contributions.Add((responseName, fieldNode, typeCondition)); + } + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferPartitioner.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferPartitioner.cs new file mode 100644 index 00000000000..063e8bcf178 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferPartitioner.cs @@ -0,0 +1,198 @@ +using System.Collections.Immutable; +using HotChocolate.Execution; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; +using HotChocolate.Types; + +namespace HotChocolate.Fusion.Planning; + +/// +/// Walks an operation AST once and produces the tree +/// for all @defer occurrences. The resulting mapping from +/// instance to is the +/// single source of truth for defer topology, consumed by the planner pipeline +/// stages (rewriter, compiler) so they do not perform parallel AST walks with +/// divergent state. +/// +internal static class DeferPartitioner +{ + /// + /// Walks and produces the complete + /// tree for every ... @defer inline + /// fragment encountered. Defer conditions are registered into + /// (passed in rather than owned so the + /// caller can share one collection with the operation being compiled). 
+ /// + public static DeferPartitioningResult Partition( + OperationDefinitionNode operation, + DeferConditionCollection deferConditions) + { + ArgumentNullException.ThrowIfNull(operation); + ArgumentNullException.ThrowIfNull(deferConditions); + + var byFragment = new Dictionary(ReferenceEqualityComparer.Instance); + var ordered = new List(); + + Walk( + operation.SelectionSet.Selections, + parent: null, + currentPath: SelectionPath.Root, + deferConditions, + byFragment, + ordered); + + // Assign plan-stable Ids in declaration order. Re-create the records + // via `with { Id = i }` so downstream stages can key off Id for + // serialization and sorted DeferUsageSet emission. The re-creation + // also updates parent references to point to the Id-assigned parents. + var reassigned = AssignIds(ordered, byFragment); + + return new DeferPartitioningResult(reassigned, byFragment); + } + + private static void Walk( + IReadOnlyList selections, + DeferUsage? parent, + SelectionPath currentPath, + DeferConditionCollection deferConditions, + Dictionary byFragment, + List ordered) + { + for (var i = 0; i < selections.Count; i++) + { + var selection = selections[i]; + + if (selection is FieldNode field) + { + if (field.SelectionSet is { } sub) + { + var responseName = field.Alias?.Value ?? 
field.Name.Value; + var childPath = currentPath.AppendField(responseName); + Walk(sub.Selections, parent, childPath, deferConditions, byFragment, ordered); + } + + continue; + } + + if (selection is InlineFragmentNode inline) + { + var nested = parent; + + if (DeferCondition.TryCreate(inline, out var deferCondition)) + { + deferConditions.Add(deferCondition); + var deferIndex = deferConditions.IndexOf(deferCondition); + var label = GetDeferLabel(inline); + var ifVariable = GetDeferIfVariable(inline); + + var usage = new DeferUsage(label, parent, (byte)deferIndex) + { + Path = currentPath, + IfVariable = ifVariable + }; + byFragment[inline] = usage; + ordered.Add(usage); + nested = usage; + } + + Walk(inline.SelectionSet.Selections, nested, currentPath, deferConditions, byFragment, ordered); + continue; + } + + // FragmentSpreadNode / other shapes are not expected at planner input + // (document rewriter inlines spreads); fall through silently. + } + } + + /// + /// Assigns plan-stable Ids to every in declaration + /// order and rebuilds parent references against the Id-assigned instances. + /// The map is updated in place so callers see + /// the canonical records. + /// + private static ImmutableArray AssignIds( + List ordered, + Dictionary byFragment) + { + if (ordered.Count == 0) + { + return []; + } + + var remap = new Dictionary(ordered.Count, ReferenceEqualityComparer.Instance); + var builder = ImmutableArray.CreateBuilder(ordered.Count); + + for (var i = 0; i < ordered.Count; i++) + { + var source = ordered[i]; + var parent = source.Parent is null ? null : remap[source.Parent]; + var reassigned = source with { Id = i, Parent = parent }; + remap[source] = reassigned; + builder.Add(reassigned); + } + + // Keep the fragment lookup in sync with the Id-assigned records. + foreach (var kvp in byFragment.ToArray()) + { + byFragment[kvp.Key] = remap[kvp.Value]; + } + + return builder.MoveToImmutable(); + } + + private static string? 
GetDeferLabel(InlineFragmentNode node) + { + for (var i = 0; i < node.Directives.Count; i++) + { + var directive = node.Directives[i]; + + if (!directive.Name.Value.Equals( + DirectiveNames.Defer.Name, + StringComparison.Ordinal)) + { + continue; + } + + for (var j = 0; j < directive.Arguments.Count; j++) + { + var arg = directive.Arguments[j]; + + if (arg.Name.Value.Equals("label", StringComparison.Ordinal) + && arg.Value is StringValueNode stringValue) + { + return stringValue.Value; + } + } + } + + return null; + } + + private static string? GetDeferIfVariable(InlineFragmentNode node) + { + for (var i = 0; i < node.Directives.Count; i++) + { + var directive = node.Directives[i]; + + if (!directive.Name.Value.Equals( + DirectiveNames.Defer.Name, + StringComparison.Ordinal)) + { + continue; + } + + for (var j = 0; j < directive.Arguments.Count; j++) + { + var arg = directive.Arguments[j]; + + if (arg.Name.Value.Equals("if", StringComparison.Ordinal) + && arg.Value is VariableNode variable) + { + return variable.Name.Value; + } + } + } + + return null; + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferPartitioningResult.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferPartitioningResult.cs new file mode 100644 index 00000000000..e301486e28c --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferPartitioningResult.cs @@ -0,0 +1,31 @@ +using System.Collections.Immutable; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; + +namespace HotChocolate.Fusion.Planning; + +/// +/// Result of . Carries the complete +/// tree (in declaration order) together with the +/// -keyed lookup so downstream stages can +/// resolve any ... @defer in the same AST to the canonical +/// instance. 
+/// +internal sealed class DeferPartitioningResult( + ImmutableArray allDeferUsages, + IReadOnlyDictionary byFragment) +{ + /// + /// Every discovered in the operation, in + /// declaration order (depth-first, left-to-right as encountered). + /// The array index equals each usage's . + /// + public ImmutableArray AllDeferUsages { get; } = allDeferUsages; + + /// + /// Maps each ... @defer inline fragment to its canonical + /// . Lookup is by reference identity on the AST + /// node. + /// + public IReadOnlyDictionary ByFragment { get; } = byFragment; +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferRewriterTypes.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferRewriterTypes.cs new file mode 100644 index 00000000000..dcfcb91d5dc --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferRewriterTypes.cs @@ -0,0 +1,14 @@ +using System.Collections.Immutable; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; + +namespace HotChocolate.Fusion.Planning; + +/// +/// The result of splitting an operation at @defer boundaries: a stripped +/// main operation plus one subplan descriptor per unique +/// set. 
+/// +internal readonly record struct DeferSplitResult( + OperationDefinitionNode MainOperation, + ImmutableArray SubPlanDescriptors); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferSubPlanDescriptor.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferSubPlanDescriptor.cs new file mode 100644 index 00000000000..3821987a6dc --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferSubPlanDescriptor.cs @@ -0,0 +1,45 @@ +using System.Collections.Immutable; +using HotChocolate.Execution; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; + +namespace HotChocolate.Fusion.Planning; + +/// +/// Describes a single subplan: the compiled operation for a unique +/// set together with the set itself (sorted by +/// for stability). Fields whose active defer +/// usage set equals are fetched by this subplan +/// and delivered to every delivery group in the set. +/// +internal sealed class DeferSubPlanDescriptor( + ImmutableArray deferUsageSet, + OperationDefinitionNode operation, + SelectionPath path, + DeferSubPlanDescriptor? parent) +{ + /// + /// The set this subplan is keyed by, sorted + /// ascending by . + /// + public ImmutableArray DeferUsageSet { get; } = deferUsageSet; + + /// + /// The compiled operation for this subplan. + /// + public OperationDefinitionNode Operation { get; internal set; } = operation; + + /// + /// The path where the subplan's data is inserted in the response tree. + /// Derived from the deepest in the set. + /// + public SelectionPath Path { get; } = path; + + /// + /// The parent subplan for nested @defer, or null for a + /// top-level subplan. Determined by walking each set member's + /// chain and finding the first already- + /// emitted subplan whose set contains a matching ancestor. + /// + public DeferSubPlanDescriptor? 
Parent { get; } = parent; +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferUsageSetKey.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferUsageSetKey.cs new file mode 100644 index 00000000000..35d3aabc011 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/DeferUsageSetKey.cs @@ -0,0 +1,87 @@ +using System.Collections.Immutable; +using HotChocolate.Fusion.Execution.Nodes; + +namespace HotChocolate.Fusion.Planning; + +/// +/// A canonical key for a set. The wrapped array is +/// sorted ascending by so sequence equality +/// doubles as set equality. +/// +internal readonly record struct DeferUsageSetKey(ImmutableArray Items) + : IEquatable + , IComparable +{ + public static DeferUsageSetKey Empty { get; } = new([]); + + public bool IsEmpty => Items.IsDefaultOrEmpty; + + public bool Equals(DeferUsageSetKey other) + { + if (Items.IsDefaultOrEmpty) + { + return other.Items.IsDefaultOrEmpty; + } + + if (other.Items.IsDefaultOrEmpty) + { + return false; + } + + if (Items.Length != other.Items.Length) + { + return false; + } + + for (var i = 0; i < Items.Length; i++) + { + if (!ReferenceEquals(Items[i], other.Items[i])) + { + return false; + } + } + + return true; + } + + public override int GetHashCode() + { + if (Items.IsDefaultOrEmpty) + { + return 0; + } + + var hash = new HashCode(); + for (var i = 0; i < Items.Length; i++) + { + hash.Add(Items[i].Id); + } + + return hash.ToHashCode(); + } + + public int CompareTo(DeferUsageSetKey other) + { + if (Items.IsDefaultOrEmpty) + { + return other.Items.IsDefaultOrEmpty ? 
0 : -1; + } + + if (other.Items.IsDefaultOrEmpty) + { + return 1; + } + + var commonLength = Math.Min(Items.Length, other.Items.Length); + for (var i = 0; i < commonLength; i++) + { + var cmp = Items[i].Id.CompareTo(other.Items[i].Id); + if (cmp != 0) + { + return cmp; + } + } + + return Items.Length.CompareTo(other.Items.Length); + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldLocation.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldLocation.cs new file mode 100644 index 00000000000..8cdc507da19 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldLocation.cs @@ -0,0 +1,51 @@ +using System.Collections.Immutable; + +namespace HotChocolate.Fusion.Planning; + +/// +/// Identifies a leaf field by the path to its parent object plus its +/// response name. Used as a dictionary key when partitioning fields by +/// effective . Equality is sequence-based on +/// , which is why this type overrides the record-struct +/// default (the default would compare the underlying ImmutableArray by +/// reference, not by contents). 
+/// +internal readonly record struct FieldLocation( + ImmutableArray Path, + string ResponseName) +{ + public bool Equals(FieldLocation other) + { + if (!string.Equals(ResponseName, other.ResponseName, StringComparison.Ordinal)) + { + return false; + } + + if (Path.Length != other.Path.Length) + { + return false; + } + + for (var i = 0; i < Path.Length; i++) + { + if (!Path[i].Equals(other.Path[i])) + { + return false; + } + } + + return true; + } + + public override int GetHashCode() + { + var hash = StringComparer.Ordinal.GetHashCode(ResponseName); + + for (var i = 0; i < Path.Length; i++) + { + hash = HashCode.Combine(hash, Path[i]); + } + + return hash; + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldOccurrence.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldOccurrence.cs new file mode 100644 index 00000000000..2a2ff979b9a --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldOccurrence.cs @@ -0,0 +1,18 @@ +using System.Collections.Immutable; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; + +namespace HotChocolate.Fusion.Planning; + +/// +/// A single occurrence of a leaf field in the operation AST, captured during +/// the collector pass together with the enclosing @defer leaf and the +/// active type condition so later passes can group by location and rebuild +/// the wrapping selection set. +/// +internal sealed record FieldOccurrence( + ImmutableArray ParentPath, + string ResponseName, + FieldNode FieldNode, + DeferUsage? EnclosingDefer, + NamedTypeNode? 
TypeCondition); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldPathSegment.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldPathSegment.cs new file mode 100644 index 00000000000..8b261514663 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/FieldPathSegment.cs @@ -0,0 +1,9 @@ +namespace HotChocolate.Fusion.Planning; + +/// +/// A segment of a field path, identifying a field by its name and optional alias. +/// +internal readonly record struct FieldPathSegment(string FieldName, string? Alias) +{ + public string ResponseName => Alias ?? FieldName; +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs index c016ff9773a..d3dda452717 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.BuildExecutionTree.cs @@ -7,6 +7,7 @@ using HotChocolate.Language; using HotChocolate.Language.Visitors; using HotChocolate.Types; +using ThrowHelper = HotChocolate.Fusion.Execution.ThrowHelper; namespace HotChocolate.Fusion.Planning; @@ -23,6 +24,8 @@ private OperationPlan BuildExecutionPlan( Operation operation, OperationDefinitionNode operationDefinition, ImmutableList planSteps, + ImmutableArray deliveryGroups, + ImmutableArray deferredSubPlans, int searchSpace, int expandedNodes, CancellationToken cancellationToken) @@ -37,7 +40,7 @@ private OperationPlan BuildExecutionPlan( var nodes = ImmutableArray.Create(introspectionNode); - return OperationPlan.Create(operation, nodes, nodes, searchSpace, expandedNodes); + return OperationPlan.Create(operation, nodes, nodes, [], [], searchSpace, expandedNodes); } var ctx = new ExecutionPlanBuildContext(); @@ -74,7 +77,135 @@ private OperationPlan BuildExecutionPlan( node.Seal(); } - return 
OperationPlan.Create(operation, rootNodes, allNodes, searchSpace, expandedNodes); + // Resolve each deferred subplan's parent execution node id now that both + // the main plan's nodes and each subplan's own nodes are built. + // Top-level subplans resolve against the main plan's allNodes; a nested + // subplan resolves against the enclosing subplan's AllNodes, where the + // enclosing subplan is the one whose DeliveryGroups contain the parent + // DeferUsage of any usage in the nested subplan's key set. + if (!deferredSubPlans.IsDefaultOrEmpty) + { + foreach (var subPlan in deferredSubPlans) + { + var path = ResolveSubPlanPath(subPlan); + var parent = ResolveSubPlanParent(subPlan, deferredSubPlans); + var owningNodes = parent is null ? allNodes : parent.AllNodes; + subPlan.ParentNodeId = ResolveDeferParentNodeId(owningNodes, path) + ?? throw ThrowHelper.DeferredSubPlanParentNotFound(path); + } + } + + return OperationPlan.Create( + operation, + rootNodes, + allNodes, + deliveryGroups, + deferredSubPlans, + searchSpace, + expandedNodes); + } + + /// + /// Returns the path at which the given subplan's data is inserted into the + /// result tree. Equivalent to the deepest + /// across the subplan's delivery groups. + /// + private static SelectionPath ResolveSubPlanPath(ExecutionSubPlan subPlan) + { + SelectionPath? best = null; + + foreach (var usage in subPlan.DeliveryGroups) + { + if (usage.Path is null) + { + continue; + } + + if (best is null || usage.Path.Length > best.Length) + { + best = usage.Path; + } + } + + return best ?? SelectionPath.Root; + } + + /// + /// Finds the enclosing subplan for the given nested subplan. A subplan is + /// nested inside another subplan when some in its + /// key set has a that belongs to another + /// subplan's key set. Returns null for top-level subplans. + /// + private static ExecutionSubPlan? 
ResolveSubPlanParent( + ExecutionSubPlan subPlan, + ImmutableArray subPlans) + { + foreach (var usage in subPlan.DeliveryGroups) + { + var ancestor = usage.Parent; + while (ancestor is not null) + { + foreach (var candidate in subPlans) + { + if (ReferenceEquals(candidate, subPlan)) + { + continue; + } + + foreach (var candidateUsage in candidate.DeliveryGroups) + { + if (ReferenceEquals(candidateUsage, ancestor)) + { + return candidate; + } + } + } + + ancestor = ancestor.Parent; + } + } + + return null; + } + + /// + /// Finds the execution node in whose fetch + /// lands on (or inside) the selection set where this defer is anchored. + /// The match is the node whose + /// is the deepest path that is an ancestor of (or equal to) + /// , meaning its output contributes to the + /// enclosing object where the deferred fragment's fields get merged. + /// + private static int? ResolveDeferParentNodeId( + ImmutableArray owningNodes, + SelectionPath deferPath) + { + int? match = null; + var bestDepth = -1; + + for (var i = 0; i < owningNodes.Length; i++) + { + if (owningNodes[i] is not OperationExecutionNode op) + { + continue; + } + + if (!op.Target.IsParentOfOrSame(deferPath)) + { + continue; + } + + // Pick the deepest matching node so we attach to the most specific + // fetch (e.g. a lookup node at $.user rather than a root fetch) when + // multiple nodes could claim the defer's anchor. + if (op.Target.Length > bestDepth) + { + match = op.Id; + bestDepth = op.Target.Length; + } + } + + return match; } private static ImmutableList TransformPlanSteps( @@ -123,6 +254,10 @@ private static ImmutableList TransformPlanSteps( operationPlanStep = updated; } + // Strip @defer directives from subgraph operations. The gateway + // manages deferral itself and subgraphs should not see @defer. + operationPlanStep = StripDeferDirectivesFromStep(operationPlanStep); + // Attach variable definitions so the operation is syntactically valid // when sent to the downstream service. 
updatedPlanSteps = updatedPlanSteps.Replace( @@ -160,6 +295,80 @@ static OperationPlanStep RemoveEmptySelectionSets(OperationPlanStep step) : step with { Definition = updatedDefinition }; } + static OperationPlanStep StripDeferDirectivesFromStep(OperationPlanStep step) + { + var updated = StripDeferFromSelectionSet(step.Definition.SelectionSet); + + if (ReferenceEquals(updated, step.Definition.SelectionSet)) + { + return step; + } + + return step with { Definition = step.Definition.WithSelectionSet(updated) }; + } + + static SelectionSetNode StripDeferFromSelectionSet(SelectionSetNode selectionSet) + { + List? rewritten = null; + + for (var i = 0; i < selectionSet.Selections.Count; i++) + { + var selection = selectionSet.Selections[i]; + + if (selection is InlineFragmentNode inlineFragment) + { + var strippedDirectives = StripDeferDirective(inlineFragment.Directives); + var strippedInner = StripDeferFromSelectionSet(inlineFragment.SelectionSet); + + if (!ReferenceEquals(strippedDirectives, inlineFragment.Directives) + || !ReferenceEquals(strippedInner, inlineFragment.SelectionSet)) + { + rewritten ??= [.. selectionSet.Selections]; + rewritten[i] = inlineFragment + .WithDirectives(strippedDirectives) + .WithSelectionSet(strippedInner); + } + } + else if (selection is FieldNode { SelectionSet: not null } field) + { + var strippedInner = StripDeferFromSelectionSet(field.SelectionSet); + + if (!ReferenceEquals(strippedInner, field.SelectionSet)) + { + rewritten ??= [.. selectionSet.Selections]; + rewritten[i] = field.WithSelectionSet(strippedInner); + } + } + } + + return rewritten is null ? 
selectionSet : new SelectionSetNode(rewritten); + } + + static IReadOnlyList StripDeferDirective(IReadOnlyList directives) + { + for (var i = 0; i < directives.Count; i++) + { + if (directives[i].Name.Value.Equals( + DirectiveNames.Defer.Name, + StringComparison.Ordinal)) + { + var result = new List(directives.Count - 1); + + for (var j = 0; j < directives.Count; j++) + { + if (j != i) + { + result.Add(directives[j]); + } + } + + return result; + } + } + + return directives; + } + static OperationPlanStep AddVariableDefinitions( OperationPlanStep step, ForwardVariableRewriter.Context forwardVariableContext) diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.Defer.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.Defer.cs new file mode 100644 index 00000000000..76070997363 --- /dev/null +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.Defer.cs @@ -0,0 +1,164 @@ +using System.Collections.Immutable; +using HotChocolate.Fusion.Execution.Nodes; +using HotChocolate.Language; + +namespace HotChocolate.Fusion.Planning; + +public sealed partial class OperationPlanner +{ + /// + /// Plans each independently and + /// materializes one per unique + /// DeferUsageSet. The resulting subplan carries its own compiled + /// together with the + /// set that keys it (sorted by + /// for stable serialization). The subplan's data is delivered on the wire + /// to every in its + /// . 
+ /// + private ImmutableArray PlanDeferredSubPlans( + string id, + string hash, + DeferSplitResult splitResult, + bool emitPlannerEvents, + CancellationToken cancellationToken) + { + if (splitResult.SubPlanDescriptors.IsEmpty) + { + return []; + } + + var subPlans = ImmutableArray.CreateBuilder(splitResult.SubPlanDescriptors.Length); + + for (var i = 0; i < splitResult.SubPlanDescriptors.Length; i++) + { + cancellationToken.ThrowIfCancellationRequested(); + + var descriptor = splitResult.SubPlanDescriptors[i]; + var subPlanId = i; + + // Plan the subplan as an independent query. + var (deferredPlanSteps, deferredInternalOp) = PlanDeferredSubPlan( + id, + descriptor, + subPlanId, + emitPlannerEvents, + cancellationToken); + + // Build execution nodes for this subplan. + var (rootNodes, allNodes) = BuildDeferredExecutionNodes( + deferredInternalOp ?? descriptor.Operation, + deferredPlanSteps); + + // Compile a standalone Operation for this subplan's result mapping. + var compiledOp = deferredInternalOp ?? descriptor.Operation; + compiledOp = AddTypeNameToAbstractSelections( + compiledOp, + _schema.GetOperationType(compiledOp.Operation)); + var deferredOperation = _operationCompiler.Compile( + id + "#defer_" + subPlanId, + hash + "#defer_" + subPlanId, + compiledOp); + + // The descriptor carries the DeferUsageSet already sorted by Id + // (see DeferOperationRewriter.Split); capture it as this subplan's + // delivery groups. + var subPlan = new ExecutionSubPlan( + deferredOperation, + rootNodes, + allNodes, + descriptor.DeferUsageSet); + + subPlans.Add(subPlan); + } + + return subPlans.ToImmutable(); + } + + /// + /// Plans a single subplan using the A* planner. + /// + private (ImmutableList Steps, OperationDefinitionNode? 
InternalOp) PlanDeferredSubPlan( + string operationId, + DeferSubPlanDescriptor descriptor, + int subPlanId, + bool emitPlannerEvents, + CancellationToken cancellationToken) + { + var deferredOperation = descriptor.Operation; + + var index = SelectionSetIndexer.Create(deferredOperation); + + var (node, selectionSet) = CreateQueryPlanBase(deferredOperation, "defer", index); + + if (node.Backlog.IsEmpty) + { + return ([], null); + } + + var possiblePlans = new PlanQueue(_schema); + + foreach (var (schemaName, resolutionCost) in _schema.GetPossibleSchemas(selectionSet)) + { + possiblePlans.Enqueue( + node with + { + SchemaName = schemaName, + ResolutionCost = resolutionCost + }); + } + + if (possiblePlans.Count < 1) + { + possiblePlans.Enqueue(node); + } + + var plan = Plan(operationId + "#defer_" + subPlanId, possiblePlans, emitPlannerEvents, cancellationToken); + + if (!plan.HasValue) + { + return ([], null); + } + + return (plan.Value.Steps, plan.Value.InternalOperationDefinition); + } + + /// + /// Builds execution nodes for a subplan's plan steps. 
+ /// + private (ImmutableArray RootNodes, ImmutableArray AllNodes) BuildDeferredExecutionNodes( + OperationDefinitionNode deferredOperation, + ImmutableList planSteps) + { + if (planSteps.Count == 0) + { + return ([], []); + } + + var ctx = new ExecutionPlanBuildContext(); + var hasVariables = deferredOperation.VariableDefinitions.Count > 0; + + planSteps = TransformPlanSteps(planSteps, deferredOperation); + IndexDependencies(planSteps, ctx); + BuildExecutionNodes(planSteps, ctx, _schema, hasVariables, CancellationToken.None); + MergeAndBatchOperations(ctx, _options.EnableRequestGrouping, _options.MergePolicy); + WireExecutionDependencies(ctx); + + var rootNodes = planSteps + .Where(t => !ctx.DependenciesByStepId.ContainsKey(t.Id) && ctx.ExecutionNodes.ContainsKey(t.Id)) + .Select(t => ctx.ExecutionNodes[t.Id]) + .ToImmutableArray(); + + var allNodes = ctx.ExecutionNodes + .OrderBy(t => t.Key) + .Select(t => t.Value) + .ToImmutableArray(); + + foreach (var node in allNodes) + { + node.Seal(); + } + + return (rootNodes, allNodes); + } +} diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.cs index 48f0b9b128b..25966f4f901 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlanner.cs @@ -91,21 +91,54 @@ public OperationPlan CreatePlan( try { + // Check for @defer directives before planning. If present, we split the + // operation into a main (non-deferred) part and per-DeferUsageSet subplans. + // The main operation is planned without the deferred selections, and each + // subplan is planned independently. + // + // PERF: For non-deferred operations (the common case), the only overhead is + // the HasDeferDirective check which does a fast AST walk looking for @defer. 
+ ImmutableArray deliveryGroups = []; + ImmutableArray deferredSubPlans = []; + DeferSplitResult? deferSplit = null; + DeferPartitioningResult? partitioning = null; + var mainOperationDefinition = operationDefinition; + + if (_options.EnableDefer && DeferOperationRewriter.HasDeferDirective(operationDefinition)) + { + // The partitioner walks the original AST once and hands every + // @defer fragment a canonical DeferUsage instance (with Id, + // Path and IfVariable populated). The rewriter consumes the + // same instances so its per-set grouping and the compiler's + // later Selection._deferUsages entries share object identity. + var deferConditions = new DeferConditionCollection(); + partitioning = DeferPartitioner.Partition(operationDefinition, deferConditions); + + var rewriter = new DeferOperationRewriter(_options.InlineUnlabeledDeferFragments); + var splitResult = rewriter.Split(operationDefinition, partitioning); + + if (!splitResult.SubPlanDescriptors.IsEmpty) + { + deferSplit = splitResult; + mainOperationDefinition = splitResult.MainOperation; + } + } + // We first need to create an index to keep track of the logical selections // sets before we can branch them. This allows us to inline requirements later // into the right place. - var index = SelectionSetIndexer.Create(operationDefinition); + var index = SelectionSetIndexer.Create(mainOperationDefinition); // Next, we create the seed plan with a set of initial work items exploring the root selection set. 
- var (node, selectionSet) = operationDefinition.Operation switch + var (node, selectionSet) = mainOperationDefinition.Operation switch { - OperationType.Query => CreateQueryPlanBase(operationDefinition, shortHash, index), - OperationType.Mutation => CreateMutationPlanBase(operationDefinition, shortHash, index), - OperationType.Subscription => CreateSubscriptionPlanBase(operationDefinition, shortHash, index), + OperationType.Query => CreateQueryPlanBase(mainOperationDefinition, shortHash, index), + OperationType.Mutation => CreateMutationPlanBase(mainOperationDefinition, shortHash, index), + OperationType.Subscription => CreateSubscriptionPlanBase(mainOperationDefinition, shortHash, index), _ => throw new ArgumentOutOfRangeException() }; - var internalOperationDefinition = operationDefinition; + var internalOperationDefinition = mainOperationDefinition; ImmutableList planSteps = []; // The backlog is only empty for pure introspection queries, which the @@ -150,14 +183,35 @@ node with internalOperationDefinition = AddTypeNameToAbstractSelections( internalOperationDefinition, - _schema.GetOperationType(operationDefinition.Operation)); + _schema.GetOperationType(mainOperationDefinition.Operation)); } + // Always compile from the planner's internal definition. For defer, + // this is the stripped main operation (without deferred fragments), + // which ensures the result mapper only includes non-deferred fields. var operation = _operationCompiler.Compile(id, hash, internalOperationDefinition); + + // Plan deferred subplans if @defer was detected. Each unique + // DeferUsageSet becomes one ExecutionSubPlan; the set of all + // DeferUsage instances (one per @defer occurrence) is kept on the + // plan for wire-level delivery-group identity. 
+ if (deferSplit.HasValue && partitioning is not null) + { + deliveryGroups = partitioning.AllDeferUsages; + deferredSubPlans = PlanDeferredSubPlans( + id, + hash, + deferSplit.Value, + eventSourceEnabled, + cancellationToken); + } + var operationPlan = BuildExecutionPlan( operation, - operationDefinition, + mainOperationDefinition, planSteps, + deliveryGroups, + deferredSubPlans, searchSpace, expandedNodes, cancellationToken); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlannerOptions.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlannerOptions.cs index 8ceb5f9678d..e915566f543 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlannerOptions.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Planning/OperationPlannerOptions.cs @@ -57,6 +57,35 @@ public bool EnableRequestGrouping } } = true; + /// + /// Gets or sets whether @defer support is enabled in the planner. + /// When disabled, the planner skips defer processing entirely. + /// + public bool EnableDefer + { + get; + set + { + ExpectMutableOptions(); + field = value; + } + } = true; + + /// + /// When enabled, nested @defer fragments without a label are inlined + /// into their parent deferred group instead of producing a separate group. + /// This reduces incremental delivery overhead at the cost of less granular streaming. + /// + public bool InlineUnlabeledDeferFragments + { + get; + set + { + ExpectMutableOptions(); + field = value; + } + } = true; + /// /// Gets or sets how aggressively structurally-identical operations are merged /// to reduce downstream request count. 
Cycle safety is always enforced regardless diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs index 615d774f6c1..0d879cb07ac 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.Designer.cs @@ -165,6 +165,12 @@ internal static string OperationPlan_NodeNotFound { } } + internal static string OperationPlan_DeferredSubPlanParentNotFound { + get { + return ResourceManager.GetString("OperationPlan_DeferredSubPlanParentNotFound", resourceCulture); + } + } + internal static string OperationBatchExecutionNode_MissingBatchResult { get { return ResourceManager.GetString("OperationBatchExecutionNode_MissingBatchResult", resourceCulture); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx index ce9782f6f98..44d9126ac97 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Properties/FusionExecutionResources.resx @@ -78,6 +78,9 @@ No execution node with id '{0}' exists in this plan. + + Could not resolve parent execution node for deferred subplan at path '{0}'. + The batch response does not contain any result for operation '{0}'. 
diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.Text.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.Text.cs index 40c5f02bae5..82f9c1d35e4 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.Text.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.Text.cs @@ -9,7 +9,7 @@ public sealed partial class CompositeResultDocument { internal string? GetString(Cursor cursor, ElementTokenType expectedType) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); var tokenType = row.TokenType; @@ -53,7 +53,7 @@ internal string GetNameOfPropertyValue(Cursor valueCursor) internal ReadOnlySpan GetPropertyNameRaw(Cursor valueCursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); // The property name is stored one row before the value var nameCursor = valueCursor + (-1); @@ -71,7 +71,7 @@ internal string GetRawValueAsString(Cursor cursor) internal ReadOnlySpan GetRawValue(Cursor cursor, bool includeQuotes) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); @@ -102,7 +102,7 @@ internal string GetPropertyRawValueAsString(Cursor valueCursor) private ReadOnlySpan GetPropertyRawValue(Cursor valueCursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); // The property name is stored one row before the value Debug.Assert(_metaDb.GetElementTokenType(valueCursor - 1) == ElementTokenType.PropertyName); @@ -122,7 +122,7 @@ private ReadOnlySpan GetPropertyRawValue(Cursor valueCursor) internal bool TextEquals(Cursor cursor, ReadOnlySpan otherText, bool isPropertyName) { - ObjectDisposedException.ThrowIf(_disposed, this); + 
ObjectDisposedException.ThrowIf(_disposed != 0, this); byte[]? otherUtf8TextArray = null; @@ -162,7 +162,7 @@ OperationStatus.DestinationTooSmall or internal bool TextEquals(Cursor cursor, ReadOnlySpan otherUtf8Text, bool isPropertyName, bool shouldUnescape) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var matchCursor = isPropertyName ? cursor + (-1) : cursor; var row = _metaDb.Get(matchCursor); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetProperty.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetProperty.cs index 27d43b8de2d..7c926b07f8c 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetProperty.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetProperty.cs @@ -11,7 +11,7 @@ internal bool TryGetNamedPropertyValue( string propertyName, out CompositeResultElement value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.GetValue(ref startCursor); CheckExpectedType(ElementTokenType.StartObject, row.TokenType); @@ -121,7 +121,7 @@ internal bool TryGetNamedPropertyValue( ReadOnlySpan propertyName, out CompositeResultElement value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.GetValue(ref startCursor); CheckExpectedType(ElementTokenType.StartObject, row.TokenType); @@ -245,7 +245,7 @@ internal CompositeResultElement GetPropertyBySelectionId( Cursor startCursor, int selectionId) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.GetValue(ref startCursor); Debug.Assert(row.TokenType is ElementTokenType.StartObject); @@ -263,14 +263,14 @@ internal CompositeResultElement 
GetPropertyBySelectionId( internal Cursor GetStartCursor(Cursor cursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); (cursor, _) = _metaDb.GetStartCursor(cursor); return cursor; } internal Cursor GetEndCursor(Cursor cursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); return cursor + _metaDb.GetNumberOfRows(cursor); } } diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetValue.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetValue.cs index 1c81fd4cbd5..6467fd1a752 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetValue.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.TryGetValue.cs @@ -6,7 +6,7 @@ public sealed partial class CompositeResultDocument { internal bool TryGetValue(Cursor cursor, out sbyte value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -26,7 +26,7 @@ internal bool TryGetValue(Cursor cursor, out sbyte value) internal bool TryGetValue(Cursor cursor, out byte value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -46,7 +46,7 @@ internal bool TryGetValue(Cursor cursor, out byte value) internal bool TryGetValue(Cursor cursor, out short value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -66,7 +66,7 @@ internal bool TryGetValue(Cursor cursor, out short value) internal bool 
TryGetValue(Cursor cursor, out ushort value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -86,7 +86,7 @@ internal bool TryGetValue(Cursor cursor, out ushort value) internal bool TryGetValue(Cursor cursor, out int value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -106,7 +106,7 @@ internal bool TryGetValue(Cursor cursor, out int value) internal bool TryGetValue(Cursor cursor, out uint value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -126,7 +126,7 @@ internal bool TryGetValue(Cursor cursor, out uint value) internal bool TryGetValue(Cursor cursor, out long value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -146,7 +146,7 @@ internal bool TryGetValue(Cursor cursor, out long value) internal bool TryGetValue(Cursor cursor, out ulong value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -166,7 +166,7 @@ internal bool TryGetValue(Cursor cursor, out ulong value) internal bool TryGetValue(Cursor cursor, out double value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -186,7 +186,7 @@ internal bool TryGetValue(Cursor cursor, out double 
value) internal bool TryGetValue(Cursor cursor, out float value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); @@ -206,7 +206,7 @@ internal bool TryGetValue(Cursor cursor, out float value) internal bool TryGetValue(Cursor cursor, out decimal value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(cursor); CheckExpectedType(ElementTokenType.Number, row.TokenType); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.cs index 89427e67fc1..c76b0722a7b 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultDocument.cs @@ -13,18 +13,21 @@ public sealed partial class CompositeResultDocument : IDisposable private readonly List _sources = []; private readonly Operation _operation; private readonly ulong _includeFlags; + private readonly ulong _deferFlags; private readonly PathSegmentLocalPool? _pathPool; internal MetaDb _metaDb; - private bool _disposed; + private int _disposed; internal CompositeResultDocument( Operation operation, ulong includeFlags, + ulong deferFlags = 0, PathSegmentLocalPool? 
pathPool = null) { _metaDb = MetaDb.CreateForEstimatedRows(Cursor.RowsPerChunk * 8); _operation = operation; _includeFlags = includeFlags; + _deferFlags = deferFlags; _pathPool = pathPool; Data = CreateObject(Cursor.Zero, operation.RootSelectionSet); @@ -83,7 +86,7 @@ internal Operation GetOperation() internal CompositeResultElement GetArrayIndexElement(Cursor current, int arrayIndex) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.GetValue(ref current); CheckExpectedType(ElementTokenType.StartArray, row.TokenType); @@ -99,7 +102,7 @@ internal CompositeResultElement GetArrayIndexElement(Cursor current, int arrayIn internal int GetArrayLength(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.GetValue(ref current); CheckExpectedType(ElementTokenType.StartArray, row.TokenType); @@ -109,7 +112,7 @@ internal int GetArrayLength(Cursor current) internal int GetPropertyCount(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.GetValue(ref current); CheckExpectedType(ElementTokenType.StartObject, row.TokenType); @@ -226,7 +229,7 @@ internal CompositeResultElement GetParent(Cursor current) internal bool IsInvalidated(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(current); @@ -250,7 +253,7 @@ internal bool IsInvalidated(Cursor current) internal bool IsNullOrInvalidated(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(current); @@ -274,7 +277,7 @@ internal bool IsNullOrInvalidated(Cursor current) internal bool IsInternalProperty(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + 
ObjectDisposedException.ThrowIf(_disposed != 0, this); // The flag sits on the property row (one before value) var propertyCursor = current.AddRows(-1); @@ -284,7 +287,7 @@ internal bool IsInternalProperty(Cursor current) internal void Invalidate(Cursor current) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _metaDb.Get(current); @@ -469,7 +472,7 @@ private void WriteEmptyProperty(Cursor parent, Selection selection) flags = ElementFlags.IsInternal; } - if (!selection.IsIncluded(_includeFlags)) + if (!selection.IsIncluded(_includeFlags) || selection.IsDeferred(_deferFlags)) { flags |= ElementFlags.IsExcluded; } @@ -498,10 +501,19 @@ private static void CheckExpectedType(ElementTokenType expected, ElementTokenTyp public void Dispose() { - if (!_disposed) + ReturnRentedMemory(); + GC.SuppressFinalize(this); + } + + private void ReturnRentedMemory() + { + if (Interlocked.Exchange(ref _disposed, 1) != 0) { - _metaDb.Dispose(); - _disposed = true; + return; } + + _metaDb.Dispose(); } + + ~CompositeResultDocument() => ReturnRentedMemory(); } diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultElement.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultElement.cs index bd5841b804a..bec94988162 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultElement.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/CompositeResultElement.cs @@ -5,6 +5,7 @@ using HotChocolate.Text.Json; using HotChocolate.Types; using static HotChocolate.Fusion.Properties.FusionExecutionResources; +using static HotChocolate.Fusion.Text.Json.CompositeResultDocument; #pragma warning disable CS1574, CS1584, CS1581, CS1580 @@ -13,9 +14,9 @@ namespace HotChocolate.Fusion.Text.Json; public readonly partial struct CompositeResultElement { private readonly CompositeResultDocument _parent; - private readonly CompositeResultDocument.Cursor 
_cursor; + private readonly Cursor _cursor; - internal CompositeResultElement(CompositeResultDocument parent, CompositeResultDocument.Cursor cursor) + internal CompositeResultElement(CompositeResultDocument parent, Cursor cursor) { // parent is usually not null, but the Current property // on the enumerators (when initialized as `default`) can @@ -42,7 +43,7 @@ internal void WriteTo(JsonWriter jsonWriter) { CheckValidInstance(); - var formatter = new CompositeResultDocument.RawJsonFormatter(_parent, jsonWriter); + var formatter = new RawJsonFormatter(_parent, jsonWriter); var row = _parent._metaDb.Get(_cursor); formatter.WriteValue(_cursor, row); } @@ -50,7 +51,7 @@ internal void WriteTo(JsonWriter jsonWriter) /// /// Gets the internal meta-db cursor. /// - internal CompositeResultDocument.Cursor Cursor => _cursor; + internal Cursor Cursor => _cursor; [DebuggerBrowsable(DebuggerBrowsableState.Never)] private ElementTokenType TokenType => _parent?.GetElementTokenType(_cursor) ?? ElementTokenType.None; @@ -117,7 +118,7 @@ public Selection? Selection { CheckValidInstance(); - if (_cursor == CompositeResultDocument.Cursor.Zero) + if (_cursor == Cursor.Zero) { return null; } @@ -135,7 +136,7 @@ public IType? 
Type { get { - if (_cursor == CompositeResultDocument.Cursor.Zero) + if (_cursor == Cursor.Zero) { return null; } @@ -236,7 +237,7 @@ public bool IsNullable { CheckValidInstance(); - if (_cursor == CompositeResultDocument.Cursor.Zero) + if (_cursor == Cursor.Zero) { return false; } diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Parse.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Parse.cs index e9a2e5cb2e8..a973f5c258c 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Parse.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Parse.cs @@ -17,7 +17,12 @@ internal static SourceResultDocument CreateEmptyObject() internal static SourceResultDocument Parse( byte[] data, int size) - => Parse([data], size, usedChunks: 1, pooledMemory: true); + { + // Fixed-size pooled chunks are always BufferSize. Smaller arrays are typically + // ad-hoc input buffers and must not be returned to JsonMemory. + var pooledMemory = data.Length == JsonMemory.BufferSize; + return Parse([data], size, usedChunks: 1, pooledMemory); + } internal static SourceResultDocument Parse( byte[][] dataChunks, diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Text.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Text.cs index d83fc08a282..73486a53c16 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Text.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.Text.cs @@ -11,7 +11,7 @@ public sealed partial class SourceResultDocument { internal string? 
GetString(Cursor cursor, JsonTokenType expectedType) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); var rowTokenType = row.TokenType; @@ -32,7 +32,7 @@ public sealed partial class SourceResultDocument internal bool TextEquals(Cursor cursor, ReadOnlySpan otherText, bool isPropertyName) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); byte[]? otherUtf8TextArray = null; var length = checked(otherText.Length * JsonConstants.MaxExpansionFactorWhileTranscoding); @@ -74,7 +74,7 @@ internal bool TextEquals(Cursor cursor, ReadOnlySpan otherText, bool isPro internal bool TextEquals(Cursor cursor, ReadOnlySpan otherUtf8Text, bool isPropertyName, bool shouldUnescape) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); // The propertyName is stored exactly one row before its value var matchCursor = isPropertyName ? 
cursor - 1 : cursor; @@ -117,7 +117,7 @@ internal string GetNameOfPropertyValue(Cursor valueCursor) internal ReadOnlySpan GetPropertyNameRaw(Cursor valueCursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(valueCursor - 1); Debug.Assert(row.TokenType is JsonTokenType.PropertyName); @@ -139,7 +139,7 @@ internal string GetPropertyRawValueAsString(Cursor valueCursor) internal ReadOnlySpan GetRawValue(Cursor cursor, bool includeQuotes) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -165,7 +165,7 @@ internal ReadOnlySpan GetRawValue(Cursor cursor, bool includeQuotes) internal ReadOnlyMemory GetRawValueAsMemory(Cursor cursor, bool includeQuotes) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -191,7 +191,7 @@ internal ReadOnlyMemory GetRawValueAsMemory(Cursor cursor, bool includeQuo internal ValueRange GetRawValuePointer(Cursor cursor, bool includeQuotes) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -217,7 +217,7 @@ internal ValueRange GetRawValuePointer(Cursor cursor, bool includeQuotes) private ReadOnlySpan GetPropertyRawValue(Cursor valueCursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); // The property name is stored one row before the value var nameRow = _parsedData.Get(valueCursor - 1); @@ -248,7 +248,7 @@ private ReadOnlySpan GetPropertyRawValue(Cursor valueCursor) internal Cursor GetEndIndex(Cursor cursor, bool includeEndElement) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); diff --git 
a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetProperty.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetProperty.cs index 6dd076590c1..935bce75135 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetProperty.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetProperty.cs @@ -12,7 +12,7 @@ internal bool TryGetNamedPropertyValue( ReadOnlySpan propertyName, out SourceResultElement value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(objectCursor); @@ -101,7 +101,7 @@ internal bool TryGetNamedPropertyValue( ReadOnlySpan propertyName, out SourceResultElement value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(objectCursor); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetValue.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetValue.cs index 0763b28be56..ba251152185 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetValue.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.TryGetValue.cs @@ -7,7 +7,7 @@ public sealed partial class SourceResultDocument { internal bool TryGetValue(Cursor cursor, out sbyte value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -28,7 +28,7 @@ internal bool TryGetValue(Cursor cursor, out sbyte value) internal bool TryGetValue(Cursor cursor, out byte value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -49,7 +49,7 @@ internal bool TryGetValue(Cursor 
cursor, out byte value) internal bool TryGetValue(Cursor cursor, out short value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -70,7 +70,7 @@ internal bool TryGetValue(Cursor cursor, out short value) internal bool TryGetValue(Cursor cursor, out ushort value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -91,7 +91,7 @@ internal bool TryGetValue(Cursor cursor, out ushort value) internal bool TryGetValue(Cursor cursor, out int value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -112,7 +112,7 @@ internal bool TryGetValue(Cursor cursor, out int value) internal bool TryGetValue(Cursor cursor, out uint value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -133,7 +133,7 @@ internal bool TryGetValue(Cursor cursor, out uint value) internal bool TryGetValue(Cursor cursor, out long value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -154,7 +154,7 @@ internal bool TryGetValue(Cursor cursor, out long value) internal bool TryGetValue(Cursor cursor, out ulong value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -175,7 +175,7 @@ internal bool TryGetValue(Cursor cursor, out ulong value) internal bool TryGetValue(Cursor cursor, out double value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -196,7 +196,7 @@ internal bool TryGetValue(Cursor cursor, out double value) internal 
bool TryGetValue(Cursor cursor, out float value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -217,7 +217,7 @@ internal bool TryGetValue(Cursor cursor, out float value) internal bool TryGetValue(Cursor cursor, out decimal value) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); diff --git a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.cs b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.cs index 2febb29311c..886e3260cf4 100644 --- a/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.cs +++ b/src/HotChocolate/Fusion/src/Fusion.Execution/Text/Json/SourceResultDocument.cs @@ -13,7 +13,7 @@ public sealed partial class SourceResultDocument : IDisposable private readonly byte[][] _dataChunks; private readonly int _usedChunks; private readonly bool _pooledMemory; - private bool _disposed; + private int _disposed; private SourceResultDocument(MetaDb parsedData, byte[][] dataChunks, int usedChunks, bool pooledMemory) { @@ -34,7 +34,7 @@ internal JsonTokenType GetElementTokenType(Cursor cursor) internal int GetArrayLength(Cursor cursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -45,7 +45,7 @@ internal int GetArrayLength(Cursor cursor) internal int GetPropertyCount(Cursor cursor) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = _parsedData.Get(cursor); @@ -56,7 +56,7 @@ internal int GetPropertyCount(Cursor cursor) internal SourceResultElement GetArrayIndexElement(Cursor startCursor, int arrayIndex) { - ObjectDisposedException.ThrowIf(_disposed, this); + ObjectDisposedException.ThrowIf(_disposed != 0, this); var row = 
_parsedData.Get(startCursor); @@ -234,25 +234,33 @@ private static void CheckExpectedType(JsonTokenType expected, JsonTokenType actu public void Dispose() { - if (!_disposed) - { - if (_pooledMemory) - { - JsonMemory.Return(JsonMemoryKind.Json, _dataChunks, _usedChunks); + ReturnRentedMemory(); + GC.SuppressFinalize(this); + } - if (_dataChunks.Length > 1) - { - _dataChunks.AsSpan(0, _usedChunks).Clear(); - ArrayPool.Shared.Return(_dataChunks); - } - } + private void ReturnRentedMemory() + { + if (Interlocked.Exchange(ref _disposed, 1) != 0) + { + return; + } - _parsedData.Dispose(); + if (_pooledMemory) + { + JsonMemory.Return(JsonMemoryKind.Json, _dataChunks, _usedChunks); - _disposed = true; + if (_dataChunks.Length > 1) + { + _dataChunks.AsSpan(0, _usedChunks).Clear(); + ArrayPool.Shared.Return(_dataChunks); + } } + + _parsedData.Dispose(); } + ~SourceResultDocument() => ReturnRentedMemory(); + public override string ToString() { if (_usedChunks == 0) diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/DeferTests.cs b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/DeferTests.cs new file mode 100644 index 00000000000..05aa0fdb72d --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/DeferTests.cs @@ -0,0 +1,715 @@ +using System.Text.Json; +using HotChocolate.Transport; +using HotChocolate.Transport.Http; + +namespace HotChocolate.Fusion; + +public class DeferTests : FusionTestBase +{ + [Fact] + public async Task Defer_Single_Fragment_Returns_Incremental_Response() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + email: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + reviews: [Review!]! + } + + type Review { + title: String! + body: String! 
+ } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + query GetUser { + user(id: "1") { + name + ... @defer(label: "reviews") { + reviews { + title + body + } + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact] + public async Task Defer_IfFalse_Variable_Should_Return_NonStreamed_Result() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + email: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + reviews: [Review!]! + } + + type Review { + title: String! + body: String! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + query: """ + query GetUser($shouldDefer: Boolean!) { + user(id: "1") { + name + ... 
@defer(if: $shouldDefer, label: "reviews") { + reviews { + title + body + } + } + } + } + """, + variables: new Dictionary { ["shouldDefer"] = false }); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact] + public async Task Defer_IfTrue_Variable_Should_Return_Streamed_Result() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + email: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + reviews: [Review!]! + } + + type Review { + title: String! + body: String! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + query: """ + query GetUser($shouldDefer: Boolean!) { + user(id: "1") { + name + ... @defer(if: $shouldDefer, label: "reviews") { + reviews { + title + body + } + } + } + } + """, + variables: new Dictionary { ["shouldDefer"] = true }); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact] + public async Task Defer_Nested_Should_Return_Incremental_Response_In_Order() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! 
+ email: String! + address: String! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + query { + user(id: "1") { + name + ... @defer(label: "outer") { + email + ... @defer(label: "inner") { + address + } + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact] + public async Task Defer_Nested_Without_Label_On_Inner_Should_Return_Incremental_Response() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + query { + user(id: "1") { + name + ... @defer(label: "outer") { + email + ... @defer { + address + } + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact] + public async Task Defer_Two_Siblings_With_Overlapping_Fields_Should_Deduplicate() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! 
+ } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + query { + user(id: "1") { + name + ... @defer(label: "contact") { + email + } + ... @defer(label: "location") { + email + address + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact] + public async Task Defer_Two_Siblings_Sharing_Field_Emit_One_Incremental_That_Completes_Both_Groups() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + query { + user(id: "1") { + name + ... @defer(label: "contact") { + email + } + ... 
@defer(label: "location") { + email + address + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + // The stable-stream snapshot lays out the per-frame timeline (pending / + // incremental / completed) so the shared subplan emitting once under the + // best delivery-group id while still completing both groups is visible + // as a single block. + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact] + public async Task Defer_With_Error_In_Deferred_Fragment_Should_Return_Error_In_Incremental_Payload() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! @error + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + query GetUser { + user(id: "1") { + name + ... @defer(label: "email") { + email + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert + await MatchSnapshotAsync(gateway, request, result, stableStream: true); + } + + [Fact(Skip = "Requires validation of @skip/@include interaction with @defer at the planning level")] + public async Task Defer_With_Skip_Directive_Should_Skip_Deferred_Fragment() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + email: String! 
+ } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + reviews: [Review!]! + } + + type Review { + title: String! + body: String! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + query GetUser { + user(id: "1") { + name + ... @defer(label: "reviews") @include(if: false) { + reviews { + title + body + } + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert: with @include(if: false), the deferred fragment should be entirely + removed during planning, resulting in a single non-incremental response. + var rawBody = await result.HttpResponseMessage.Content.ReadAsStringAsync(); + var payloads = rawBody + .Split('\n', StringSplitOptions.RemoveEmptyEntries) + .Select(line => JsonDocument.Parse(line)) + .ToList(); + + Assert.Single(payloads); + + var initial = payloads[0].RootElement; + Assert.True(initial.TryGetProperty("data", out var data)); + Assert.Equal("User: VXNlcjox", data.GetProperty("user").GetProperty("name").GetString()); + + // Reviews should NOT be in the response since the fragment was skipped + Assert.False(data.GetProperty("user").TryGetProperty("reviews", out _)); + + // No incremental delivery + Assert.False(initial.TryGetProperty("pending", out _)); + Assert.False(initial.TryGetProperty("incremental", out _)); + + foreach (var doc in payloads) + { + doc.Dispose(); + } + } + + [Fact(Skip = "Known limitation: @defer on mutations forces Query operation type in deferred plan")] + public async Task Defer_On_Mutation_Result_Should_Return_Incremental_Response() + { + // arrange + using var server1 = CreateSourceSchema( + "A", + """ + type Query { + productById(id: ID!): 
Product @lookup + } + + type Mutation { + createProduct(name: String!): Product! + } + + type Product @key(fields: "id") { + id: ID! + name: String! + } + """); + + using var server2 = CreateSourceSchema( + "B", + """ + type Query { + productById(id: ID!): Product @lookup + } + + type Product @key(fields: "id") { + id: ID! + price: Float! + } + """); + + using var gateway = await CreateCompositeSchemaAsync( + [ + ("A", server1), + ("B", server2) + ]); + + // act + using var client = GraphQLHttpClient.Create(gateway.CreateClient()); + + var request = new OperationRequest( + """ + mutation { + createProduct(name: "Widget") { + name + ... @defer(label: "pricing") { + price + } + } + } + """); + + using var result = await client.PostAsync( + request, + new Uri("http://localhost:5000/graphql")); + + // assert: initial payload should have the mutation result with name, + deferred payload should deliver the price from source B. + var rawBody = await result.HttpResponseMessage.Content.ReadAsStringAsync(); + var payloads = rawBody + .Split('\n', StringSplitOptions.RemoveEmptyEntries) + .Select(line => JsonDocument.Parse(line)) + .ToList(); + + Assert.Equal(2, payloads.Count); + + // --- Initial payload --- + var initial = payloads[0].RootElement; + Assert.True(initial.TryGetProperty("data", out var data)); + Assert.True(data.TryGetProperty("createProduct", out var product)); + Assert.Equal("Product: UHJvZHVjdDox", product.GetProperty("name").GetString()); + Assert.True(initial.GetProperty("hasNext").GetBoolean()); + + // --- Deferred payload --- + var deferred = payloads[1].RootElement; + Assert.True(deferred.TryGetProperty("incremental", out var incremental)); + Assert.Equal(1, incremental.GetArrayLength()); + + var incrementalData = incremental[0].GetProperty("data"); + Assert.True( + incrementalData.GetProperty("createProduct").TryGetProperty("price", out _)); + + Assert.False(deferred.GetProperty("hasNext").GetBoolean()); + + foreach (var doc in payloads) + { + 
doc.Dispose(); + } + } +} diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.MatchSnapshot.cs b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.MatchSnapshot.cs index 12ab4921554..367cda5dccf 100644 --- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.MatchSnapshot.cs +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.MatchSnapshot.cs @@ -1,9 +1,11 @@ +using System.Buffers; using System.Collections.Frozen; using System.Net; using System.Net.Http.Headers; using System.Text; using System.Text.Json; using System.Text.RegularExpressions; +using CookieCrumble.HotChocolate.Formatters; using HotChocolate.Buffers; using HotChocolate.Fusion.Execution; using HotChocolate.Fusion.Execution.Clients; @@ -24,16 +26,40 @@ protected async Task MatchSnapshotAsync( OperationRequest request, GraphQLHttpResponse response, string? postFix = null, - RawRequest? rawRequest = null) + RawRequest? rawRequest = null, + bool stableStream = false) { var snapshot = new Snapshot(postFix, ".yaml"); var results = new List(); + string? stableStreamText = null; - // We first wait and capture all possible gateway responses. 
- await foreach (var result in response.ReadAsResultStreamAsync()) + if (stableStream) { - results.Add(result); + var bodyBytes = await response.HttpResponseMessage.Content.ReadAsByteArrayAsync(); + + using var formatterResponseMessage = CloneHttpResponseMessage(response.HttpResponseMessage, bodyBytes); + using var formatterResponse = new GraphQLHttpResponse(formatterResponseMessage); + + var buffer = new ArrayBufferWriter(); + SnapshotValueFormatters.GraphQLHttpStable.Format(buffer, formatterResponse); + stableStreamText = Encoding.UTF8.GetString(buffer.WrittenSpan); + + using var resultsResponseMessage = CloneHttpResponseMessage(response.HttpResponseMessage, bodyBytes); + using var resultsResponse = new GraphQLHttpResponse(resultsResponseMessage); + + await foreach (var result in resultsResponse.ReadAsResultStreamAsync()) + { + results.Add(result); + } + } + else + { + // We first wait and capture all possible gateway responses. + await foreach (var result in response.ReadAsResultStreamAsync()) + { + results.Add(result); + } } var testServerRegistrations = gateway.Services @@ -50,7 +76,7 @@ protected async Task MatchSnapshotAsync( WriteOperationRequest(writer, request, rawRequest); writer.Unindent(); - WriteResults(writer, results); + WriteResults(writer, results, stableStreamText); writer.WriteLine("sourceSchemas:"); writer.Indent(); @@ -116,8 +142,17 @@ private static async Task TryWriteOperationPlanAsync( } } - private void WriteResults(CodeWriter writer, List results) + private void WriteResults(CodeWriter writer, List results, string? 
stableStreamText = null) { + if (stableStreamText is not null) + { + writer.WriteLine("stableResponseStream: |"); + writer.Indent(); + WriteMultilineString(writer, stableStreamText.TrimEnd()); + writer.Unindent(); + return; + } + if (results is [{ } singleResult]) { writer.WriteLine("response:"); @@ -149,6 +184,39 @@ private void WriteResults(CodeWriter writer, List results) } } + private static HttpResponseMessage CloneHttpResponseMessage(HttpResponseMessage source, byte[] bodyBytes) + { + var clone = new HttpResponseMessage(source.StatusCode); + + foreach (var header in source.Headers) + { + clone.Headers.TryAddWithoutValidation(header.Key, header.Value); + } + + var content = new ByteArrayContent(bodyBytes); + + if (source.Content.Headers.ContentType is not null) + { + content.Headers.ContentType = + MediaTypeHeaderValue.Parse(source.Content.Headers.ContentType.ToString()); + } + + foreach (var header in source.Content.Headers) + { + if (string.Equals(header.Key, "Content-Type", StringComparison.OrdinalIgnoreCase) + || string.Equals(header.Key, "Content-Length", StringComparison.OrdinalIgnoreCase)) + { + continue; + } + + content.Headers.TryAddWithoutValidation(header.Key, header.Value); + } + + clone.Content = content; + + return clone; + } + private static void WriteResult(CodeWriter writer, OperationResult result) { var memoryStream = new MemoryStream(); @@ -220,7 +288,12 @@ private static void WriteSourceSchema( writer.WriteLine("interactions:"); writer.Indent(); - foreach (var (_, interaction) in interactions.OrderBy(x => x.Key)) + // Order by (OperationPlanId, NodeId) so parallel mini-plans (e.g. + // deferred groups) render in a stable order independent of the + // runtime arrival order of their subgraph responses. 
+ foreach (var interaction in interactions.Values + .OrderBy(x => x.OperationPlanId, StringComparer.Ordinal) + .ThenBy(x => x.NodeId)) { var request = interaction.Request!; diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.cs b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.cs index 45660675881..2c7fc333973 100644 --- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.cs +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/FusionTestBase.cs @@ -43,10 +43,16 @@ protected async Task CreateCompositeSchemaAsync( var gatewayServices = new ServiceCollection(); var gatewayBuilder = gatewayServices.AddGraphQLGatewayServer(); var interactions = new ConcurrentDictionary>(); + // Interactions are keyed by an atomically-incremented int, but looked up + // by (OperationPlanId, NodeId) so that parallel mini-plans (e.g. deferred + // execution groups) that reuse the same node id within their own plan do + // not collide onto the same bucket. 
var nodeToInteractionId = - new ConcurrentDictionary>(StringComparer.Ordinal); + new ConcurrentDictionary>( + StringComparer.Ordinal); var requestToInteractionId = new ConcurrentDictionary>(StringComparer.Ordinal); + var nextInteractionId = 0; foreach (var (name, server) in sourceSchemaServers) { @@ -199,10 +205,18 @@ SourceSchemaInteraction GetOrCreateInteractionForRequest( schemaName, _ => new ConcurrentDictionary(ReferenceEqualityComparer.Instance)); - var interactionId = schemaRequestToInteractionId.GetOrAdd(requestMessage, _ => node.Id); - schemaNodeToInteractionId[node.Id] = interactionId; + var planId = context.OperationPlan.Id; + var lookupKey = (planId, node.Id); - return schemaInteractions.GetOrAdd(interactionId, _ => new SourceSchemaInteraction()); + var interactionId = schemaRequestToInteractionId.GetOrAdd( + requestMessage, + _ => Interlocked.Increment(ref nextInteractionId)); + schemaNodeToInteractionId[lookupKey] = interactionId; + + var interaction = schemaInteractions.GetOrAdd(interactionId, _ => new SourceSchemaInteraction()); + interaction.OperationPlanId = planId; + interaction.NodeId = node.Id; + return interaction; } SourceSchemaInteraction GetSourceSchemaInteraction(OperationPlanContext context, ExecutionNode node) @@ -211,12 +225,23 @@ SourceSchemaInteraction GetSourceSchemaInteraction(OperationPlanContext context, var schemaInteractions = interactions.GetOrAdd(schemaName, _ => []); var schemaNodeToInteractionId = nodeToInteractionId.GetOrAdd(schemaName, _ => []); - if (schemaNodeToInteractionId.TryGetValue(node.Id, out var interactionId)) + var planId = context.OperationPlan.Id; + var lookupKey = (planId, node.Id); + + if (schemaNodeToInteractionId.TryGetValue(lookupKey, out var interactionId)) { - return schemaInteractions.GetOrAdd(interactionId, _ => new SourceSchemaInteraction()); + var existing = schemaInteractions.GetOrAdd(interactionId, _ => new SourceSchemaInteraction()); + existing.OperationPlanId ??= planId; + existing.NodeId ??= 
node.Id; + return existing; } - return schemaInteractions.GetOrAdd(node.Id, _ => new SourceSchemaInteraction()); + var fallbackId = Interlocked.Increment(ref nextInteractionId); + schemaNodeToInteractionId[lookupKey] = fallbackId; + var fallback = schemaInteractions.GetOrAdd(fallbackId, _ => new SourceSchemaInteraction()); + fallback.OperationPlanId ??= planId; + fallback.NodeId ??= node.Id; + return fallback; } } @@ -270,6 +295,20 @@ protected class SourceSchemaInteraction public string? ContentType { get; set; } + /// + /// The that owned the execution node + /// producing this interaction. Used for stable ordering in snapshots + /// when parallel mini-plans (e.g. deferred execution groups) produce + /// concurrent subgraph calls. + /// + public string? OperationPlanId { get; set; } + + /// + /// The within its owning operation plan. + /// Used together with for stable ordering. + /// + public int? NodeId { get; set; } + public sealed class RawSourceSchemaRequest { public required MemoryStream Body { get; init; } diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_IfFalse_Variable_Should_Return_NonStreamed_Result.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_IfFalse_Variable_Should_Return_NonStreamed_Result.yaml new file mode 100644 index 00000000000..b8cf11d1f5a --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_IfFalse_Variable_Should_Return_NonStreamed_Result.yaml @@ -0,0 +1,237 @@ +title: Defer_IfFalse_Variable_Should_Return_NonStreamed_Result +request: + document: | + query GetUser($shouldDefer: Boolean!) { + user(id: "1") { + name + ... 
@defer(if: $shouldDefer, label: "reviews") { + reviews { + title + body + } + } + } + } + variables: | + { + "shouldDefer": false + } +stableResponseStream: | + { + "data": { + "user": { + "name": "User: VXNlcjox", + "reviews": [ + { + "body": "Review: UmV2aWV3OjE=", + "title": "Review: UmV2aWV3OjE=" + }, + { + "body": "Review: UmV2aWV3OjI=", + "title": "Review: UmV2aWV3OjI=" + }, + { + "body": "Review: UmV2aWV3OjM=", + "title": "Review: UmV2aWV3OjM=" + } + ] + } + } + } +sourceSchemas: + - name: A + schema: | + schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + email: String! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_033679a6_1($shouldDefer: Boolean!) { + user(id: "1") { + name + ... @skip(if: $shouldDefer) { + id + } + } + } + variables: | + { + "shouldDefer": false + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox", + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type Review { + title: String! + body: String! + } + + type User @key(fields: "id") { + id: ID! + reviews: [Review!]! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_033679a6_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "reviews": [ + { + "title": "Review: UmV2aWV3OjE=", + "body": "Review: UmV2aWV3OjE=" + }, + { + "title": "Review: UmV2aWV3OjI=", + "body": "Review: UmV2aWV3OjI=" + }, + { + "title": "Review: UmV2aWV3OjM=", + "body": "Review: UmV2aWV3OjM=" + } + ] + } + } + } +operationPlan: + operation: + - document: | + query GetUser($shouldDefer: Boolean!) { + user(id: "1") { + name + ... @skip(if: $shouldDefer) { + reviews { + title + body + } + id @fusion__requirement + } + } + } + name: GetUser + hash: 033679a6deb9150ce9d5cd47bd725497 + searchSpace: 1 + expandedNodes: 2 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_033679a6_1($shouldDefer: Boolean!) { + user(id: "1") { + name + ... @skip(if: $shouldDefer) { + id + } + } + } + forwardedVariables: + - shouldDefer + - id: 2 + type: Operation + schema: B + operation: | + query GetUser_033679a6_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + conditions: + - variable: $shouldDefer + passingValue: false + dependencies: + - id: 1 + deliveryGroups: + - id: 0 + path: $.user + label: reviews + ifVariable: $shouldDefer + deferredSubPlans: + - deliveryGroupIds: + - 0 + parentNodeId: 2 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query GetUser_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_IfTrue_Variable_Should_Return_Streamed_Result.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_IfTrue_Variable_Should_Return_Streamed_Result.yaml new file mode 100644 index 00000000000..1106742f70d --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_IfTrue_Variable_Should_Return_Streamed_Result.yaml @@ -0,0 +1,303 @@ +title: Defer_IfTrue_Variable_Should_Return_Streamed_Result +request: + document: | + query GetUser($shouldDefer: Boolean!) { + user(id: "1") { + name + ... @defer(if: $shouldDefer, label: "reviews") { + reviews { + title + body + } + } + } + } + variables: | + { + "shouldDefer": true + } +stableResponseStream: | + { + "kind": "stable-stream", + "payloadCount": 2, + "initial": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + }, + "pending": [ + { + "id": "0", + "path": [ + "user" + ], + "label": "reviews" + } + ], + "incremental": [ + { + "id": "0", + "path": [ + "user" + ], + "data": { + "reviews": [ + { + "body": "Review: UmV2aWV3OjE=", + "title": "Review: UmV2aWV3OjE=" + }, + { + "body": "Review: UmV2aWV3OjI=", + "title": "Review: UmV2aWV3OjI=" + }, + { + "body": "Review: UmV2aWV3OjM=", + "title": "Review: UmV2aWV3OjM=" + } + ] + } + } + ], + "completed": [ + { + "id": "0" + } + ], + "final": { + "data": { + "user": { + "name": "User: VXNlcjox", + "reviews": [ + { + "body": "Review: UmV2aWV3OjE=", + "title": "Review: UmV2aWV3OjE=" + }, + { + "body": "Review: UmV2aWV3OjI=", + "title": "Review: UmV2aWV3OjI=" + }, + { + "body": "Review: UmV2aWV3OjM=", + "title": "Review: UmV2aWV3OjM=" + } + ] + } + } + } + } +sourceSchemas: + - name: A + schema: | + 
schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + email: String! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_033679a6_1($shouldDefer: Boolean!) { + user(id: "1") { + name + ... @skip(if: $shouldDefer) { + id + } + } + } + variables: | + { + "shouldDefer": true + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type Review { + title: String! + body: String! + } + + type User @key(fields: "id") { + id: ID! + reviews: [Review!]! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "reviews": [ + { + "title": "Review: UmV2aWV3OjE=", + "body": "Review: UmV2aWV3OjE=" + }, + { + "title": "Review: UmV2aWV3OjI=", + "body": "Review: UmV2aWV3OjI=" + }, + { + "title": "Review: UmV2aWV3OjM=", + "body": "Review: UmV2aWV3OjM=" + } + ] + } + } + } +operationPlan: + operation: + - document: | + query GetUser($shouldDefer: Boolean!) 
{ + user(id: "1") { + name + ... @skip(if: $shouldDefer) { + reviews { + title + body + } + id @fusion__requirement + } + } + } + name: GetUser + hash: 033679a6deb9150ce9d5cd47bd725497 + searchSpace: 1 + expandedNodes: 2 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_033679a6_1($shouldDefer: Boolean!) { + user(id: "1") { + name + ... @skip(if: $shouldDefer) { + id + } + } + } + forwardedVariables: + - shouldDefer + - id: 2 + type: Operation + schema: B + operation: | + query GetUser_033679a6_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + conditions: + - variable: $shouldDefer + passingValue: false + dependencies: + - id: 1 + deliveryGroups: + - id: 0 + path: $.user + label: reviews + ifVariable: $shouldDefer + deferredSubPlans: + - deliveryGroupIds: + - 0 + parentNodeId: 2 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query GetUser_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Nested_Should_Return_Incremental_Response_In_Order.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Nested_Should_Return_Incremental_Response_In_Order.yaml new file mode 100644 index 00000000000..cb1e50395cd --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Nested_Should_Return_Incremental_Response_In_Order.yaml @@ -0,0 +1,299 @@ +title: Defer_Nested_Should_Return_Incremental_Response_In_Order +request: + document: | + { + user(id: "1") { + name + ... @defer(label: "outer") { + email + ... @defer(label: "inner") { + address + } + } + } + } +stableResponseStream: | + { + "kind": "stable-stream", + "payloadCount": 3, + "initial": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + }, + "pending": [ + { + "id": "0", + "path": [ + "user" + ], + "label": "outer" + }, + { + "id": "1", + "path": [ + "user" + ], + "label": "inner" + } + ], + "incremental": [ + { + "id": "0", + "path": [ + "user" + ], + "data": { + "email": "User: VXNlcjox" + } + }, + { + "id": "1", + "path": [ + "user" + ], + "data": { + "address": "User: VXNlcjox" + } + } + ], + "completed": [ + { + "id": "0" + }, + { + "id": "1" + } + ], + "final": { + "data": { + "user": { + "address": "User: VXNlcjox", + "email": "User: VXNlcjox", + "name": "User: VXNlcjox" + } + } + } + } +sourceSchemas: + - name: A + schema: | + schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! 
+ } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_d1db12b9_1 { + user(id: "1") { + name + } + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "email": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + address + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "address": "User: VXNlcjox" + } + } + } +operationPlan: + operation: + - document: | + { + user(id: "1") { + name + } + } + hash: d1db12b9a5735063f8b816612efd3c53 + searchSpace: 1 + expandedNodes: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_d1db12b9_1 { + user(id: "1") { + name + } + } + deliveryGroups: + - id: 0 + path: $.user + label: outer + - id: 1 + path: $.user + label: inner + parentId: 0 + deferredSubPlans: + - deliveryGroupIds: + - 0 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query Op_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 + - deliveryGroupIds: + - 1 + parentNodeId: 2 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query Op_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + address + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Nested_Without_Label_On_Inner_Should_Return_Incremental_Response.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Nested_Without_Label_On_Inner_Should_Return_Incremental_Response.yaml new file mode 100644 index 00000000000..898551dea5b --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Nested_Without_Label_On_Inner_Should_Return_Incremental_Response.yaml @@ -0,0 +1,213 @@ +title: Defer_Nested_Without_Label_On_Inner_Should_Return_Incremental_Response +request: + document: | + { + user(id: "1") { + name + ... @defer(label: "outer") { + email + ... @defer { + address + } + } + } + } +stableResponseStream: | + { + "kind": "stable-stream", + "payloadCount": 2, + "initial": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + }, + "pending": [ + { + "id": "0", + "path": [ + "user" + ], + "label": "outer" + } + ], + "incremental": [ + { + "id": "0", + "path": [ + "user" + ], + "data": { + "address": "User: VXNlcjox", + "email": "User: VXNlcjox" + } + } + ], + "completed": [ + { + "id": "0" + } + ], + "final": { + "data": { + "user": { + "address": "User: VXNlcjox", + "email": "User: VXNlcjox", + "name": "User: VXNlcjox" + } + } + } + } +sourceSchemas: + - name: A + schema: | + schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! 
+ } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_f58e5f87_1 { + user(id: "1") { + name + } + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + email + address + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "email": "User: VXNlcjox", + "address": "User: VXNlcjox" + } + } + } +operationPlan: + operation: + - document: | + { + user(id: "1") { + name + } + } + hash: f58e5f87c8862b9cc4c515a1fb51b598 + searchSpace: 1 + expandedNodes: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_f58e5f87_1 { + user(id: "1") { + name + } + } + deliveryGroups: + - id: 0 + path: $.user + label: outer + - id: 1 + path: $.user + parentId: 0 + deferredSubPlans: + - deliveryGroupIds: + - 0 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query Op_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + address + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Single_Fragment_Returns_Incremental_Response.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Single_Fragment_Returns_Incremental_Response.yaml new file mode 100644 index 00000000000..7da2c9d0cb6 --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Single_Fragment_Returns_Incremental_Response.yaml @@ -0,0 +1,256 @@ +title: Defer_Single_Fragment_Returns_Incremental_Response +request: + document: | + query GetUser { + user(id: "1") { + name + ... 
@defer(label: "reviews") { + reviews { + title + body + } + } + } + } +stableResponseStream: | + { + "kind": "stable-stream", + "payloadCount": 2, + "initial": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + }, + "pending": [ + { + "id": "0", + "path": [ + "user" + ], + "label": "reviews" + } + ], + "incremental": [ + { + "id": "0", + "path": [ + "user" + ], + "data": { + "reviews": [ + { + "body": "Review: UmV2aWV3OjE=", + "title": "Review: UmV2aWV3OjE=" + }, + { + "body": "Review: UmV2aWV3OjI=", + "title": "Review: UmV2aWV3OjI=" + }, + { + "body": "Review: UmV2aWV3OjM=", + "title": "Review: UmV2aWV3OjM=" + } + ] + } + } + ], + "completed": [ + { + "id": "0" + } + ], + "final": { + "data": { + "user": { + "name": "User: VXNlcjox", + "reviews": [ + { + "body": "Review: UmV2aWV3OjE=", + "title": "Review: UmV2aWV3OjE=" + }, + { + "body": "Review: UmV2aWV3OjI=", + "title": "Review: UmV2aWV3OjI=" + }, + { + "body": "Review: UmV2aWV3OjM=", + "title": "Review: UmV2aWV3OjM=" + } + ] + } + } + } + } +sourceSchemas: + - name: A + schema: | + schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + email: String! 
+ } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_9d7b4d57_1 { + user(id: "1") { + name + } + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type Review { + title: String! + body: String! + } + + type User @key(fields: "id") { + id: ID! + reviews: [Review!]! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "reviews": [ + { + "title": "Review: UmV2aWV3OjE=", + "body": "Review: UmV2aWV3OjE=" + }, + { + "title": "Review: UmV2aWV3OjI=", + "body": "Review: UmV2aWV3OjI=" + }, + { + "title": "Review: UmV2aWV3OjM=", + "body": "Review: UmV2aWV3OjM=" + } + ] + } + } + } +operationPlan: + operation: + - document: | + query GetUser { + user(id: "1") { + name + } + } + name: GetUser + hash: 9d7b4d57fdf8672e3b4b1975babbc7a0 + searchSpace: 1 + expandedNodes: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_9d7b4d57_1 { + user(id: "1") { + name + } + } + deliveryGroups: + - id: 0 + path: $.user + label: reviews + deferredSubPlans: + - deliveryGroupIds: + - 0 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query GetUser_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + reviews { + title + body + } + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Two_Siblings_Sharing_Field_Emit_One_Incremental_That_Completes_Both_Groups.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Two_Siblings_Sharing_Field_Emit_One_Incremental_That_Completes_Both_Groups.yaml new file mode 100644 index 00000000000..6764eb650d2 --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Two_Siblings_Sharing_Field_Emit_One_Incremental_That_Completes_Both_Groups.yaml @@ -0,0 +1,300 @@ +title: Defer_Two_Siblings_Sharing_Field_Emit_One_Incremental_That_Completes_Both_Groups +request: + document: | + { + user(id: "1") { + name + ... @defer(label: "contact") { + email + } + ... @defer(label: "location") { + email + address + } + } + } +stableResponseStream: | + { + "kind": "stable-stream", + "payloadCount": 3, + "initial": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + }, + "pending": [ + { + "id": "0", + "path": [ + "user" + ], + "label": "contact" + }, + { + "id": "1", + "path": [ + "user" + ], + "label": "location" + } + ], + "incremental": [ + { + "id": "0", + "path": [ + "user" + ], + "data": { + "email": "User: VXNlcjox" + } + }, + { + "id": "1", + "path": [ + "user" + ], + "data": { + "address": "User: VXNlcjox" + } + } + ], + "completed": [ + { + "id": "0" + }, + { + "id": "1" + } + ], + "final": { + "data": { + "user": { + "address": "User: VXNlcjox", + "email": "User: VXNlcjox", + "name": "User: VXNlcjox" + } + } + } + } +sourceSchemas: + - name: A + schema: | + schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! 
+ } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_29ccee0f_1 { + user(id: "1") { + name + } + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "email": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + address + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "address": "User: VXNlcjox" + } + } + } +operationPlan: + operation: + - document: | + { + user(id: "1") { + name + } + } + hash: 29ccee0fa1c82a6cb002b417ee776893 + searchSpace: 1 + expandedNodes: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_29ccee0f_1 { + user(id: "1") { + name + } + } + deliveryGroups: + - id: 0 + path: $.user + label: contact + - id: 1 + path: $.user + label: location + deferredSubPlans: + - deliveryGroupIds: + - 0 + - 1 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query Op_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 + - deliveryGroupIds: + - 1 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query Op_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + address + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Two_Siblings_With_Overlapping_Fields_Should_Deduplicate.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Two_Siblings_With_Overlapping_Fields_Should_Deduplicate.yaml new file mode 100644 index 00000000000..bfaa006a18f --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_Two_Siblings_With_Overlapping_Fields_Should_Deduplicate.yaml @@ -0,0 +1,300 @@ +title: Defer_Two_Siblings_With_Overlapping_Fields_Should_Deduplicate +request: + document: | + { + user(id: "1") { + name + ... @defer(label: "contact") { + email + } + ... @defer(label: "location") { + email + address + } + } + } +stableResponseStream: | + { + "kind": "stable-stream", + "payloadCount": 3, + "initial": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + }, + "pending": [ + { + "id": "0", + "path": [ + "user" + ], + "label": "contact" + }, + { + "id": "1", + "path": [ + "user" + ], + "label": "location" + } + ], + "incremental": [ + { + "id": "0", + "path": [ + "user" + ], + "data": { + "email": "User: VXNlcjox" + } + }, + { + "id": "1", + "path": [ + "user" + ], + "data": { + "address": "User: VXNlcjox" + } + } + ], + "completed": [ + { + "id": "0" + }, + { + "id": "1" + } + ], + "final": { + "data": { + "user": { + "address": "User: VXNlcjox", + "email": "User: VXNlcjox", + "name": "User: VXNlcjox" + } + } + } + } +sourceSchemas: + - name: A + schema: | + schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! 
+ } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_29ccee0f_1 { + user(id: "1") { + name + } + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "email": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + address + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "data": { + "userById": { + "address": "User: VXNlcjox" + } + } + } +operationPlan: + operation: + - document: | + { + user(id: "1") { + name + } + } + hash: 29ccee0fa1c82a6cb002b417ee776893 + searchSpace: 1 + expandedNodes: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_29ccee0f_1 { + user(id: "1") { + name + } + } + deliveryGroups: + - id: 0 + path: $.user + label: contact + - id: 1 + path: $.user + label: location + deferredSubPlans: + - deliveryGroupIds: + - 0 + - 1 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query Op_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 + - deliveryGroupIds: + - 1 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query Op_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query Op_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + address + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_With_Error_In_Deferred_Fragment_Should_Return_Error_In_Incremental_Payload.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_With_Error_In_Deferred_Fragment_Should_Return_Error_In_Incremental_Payload.yaml new file mode 100644 index 00000000000..3d9dd0f5332 --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/DeferTests.Defer_With_Error_In_Deferred_Fragment_Should_Return_Error_In_Incremental_Payload.yaml @@ -0,0 +1,206 @@ +title: Defer_With_Error_In_Deferred_Fragment_Should_Return_Error_In_Incremental_Payload +request: + document: | + query GetUser { + user(id: "1") { + name + ... @defer(label: "email") { + email + } + } + } +stableResponseStream: | + { + "kind": "stable-stream", + "payloadCount": 2, + "initial": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + }, + "pending": [ + { + "id": "0", + "path": [ + "user" + ], + "label": "email" + } + ], + "completed": [ + { + "id": "0", + "errors": [ + { + "message": "Unexpected Execution Error", + "path": [ + "user", + "email" + ] + } + ] + } + ], + "final": { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + } +sourceSchemas: + - name: A + schema: | + schema { + query: Query + } + + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! 
+ } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_28690061_1 { + user(id: "1") { + name + } + } + response: + results: + - | + { + "data": { + "user": { + "name": "User: VXNlcjox" + } + } + } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_defer_1 { + user(id: "1") { + id + } + } + response: + results: + - | + { + "data": { + "user": { + "id": "VXNlcjox" + } + } + } + - name: B + schema: | + schema { + query: Query + } + + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! @error + } + interactions: + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query GetUser_defer_2($__fusion_1_id: ID!) 
{ + userById(id: $__fusion_1_id) { + email + } + } + variables: | + { + "__fusion_1_id": "VXNlcjox" + } + response: + results: + - | + { + "errors": [ + { + "message": "Unexpected Execution Error", + "path": [ + "userById", + "email" + ] + } + ], + "data": { + "userById": null + } + } +operationPlan: + operation: + - document: | + query GetUser { + user(id: "1") { + name + } + } + name: GetUser + hash: 286900619fe56222f78d6a6c45482586 + searchSpace: 1 + expandedNodes: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_28690061_1 { + user(id: "1") { + name + } + } + deliveryGroups: + - id: 0 + path: $.user + label: email + deferredSubPlans: + - deliveryGroupIds: + - 0 + parentNodeId: 1 + nodes: + - id: 1 + type: Operation + schema: A + operation: | + query GetUser_defer_1 { + user(id: "1") { + id + } + } + - id: 2 + type: Operation + schema: B + operation: | + query GetUser_defer_2($__fusion_1_id: ID!) { + userById(id: $__fusion_1_id) { + email + } + } + source: $.userById + target: $.user + requirements: + - name: __fusion_1_id + selectionMap: >- + id + dependencies: + - id: 1 diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Download_Schema.graphql b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Download_Schema.graphql index e0985da6cff..3a428e69188 100644 --- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Download_Schema.graphql +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Download_Schema.graphql @@ -85,3 +85,5 @@ type PageInfo { "When paginating backwards, the cursor to continue." 
startCursor: String } + +directive @defer(if: Boolean = true, label: String) on FRAGMENT_SPREAD | INLINE_FRAGMENT diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.IntrospectionQueries_IntrospectionQuery.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.IntrospectionQueries_IntrospectionQuery.yaml index 80d6048d632..73193d7f3a2 100644 --- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.IntrospectionQueries_IntrospectionQuery.yaml +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.IntrospectionQueries_IntrospectionQuery.yaml @@ -1663,6 +1663,41 @@ response: } ], "directives": [ + { + "name": "defer", + "description": null, + "isRepeatable": false, + "args": [ + { + "name": "if", + "description": null, + "type": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + }, + "defaultValue": "true", + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "label", + "description": null, + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + }, + "defaultValue": null, + "isDeprecated": false, + "deprecationReason": null + } + ], + "locations": [ + "FRAGMENT_SPREAD", + "INLINE_FRAGMENT" + ] + }, { "name": "skip", "description": "Directs the executor to skip this field or fragment when the \u0060if\u0060 argument is true.", diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Typename_On_Introspection_Types.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Typename_On_Introspection_Types.yaml index c2613b28a48..5e5f5848ff1 100644 --- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Typename_On_Introspection_Types.yaml +++ 
b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/IntrospectionTests.Typename_On_Introspection_Types.yaml @@ -37,6 +37,9 @@ response: { "__typename": "__Directive" }, + { + "__typename": "__Directive" + }, { "__typename": "__Directive" } diff --git a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SubscriptionsOverHttpStoreTests.Subscribe_Simple.yaml b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SubscriptionsOverHttpStoreTests.Subscribe_Simple.yaml index 3d50939c711..dc198ef698b 100644 --- a/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SubscriptionsOverHttpStoreTests.Subscribe_Simple.yaml +++ b/src/HotChocolate/Fusion/test/Fusion.AspNetCore.Tests/__snapshots__/SubscriptionsOverHttpStoreTests.Subscribe_Simple.yaml @@ -127,6 +127,20 @@ sourceSchemas: } } } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_a6fc02fc_2($__fusion_1_id: Int!) { + bookById(id: $__fusion_1_id) { + title + } + } + variables: | + { + "__fusion_1_id": 2 + } + response: + results: - | { "data": { @@ -135,6 +149,20 @@ sourceSchemas: } } } + - request: + accept: application/graphql-response+json; charset=utf-8, application/json; charset=utf-8, application/jsonl; charset=utf-8, text/event-stream; charset=utf-8 + document: | + query Op_a6fc02fc_2($__fusion_1_id: Int!) 
{ + bookById(id: $__fusion_1_id) { + title + } + } + variables: | + { + "__fusion_1_id": 3 + } + response: + results: - | { "data": { diff --git a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Serialization/JsonOperationPlanSerializationTests.cs b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Serialization/JsonOperationPlanSerializationTests.cs index c5062dbab97..2dfcad1d311 100644 --- a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Serialization/JsonOperationPlanSerializationTests.cs +++ b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Serialization/JsonOperationPlanSerializationTests.cs @@ -16,7 +16,6 @@ public void Parse_Plan() { // arrange var compositeSchema = CreateCompositeSchema(); - var originalPlan = PlanOperation( compositeSchema, """ @@ -42,26 +41,6 @@ fragment Product on Product { }); formatter.Format(buffer, originalPlan); - var json = JsonNode.Parse(buffer.WrittenSpan)!; - var operationNodes = json["nodes"]! - .AsArray() - .Select(t => t!.AsObject()) - .Where(t => - { - var type = t["type"]?.GetValue(); - return type is "Operation" or "OperationBatch"; - }) - .ToList(); - - Assert.NotEmpty(operationNodes); - Assert.All( - operationNodes, - node => - { - Assert.True(node.ContainsKey("resultSelectionSet")); - Assert.False(node.ContainsKey("responseNames")); - }); - // act var compiler = new OperationCompiler( compositeSchema, @@ -71,14 +50,15 @@ fragment Product on Product { var parsedPlan = parser.Parse(buffer.WrittenMemory); // assert - var parsedPlanFormatted = formatter.Format(parsedPlan); - parsedPlanFormatted.MatchInlineSnapshot(Encoding.UTF8.GetString(buffer.WrittenSpan)); + formatter.Format(parsedPlan).MatchInlineSnapshot(Encoding.UTF8.GetString(buffer.WrittenSpan)); } [Fact] public void Parse_Plan_Uses_SelectionSet_Syntax_When_Present() { // arrange + // Inject a custom selection set string into the formatted plan, then + // parse it back to confirm the parser preserves the syntax. 
var compositeSchema = CreateCompositeSchema(); var originalPlan = PlanOperation( compositeSchema, @@ -103,16 +83,8 @@ public void Parse_Plan_Uses_SelectionSet_Syntax_When_Present() .Select(t => t!.AsObject()) .First(t => t["type"]?.GetValue() is "Operation"); var operationNodeId = operationNode["id"]!.GetValue(); - operationNode["resultSelectionSet"] = "{ __typename }"; - - var planSource = Encoding.UTF8.GetBytes( - json.ToJsonString( - new JsonSerializerOptions - { - WriteIndented = true - })); - + var planSource = Encoding.UTF8.GetBytes(json.ToJsonString(new JsonSerializerOptions { WriteIndented = true })); var compiler = new OperationCompiler( compositeSchema, new DefaultObjectPool>>( @@ -121,11 +93,11 @@ public void Parse_Plan_Uses_SelectionSet_Syntax_When_Present() // act var parsedPlan = parser.Parse(planSource); + + // assert var parsedOperationNode = parsedPlan.AllNodes .OfType() .Single(t => t.Id == operationNodeId); - - // assert Assert.Equal("{ __typename }", parsedOperationNode.ResultSelectionSet.ToString(indented: false)); } @@ -195,10 +167,90 @@ ... on Author { parsedPlanFormatted.MatchInlineSnapshot(Encoding.UTF8.GetString(buffer.WrittenSpan)); } + [Fact] + public void Parse_Plan_Preserves_DeliveryGroup_Identity_Across_Plan_And_SubPlans() + { + // arrange + // Two sibling @defer fragments share a field (email) plus a nested @defer + // adds a parent chain. Round-trip must restore canonical DeferUsage instances. + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + """); + + var originalPlan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... @defer(label: "contact") { + email + ... @defer(label: "nested") { + address + } + } + ... 
@defer(label: "location") { + email + } + } + } + """); + + using var buffer = new PooledArrayWriter(); + var formatter = new JsonOperationPlanFormatter( + new JsonWriterOptions + { + Indented = true, + Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping + }); + formatter.Format(buffer, originalPlan); + + // act + var compiler = new OperationCompiler( + schema, + new DefaultObjectPool>>( + new DefaultPooledObjectPolicy>>())); + var parser = new JsonOperationPlanParser(compiler); + var parsedPlan = parser.Parse(buffer.WrittenMemory); + + // assert + Encoding.UTF8.GetString(buffer.WrittenSpan).MatchSnapshot(); + Assert.All( + parsedPlan.DeferredSubPlans, + p => Assert.All( + p.DeliveryGroups, + g => Assert.Same(parsedPlan.DeliveryGroups.Single(d => d.Id == g.Id), g))); + Assert.All( + parsedPlan.DeliveryGroups.Where(g => g.Parent is not null), + g => Assert.Same(parsedPlan.DeliveryGroups.Single(d => d.Id == g.Parent!.Id), g.Parent)); + } + [Fact] public void Parse_Plan_Without_BatchingGroupId() { // arrange + // Strip batchingGroupId from a formatted plan to simulate a legacy payload. 
var compositeSchema = CreateCompositeSchema(); var originalPlan = PlanOperation( compositeSchema, @@ -222,34 +274,25 @@ public void Parse_Plan_Without_BatchingGroupId() formatter.Format(buffer, originalPlan); var json = JsonNode.Parse(buffer.WrittenSpan)!; - var nodes = json["nodes"]!.AsArray(); - - foreach (var node in nodes) + foreach (var node in json["nodes"]!.AsArray()) { if (node?["type"]?.GetValue() is "Operation") { node.AsObject().Remove("batchingGroupId"); } } - var legacyPlanSource = Encoding.UTF8.GetBytes( - json.ToJsonString( - new JsonSerializerOptions - { - WriteIndented = true - })); - - // act + json.ToJsonString(new JsonSerializerOptions { WriteIndented = true })); var compiler = new OperationCompiler( compositeSchema, new DefaultObjectPool>>( new DefaultPooledObjectPolicy>>())); var parser = new JsonOperationPlanParser(compiler); + + // act var parsedPlan = parser.Parse(legacyPlanSource); // assert - // BatchingGroupId no longer exists on OperationExecutionNode; - // the legacy plan without batchingGroupId should still parse successfully. 
Assert.NotEmpty(parsedPlan.AllNodes.OfType()); } } diff --git a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Serialization/__snapshots__/JsonOperationPlanSerializationTests.Parse_Plan_Preserves_DeliveryGroup_Identity_Across_Plan_And_SubPlans.snap b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Serialization/__snapshots__/JsonOperationPlanSerializationTests.Parse_Plan_Preserves_DeliveryGroup_Identity_Across_Plan_And_SubPlans.snap new file mode 100644 index 00000000000..4a7410cee69 --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Serialization/__snapshots__/JsonOperationPlanSerializationTests.Parse_Plan_Preserves_DeliveryGroup_Identity_Across_Plan_And_SubPlans.snap @@ -0,0 +1,156 @@ +{ + "id": "68c2a64a8c04735ef1b991eeca37e895e81946d427b3c305d68fd2a19d988d51", + "operation": { + "kind": "Query", + "document": "{\n user(id: \"1\") {\n name\n }\n}", + "id": "123456789101112", + "hash": "123456789101112", + "shortHash": "12345678" + }, + "searchSpace": 1, + "expandedNodes": 1, + "nodes": [ + { + "id": 1, + "type": "Operation", + "schema": "a", + "operation": { + "name": "Op_123456789101112_1", + "kind": "Query", + "document": "query Op_123456789101112_1 {\n user(id: \"1\") {\n name\n }\n}", + "hash": "167b5bb497f34e9c7516ec2d4598ae6d7f0fd73a9a9a329156910e99e5db4ec3", + "shortHash": "167b5bb4" + }, + "resultSelectionSet": "{ user }" + } + ], + "deliveryGroups": [ + { + "id": 0, + "path": "$.user", + "label": "contact" + }, + { + "id": 1, + "path": "$.user", + "label": "nested", + "parentId": 0 + }, + { + "id": 2, + "path": "$.user", + "label": "location" + } + ], + "deferredSubPlans": [ + { + "deliveryGroupIds": [ + 0, + 2 + ], + "parentNodeId": 1, + "operation": { + "kind": "Query", + "document": "{\n user(id: \"1\") {\n email\n id @fusion__requirement\n }\n}", + "id": "123456789101112#defer_0", + "hash": "123456789101112#defer_0", + "shortHash": "12345678" + }, + "nodes": [ + { + "id": 1, + "type": 
"Operation", + "schema": "a", + "operation": { + "name": "Op_defer_1", + "kind": "Query", + "document": "query Op_defer_1 {\n user(id: \"1\") {\n id\n }\n}", + "hash": "93841f3bbdb1c1bf3a949d7089b6713b11e6d33666aae10bdc8a885607465ca5", + "shortHash": "93841f3b" + }, + "resultSelectionSet": "{ user }" + }, + { + "id": 2, + "type": "Operation", + "schema": "b", + "operation": { + "name": "Op_defer_2", + "kind": "Query", + "document": "query Op_defer_2($__fusion_1_id: ID!) {\n userById(id: $__fusion_1_id) {\n email\n }\n}", + "hash": "65dbc31ddfe32ceb540eaace9ebb429fdc84b5f7758a861ec21db29c098ce8ee", + "shortHash": "65dbc31d" + }, + "resultSelectionSet": "{ email }", + "source": "$.userById", + "target": "$.user", + "requirements": [ + { + "name": "__fusion_1_id", + "type": "ID!", + "path": "$.user", + "selectionMap": "id" + } + ], + "dependencies": [ + 1 + ] + } + ] + }, + { + "deliveryGroupIds": [ + 1 + ], + "parentNodeId": 2, + "operation": { + "kind": "Query", + "document": "{\n user(id: \"1\") {\n address\n id @fusion__requirement\n }\n}", + "id": "123456789101112#defer_1", + "hash": "123456789101112#defer_1", + "shortHash": "12345678" + }, + "nodes": [ + { + "id": 1, + "type": "Operation", + "schema": "a", + "operation": { + "name": "Op_defer_1", + "kind": "Query", + "document": "query Op_defer_1 {\n user(id: \"1\") {\n id\n }\n}", + "hash": "93841f3bbdb1c1bf3a949d7089b6713b11e6d33666aae10bdc8a885607465ca5", + "shortHash": "93841f3b" + }, + "resultSelectionSet": "{ user }" + }, + { + "id": 2, + "type": "Operation", + "schema": "b", + "operation": { + "name": "Op_defer_2", + "kind": "Query", + "document": "query Op_defer_2($__fusion_1_id: ID!) 
{\n userById(id: $__fusion_1_id) {\n address\n }\n}", + "hash": "1abf4c1bb273be32d1f61887253fa9843c5a15491e86d6962b34ecd01ad1cb01", + "shortHash": "1abf4c1b" + }, + "resultSelectionSet": "{ address }", + "source": "$.userById", + "target": "$.user", + "requirements": [ + { + "name": "__fusion_1_id", + "type": "ID!", + "path": "$.user", + "selectionMap": "id" + } + ], + "dependencies": [ + 1 + ] + } + ] + } + ] +} diff --git a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Be_In_The_Schema.graphql b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Be_In_The_Schema.graphql index 050484f5d68..fc5b875f940 100644 --- a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Be_In_The_Schema.graphql +++ b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Be_In_The_Schema.graphql @@ -96,4 +96,6 @@ enum ScalarSerializationType { scalar Custom @serializeAs(type: STRING) +directive @defer(if: Boolean = true, label: String) on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @serializeAs(pattern: String, type: [ScalarSerializationType!]!) 
on SCALAR diff --git a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Not_Be_In_The_Schema.graphql b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Not_Be_In_The_Schema.graphql index efde98735d7..99cb588baf6 100644 --- a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Not_Be_In_The_Schema.graphql +++ b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Execution/Types/__snapshots__/SerializeAsTests.SerializeAs_Will_Not_Be_In_The_Schema.graphql @@ -95,3 +95,5 @@ enum ScalarSerializationType { } scalar Custom + +directive @defer(if: Boolean = true, label: String) on FRAGMENT_SPREAD | INLINE_FRAGMENT diff --git a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Planning/DeferPlannerTests.cs b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Planning/DeferPlannerTests.cs new file mode 100644 index 00000000000..2d2c0f4718e --- /dev/null +++ b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Planning/DeferPlannerTests.cs @@ -0,0 +1,734 @@ +namespace HotChocolate.Fusion.Planning; + +public class DeferPlannerTests : FusionTestBase +{ + [Fact] + public void Defer_SingleFragment_ProducesDeferredGroup() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... 
@defer { + email + } + } + } + """); + + // assert + Assert.Single(plan.DeferredSubPlans); + + var subPlan = plan.DeferredSubPlans[0]; + var group = subPlan.DeliveryGroups[0]; + Assert.Equal(0, group.Id); + Assert.Null(group.Label); + Assert.Equal("$.user", group.Path!.ToString()); + } + + [Fact] + public void Defer_MultipleFragments_ProducesMultipleDeferredGroups() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + profile: Profile! + } + + type Profile { + bio: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... @defer(label: "emailDefer") { + email + } + profile { + ... @defer(label: "bioDefer") { + bio + } + } + } + } + """); + + // assert + Assert.Equal(2, plan.DeferredSubPlans.Length); + + var emailSubPlan = plan.DeferredSubPlans.First(s => s.DeliveryGroups[0].Label == "emailDefer"); + var bioSubPlan = plan.DeferredSubPlans.First(s => s.DeliveryGroups[0].Label == "bioDefer"); + + Assert.NotNull(emailSubPlan); + Assert.NotNull(bioSubPlan); + Assert.NotEqual(emailSubPlan.DeliveryGroups[0].Id, bioSubPlan.DeliveryGroups[0].Id); + } + + [Fact] + public void Defer_WithLabel_LabelIsPropagated() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... 
@defer(label: "myLabel") { + email + } + } + } + """); + + // assert + Assert.Single(plan.DeferredSubPlans); + Assert.Equal("myLabel", plan.DeferredSubPlans[0].DeliveryGroups[0].Label); + } + + [Fact] + public void Defer_OperationHasIncrementalParts() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... @defer { + email + } + } + } + """); + + // assert + Assert.False(plan.DeferredSubPlans.IsEmpty); + } + + [Fact] + public void Defer_NoDefer_NoDeferredGroups() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + email + } + } + """); + + // assert + Assert.True(plan.DeferredSubPlans.IsEmpty); + Assert.False(plan.Operation.HasIncrementalParts); + } + + [Fact] + public void Defer_ConditionalVariable_IfVariableRecorded() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query ($shouldDefer: Boolean!) { + user(id: "1") { + name + ... 
@defer(if: $shouldDefer) { + email + } + } + } + """); + + // assert + Assert.Single(plan.DeferredSubPlans); + Assert.Equal("shouldDefer", plan.DeferredSubPlans[0].DeliveryGroups[0].IfVariable); + } + + [Fact] + public void Defer_MainPlanStillExecutes() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... @defer { + email + } + } + } + """); + + // assert + Assert.NotEmpty(plan.RootNodes); + Assert.NotEmpty(plan.AllNodes); + + // The deferred subplan should also have its own execution nodes + var subPlan = plan.DeferredSubPlans[0]; + Assert.False(subPlan.RootNodes.IsEmpty); + Assert.False(subPlan.AllNodes.IsEmpty); + Assert.Null(subPlan.DeliveryGroups[0].Parent); + } + + [Fact] + public void Defer_IfFalseLiteral_Should_ProduceNoDeferredGroups() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... @defer(if: false) { + email + } + } + } + """); + + // assert + Assert.True(plan.DeferredSubPlans.IsEmpty); + } + + [Fact] + public void Defer_IfTrueLiteral_Should_ProduceDeferredGroup() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! 
+ } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... @defer(if: true) { + email + } + } + } + """); + + // assert + Assert.Single(plan.DeferredSubPlans); + Assert.Null(plan.DeferredSubPlans[0].DeliveryGroups[0].IfVariable); + } + + [Fact] + public void Defer_NestedDefer_Should_ProduceParentChildRelationship() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... @defer(label: "outer") { + email + ... @defer(label: "inner") { + address + } + } + } + } + """); + + // assert + Assert.Equal(2, plan.DeferredSubPlans.Length); + + var outerGroup = plan.DeferredSubPlans + .Select(s => s.DeliveryGroups[0]) + .First(g => g.Label == "outer"); + var innerGroup = plan.DeferredSubPlans + .Select(s => s.DeliveryGroups[0]) + .First(g => g.Label == "inner"); + + Assert.Null(outerGroup.Parent); + Assert.NotNull(innerGroup.Parent); + Assert.Equal(outerGroup.Id, innerGroup.Parent.Id); + } + + [Fact] + public void Defer_WithIncludeDirective_Should_ProduceDeferredGroup() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + query { + user(id: "1") { + name + ... 
@defer @include(if: true) { + email + } + } + } + """); + + // assert + Assert.Single(plan.DeferredSubPlans); + } + + [Fact] + public void Plan_Should_Partition_Nested_Defer_With_Mixed_If_Conditions_Correctly() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + address: String! + } + """); + + // act + assert: a=true, b=true (both active) + var planBothActive = PlanOperation( + schema, + """ + { + user(id: "1") { + name + ... @defer(label: "outer", if: true) { + email + ... @defer(label: "inner", if: true) { + address + } + } + } + } + """); + + Assert.Equal(2, planBothActive.DeferredSubPlans.Length); + var outerSubPlan = planBothActive.DeferredSubPlans + .First(s => s.DeliveryGroups.Any(g => g.Label == "outer")); + var innerSubPlan = planBothActive.DeferredSubPlans + .First(s => s.DeliveryGroups.Any(g => g.Label == "inner")); + Assert.Single(outerSubPlan.DeliveryGroups, g => g.Label == "outer"); + Assert.Single(innerSubPlan.DeliveryGroups, g => g.Label == "inner"); + Assert.Null(outerSubPlan.DeliveryGroups[0].Parent); + var innerParent = innerSubPlan.DeliveryGroups[0].Parent; + Assert.NotNull(innerParent); + Assert.Equal(outerSubPlan.DeliveryGroups[0].Id, innerParent.Id); + + // act + assert: a=true, b=false (inner inactive, its address collapses into outer) + var planInnerInactive = PlanOperation( + schema, + """ + { + user(id: "1") { + name + ... @defer(label: "outer", if: true) { + email + ... 
@defer(label: "inner", if: false) { + address + } + } + } + } + """); + + Assert.Single(planInnerInactive.DeferredSubPlans); + var collapsedOuter = planInnerInactive.DeferredSubPlans[0]; + Assert.Single(collapsedOuter.DeliveryGroups); + Assert.Equal("outer", collapsedOuter.DeliveryGroups[0].Label); + + // act + assert: a=false, b=true (outer inactive, inner is top-level; email in initial) + var planOuterInactive = PlanOperation( + schema, + """ + { + user(id: "1") { + name + ... @defer(label: "outer", if: false) { + email + ... @defer(label: "inner", if: true) { + address + } + } + } + } + """); + + Assert.Single(planOuterInactive.DeferredSubPlans); + var innerOnly = planOuterInactive.DeferredSubPlans[0]; + Assert.Single(innerOnly.DeliveryGroups); + Assert.Equal("inner", innerOnly.DeliveryGroups[0].Label); + Assert.Null(innerOnly.DeliveryGroups[0].Parent); + + // act + assert: a=false, b=false (both inactive, no subplans) + var planBothInactive = PlanOperation( + schema, + """ + { + user(id: "1") { + name + ... @defer(label: "outer", if: false) { + email + ... @defer(label: "inner", if: false) { + address + } + } + } + } + """); + + Assert.True(planBothInactive.DeferredSubPlans.IsEmpty); + } + + [Fact(Skip = "Known bug: BuildDeferredOperation forces OperationType.Query, causing KeyNotFoundException for mutation fields")] + public void Defer_OnMutationResult_Should_ProduceDeferredGroup() + { + // arrange + var schema = ComposeSchema( + """ + # name: a + type Query { + user(id: ID!): User @lookup + } + + type Mutation { + createUser(name: String!): User! + } + + type User @key(fields: "id") { + id: ID! + name: String! + } + """, + """ + # name: b + type Query { + userById(id: ID!): User @lookup + } + + type User @key(fields: "id") { + id: ID! + email: String! + } + """); + + // act + var plan = PlanOperation( + schema, + """ + mutation { + createUser(name: "test") { + name + ... 
@defer { + email + } + } + } + """); + + // assert + Assert.Single(plan.DeferredSubPlans); + + var subPlan = plan.DeferredSubPlans[0]; + Assert.False(subPlan.RootNodes.IsEmpty); + Assert.False(subPlan.AllNodes.IsEmpty); + } +} diff --git a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Planning/__snapshots__/LookupTests.Require_Inaccessible_Data.graphql b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Planning/__snapshots__/LookupTests.Require_Inaccessible_Data.graphql index f4078625107..f3ab1492ae8 100644 --- a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Planning/__snapshots__/LookupTests.Require_Inaccessible_Data.graphql +++ b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Planning/__snapshots__/LookupTests.Require_Inaccessible_Data.graphql @@ -15,3 +15,5 @@ type Product { id: Int! name: String! } + +directive @defer(if: Boolean = true, label: String) on FRAGMENT_SPREAD | INLINE_FRAGMENT diff --git a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Text/Json/SourceResultDocumentTests.cs b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Text/Json/SourceResultDocumentTests.cs index 422ddcccde8..d696711f793 100644 --- a/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Text/Json/SourceResultDocumentTests.cs +++ b/src/HotChocolate/Fusion/test/Fusion.Execution.Tests/Text/Json/SourceResultDocumentTests.cs @@ -5,6 +5,21 @@ namespace HotChocolate.Fusion.Text.Json; public class SourceResultDocumentTests { + [Fact] + public void Parse_Should_NotThrow_When_DisposedAndInputBufferIsNotPooled() + { + var json = """ + { + "id": 1 + } + """u8.ToArray(); + + var result = SourceResultDocument.Parse(json, json.Length); + var exception = Record.Exception(result.Dispose); + + Assert.Null(exception); + } + [Fact] public void TryGetProperty_String_Name() {