diff --git a/.circleci/config.yml b/.circleci/config.yml index 526cdff2ee..d8c00a26c9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,42 +5,47 @@ version: 2.1 # These "CircleCI Orbs" are reusable bits of configuration that can be shared # across projects. See https://circleci.com/orbs/ for more information. orbs: - gh: circleci/github-cli@2.7.0 slack: circleci/slack@5.1.1 secops: apollo/circleci-secops-orb@2.0.7 executors: amd_linux_build: &amd_linux_build_executor docker: - - image: cimg/base:current + - image: ghcr.io/apollographql/ci-utility-docker-images/apollo-rust-builder:0.1.0 resource_class: xlarge environment: CARGO_BUILD_JOBS: 4 RUST_TEST_THREADS: 6 + MISE_ENV: ci amd_linux_helm: &amd_linux_helm_executor docker: - - image: cimg/base:current + - image: ghcr.io/apollographql/ci-utility-docker-images/apollo-rust-builder:0.1.0 resource_class: small + environment: + MISE_ENV: ci amd_linux_test: &amd_linux_test_executor docker: - - image: cimg/base:current + - image: ghcr.io/apollographql/ci-utility-docker-images/apollo-rust-builder:0.1.0 - image: cimg/redis:7.4.2 - image: openzipkin/zipkin:3.5.0 - image: ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.21.1 resource_class: 2xlarge environment: + MISE_ENV: ci CARGO_BUILD_JOBS: 4 arm_linux_build: &arm_linux_build_executor docker: - - image: cimg/base:current - resource_class: arm.large + - image: ghcr.io/apollographql/ci-utility-docker-images/apollo-rust-builder:0.1.0 + resource_class: arm.xlarge environment: + MISE_ENV: ci CARGO_BUILD_JOBS: 8 arm_linux_test: &arm_linux_test_executor docker: - - image: cimg/base:current + - image: ghcr.io/apollographql/ci-utility-docker-images/apollo-rust-builder:0.1.0 resource_class: arm.xlarge environment: + MISE_ENV: ci CARGO_BUILD_JOBS: 8 macos_build: &macos_build_executor macos: @@ -49,6 +54,8 @@ executors: # We use the major.minor notation to bring in compatible patches. 
xcode: "15.4.0" resource_class: macos.m1.large.gen1 + environment: + MISE_ENV: ci,ci-mac macos_test: &macos_test_executor macos: # See https://circleci.com/docs/xcode-policy along with the support matrix @@ -56,35 +63,34 @@ executors: # We use the major.minor notation to bring in compatible patches. xcode: "15.4.0" resource_class: macos.m1.large.gen1 + environment: + MISE_ENV: ci,ci-mac windows_build: &windows_build_executor machine: image: "windows-server-2019-vs2019:2024.02.21" resource_class: windows.xlarge shell: bash.exe --login -eo pipefail + environment: + MISE_ENV: ci,windows windows_test: &windows_test_executor machine: image: "windows-server-2019-vs2019:2024.02.21" resource_class: windows.xlarge shell: bash.exe --login -eo pipefail + environment: + MISE_ENV: ci,windows # We don't use {{ arch }} because on windows it is unstable https://discuss.circleci.com/t/value-of-arch-unstable-on-windows/40079 parameters: toolchain_version: type: string - default: '{{ checksum ".circleci/config.yml" }}-v3-{{ checksum "~/.arch" }}-{{ checksum "rust-toolchain.toml" }}-{{ checksum "~/.daily_version" }}' + default: '{{ checksum ".circleci/config.yml" }}-v3-{{ checksum "~/.arch" }}-{{ checksum ".config/mise/config.toml" }}-{{ checksum "rust-toolchain.toml" }}-{{ checksum "~/.daily_version" }}' xtask_version: type: string - default: '{{ checksum ".circleci/config.yml" }}-v3-{{ checksum "~/.arch" }}-{{ checksum "rust-toolchain.toml" }}-{{ checksum "~/.xtask_version" }}' + default: '{{ checksum ".circleci/config.yml" }}-v3-{{ checksum "~/.arch" }}-{{ checksum ".config/mise/config.toml" }}-{{ checksum "rust-toolchain.toml" }}-{{ checksum "~/.xtask_version" }}' merge_version: type: string - default: '{{ checksum ".circleci/config.yml" }}-v3-{{ checksum "~/.arch" }}-{{ checksum "rust-toolchain.toml" }}-{{ checksum "~/.xtask_version" }}-{{ checksum "~/.merge_version" }}' - protoc_version: - type: string - default: "21.8" - # note the cmake version is only used for manual 
installs, not for installs from a package manager like apt or homebrew - cmake_version: - type: string - default: "3.31.1" + default: '{{ checksum ".circleci/config.yml" }}-v3-{{ checksum "~/.arch" }}-{{ checksum ".config/mise/config.toml" }}-{{ checksum "rust-toolchain.toml" }}-{{ checksum "~/.xtask_version" }}-{{ checksum "~/.merge_version" }}' nightly: type: boolean default: false @@ -119,18 +125,12 @@ commands: - restore_cache: keys: - "<< pipeline.parameters.toolchain_version >>" - - install_packages: - platform: << parameters.platform >> - - install_protoc: - platform: << parameters.platform >> - - install_rust: + - install_mise: platform: << parameters.platform >> - - install_extra_tools - fetch_dependencies - save_cache: key: "<< pipeline.parameters.toolchain_version >>" paths: - - ~/.deb - ~/.cargo - ~/.rustup - ~/.local @@ -152,6 +152,7 @@ commands: or: - equal: [ *amd_linux_build_executor, << parameters.platform >> ] - equal: [ *amd_linux_test_executor, << parameters.platform >> ] + - equal: [ *amd_linux_helm_executor, << parameters.platform >> ] steps: - run: name: Write arch @@ -189,11 +190,12 @@ commands: - equal: [ *windows_test_executor, << parameters.platform >> ] steps: - run: - name: Create bash aliases + name: Extend Bash profile for Windows command: | echo 'alias find=/bin/find' >> "$BASH_ENV" echo 'alias sort=/bin/sort' >> "$BASH_ENV" echo 'export EXECUTABLE_SUFFIX=".exe"' >> "$BASH_ENV" + echo 'export PATH="$HOME/AppData/Local/mise/shims:$HOME/scoop/apps/mise/current/bin:$HOME/scoop/shims:$PATH"' >> "$BASH_ENV" - run: name: Write arch command: | @@ -217,8 +219,8 @@ commands: echo "Common ancestor is ${COMMON_ANCESTOR_REF}" echo "${CIRCLE_PROJECT_REPONAME}-${COMMON_ANCESTOR_REF}" > ~/.merge_version - # Linux specific step to install packages that are needed - install_packages: + # Install mise for tool version management + install_mise: parameters: platform: type: executor @@ -226,83 +228,21 @@ commands: - when: condition: or: + - equal: [ 
*amd_linux_helm_executor, << parameters.platform >> ] - equal: [ *amd_linux_build_executor, << parameters.platform >> ] - equal: [ *amd_linux_test_executor, << parameters.platform >> ] - equal: [ *arm_linux_build_executor, << parameters.platform >> ] - equal: [ *arm_linux_test_executor, << parameters.platform >> ] - steps: - - run: - name: Update and install dependencies - command: | - if [[ ! -d "$HOME/.deb" ]]; then - mkdir $HOME/.deb - sudo apt-get update - sudo apt-get --download-only -o Dir::Cache="$HOME/.deb" -o Dir::Cache::archives="$HOME/.deb" install build-essential libssl-dev libdw-dev - fi - sudo dpkg -i $HOME/.deb/*.deb - - - when: - condition: - or: - - equal: [ *windows_build_executor, << parameters.platform >> ] - - equal: [ *windows_test_executor, << parameters.platform >> ] - steps: - - run: - name: Install CMake - command: | - mkdir -p "$HOME/.local" - if [[ ! -f "$HOME/.local/bin/cmake" ]]; then - curl -L https://github.com/Kitware/CMake/releases/download/v<< pipeline.parameters.cmake_version >>/cmake-<< pipeline.parameters.cmake_version >>-windows-x86_64.zip --output cmake.zip - # The zip file has a root directory, so we put it somewhere else first before placing the files in .local - unzip cmake.zip -d /tmp > /dev/null - cp /tmp/cmake-<< pipeline.parameters.cmake_version >>-windows-x86_64/* -R "$HOME/.local" - fi - - cmake --version - install_protoc: - parameters: - platform: - type: executor - steps: - - when: - condition: - or: - - equal: [ *amd_linux_build_executor, << parameters.platform >> ] - - equal: [ *amd_linux_test_executor, << parameters.platform >> ] - steps: - - run: - name: Install protoc - command: | - if [[ ! 
-f "$HOME/.local/bin/protoc" ]]; then - curl -L https://github.com/protocolbuffers/protobuf/releases/download/v<< pipeline.parameters.protoc_version >>/protoc-<< pipeline.parameters.protoc_version >>-linux-x86_64.zip --output protoc.zip - unzip protoc.zip -d $HOME/.local - fi - - when: - condition: - or: - - equal: [ *arm_linux_build_executor, << parameters.platform >> ] - - equal: [ *arm_linux_test_executor, << parameters.platform >> ] - steps: - - run: - name: Install protoc - command: | - if [[ ! -f "$HOME/.local/bin/protoc" ]]; then - curl -L https://github.com/protocolbuffers/protobuf/releases/download/v<< pipeline.parameters.protoc_version >>/protoc-<< pipeline.parameters.protoc_version >>-linux-aarch_64.zip --output protoc.zip - unzip protoc.zip -d $HOME/.local - fi - - when: - condition: - or: - equal: [ *macos_build_executor, << parameters.platform >> ] - equal: [ *macos_test_executor, << parameters.platform >> ] steps: - run: - name: Install protoc + name: Install mise command: | - if [[ ! -f "$HOME/.local/bin/protoc" ]]; then - curl -L https://github.com/protocolbuffers/protobuf/releases/download/v<< pipeline.parameters.protoc_version >>/protoc-<< pipeline.parameters.protoc_version >>-osx-universal_binary.zip --output protoc.zip - unzip protoc.zip -d $HOME/.local - fi + curl https://mise.jdx.dev/install.sh | sh + mise activate bash >> "$BASH_ENV" + mise trust + mise install --yes - when: condition: or: @@ -310,85 +250,22 @@ commands: - equal: [ *windows_test_executor, << parameters.platform >> ] steps: - run: - name: Install protoc + name: Install scoop + shell: powershell.exe -ExecutionPolicy Bypass command: | - if [[ ! 
-f "$HOME/.local/bin/protoc$EXECUTABLE_SUFFIX" ]]; then - curl -L https://github.com/protocolbuffers/protobuf/releases/download/v<< pipeline.parameters.protoc_version >>/protoc-<< pipeline.parameters.protoc_version >>-win64.zip --output protoc.zip - unzip protoc.zip -d $HOME/.local - fi - - install_rust: - parameters: - platform: - type: executor - steps: - - run: - name: Install Rust - command: | - if [[ ! -d "$HOME/.cargo" ]]; then - curl https://sh.rustup.rs -sSf -o rustup.sh - chmod 755 ./rustup.sh - ./rustup.sh -y --profile minimal --component clippy --component rustfmt --default-toolchain none - fi - echo 'export PATH="$HOME/.cargo/bin:$PATH"' >> "$BASH_ENV" - export PATH="$HOME/.cargo/bin:$PATH" - rustup toolchain install - rustc -V - - - when: - condition: - or: - - equal: [ *windows_build_executor, << parameters.platform >> ] - - equal: [ *windows_test_executor, << parameters.platform >> ] - steps: - - run: - name: Special case for Windows because of ssh-agent - command: | - printf "[net]\ngit-fetch-with-cli = true" >> ~/.cargo/Cargo.toml - - when: - condition: - or: - - equal: [ *macos_build_executor, << parameters.platform >> ] - steps: - - run: - name: Special case for OSX x86_64 builds - command: | - rustup target add x86_64-apple-darwin - - - when: - condition: - equal: [ *arm_linux_test_executor, << parameters.platform >> ] - steps: + iex "& {$(irm get.scoop.sh)} -RunAsAdmin" - run: - name: Install nightly Rust to build the fuzzers + name: Install mise command: | - rustup install nightly - - install_extra_tools: - steps: - - run: - name: Install cargo deny, about, edit - command: | - if [[ ! -f "$HOME/.cargo/bin/cargo-binstall$EXECUTABLE_SUFFIX" ]]; then - curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash - fi - - if [[ ! 
-f "$HOME/.cargo/bin/cargo-deny$EXECUTABLE_SUFFIX" ]]; then - cargo binstall -y --version 0.18.2 cargo-deny - cargo binstall -y --version 0.12.2 cargo-edit - cargo binstall -y --version 0.12.0 cargo-fuzz - cargo binstall -y --version 0.6.6 cargo-about - fi - - if [[ ! -f "$HOME/.cargo/bin/cargo-nextest$EXECUTABLE_SUFFIX" ]]; then - cargo binstall -y --version 0.9.70 cargo-nextest - fi - + scoop install mise + mise trust + mise install --yes fetch_dependencies: steps: - run: name: Fetch dependencies command: cargo fetch --locked + install_xtask: steps: - restore_cache: @@ -411,7 +288,7 @@ commands: - restore_cache: keys: - "<< pipeline.parameters.merge_version >>-lint" - - run: xtask lint + - run: $HOME/.cargo/bin/xtask$EXECUTABLE_SUFFIX lint - when: condition: equal: [ "dev", "<< pipeline.git.branch >>" ] @@ -429,13 +306,6 @@ commands: - run: name: Validate helm manifests command: | - # Install Helm - curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash - - # Install kubeconform - KUBECONFORM_INSTALL=$(mktemp -d) - curl -L https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64.tar.gz | tar xz -C "${KUBECONFORM_INSTALL}" - # Create list of kube versions CURRENT_KUBE_VERSIONS=$(curl -s -L https://raw.githubusercontent.com/kubernetes/website/main/data/releases/schedule.yaml \ | yq -o json '.' 
\ @@ -470,7 +340,7 @@ commands: helm template --kube-version "${kube_version}" router helm/chart/router --set autoscaling.enabled=true > "${TEMPLATE_DIR}/router-${kube_version}.yaml" # Execute kubeconform on our templated charts to ensure they are good - "${KUBECONFORM_INSTALL}/kubeconform" \ + kubeconform \ --kubernetes-version "${kube_version}" \ --strict \ --schema-location default \ @@ -486,7 +356,7 @@ commands: - "<< pipeline.parameters.merge_version >>-compliance" # cargo-deny fetches a rustsec advisory DB, which has to happen on github.com over https - run: git config --global --unset-all url.ssh://git@github.com.insteadof - - run: xtask check-compliance + - run: $HOME/.cargo/bin/xtask$EXECUTABLE_SUFFIX check-compliance - when: condition: equal: [ "dev", "<< pipeline.git.branch >>" ] @@ -513,7 +383,7 @@ commands: # TODO: remove this workaround once we update to Xcode >= 15.1.0 # See: https://github.com/apollographql/router/pull/5462 RUST_LIB_BACKTRACE: 0 - command: xtask test --workspace --locked --features ci,snapshot + command: $HOME/.cargo/bin/xtask$EXECUTABLE_SUFFIX test --workspace --locked --features ci,snapshot - run: name: Delete large files from cache command: | @@ -561,6 +431,8 @@ jobs: equal: [ *amd_linux_helm_executor, << parameters.platform >> ] steps: - checkout + - setup_environment: + platform: << parameters.platform >> - xtask_check_helm check_compliance: @@ -755,25 +627,6 @@ jobs: --keychain-password ${MACOS_KEYCHAIN_PASSWORD} --notarization-password ${MACOS_NOTARIZATION_PASSWORD} --output artifacts/ - - when: - condition: - and: - - equal: [ *amd_linux_build_executor, << parameters.platform >> ] - - equal: [ true, << parameters.nightly >> ] - steps: - - run: - name: Helm install - command: | - # Install Helm - curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash - - run: - name: helm-docs install - command: | - # install golang (to ${HOME}/go) - curl -OLs https://go.dev/dl/go1.21.3.linux-amd64.tar.gz - tar -C 
"${HOME}" -xf go1.21.3.linux-amd64.tar.gz - # install helm-docs - PATH="${HOME}/go/bin" GOPATH="${HOME}/.local" GO111MODULE=on go install github.com/norwoodj/helm-docs/cmd/helm-docs@latest - when: condition: or: @@ -932,7 +785,7 @@ jobs: publish_github_release: docker: - - image: cimg/base:current + - image: ghcr.io/apollographql/ci-utility-docker-images/apollo-rust-builder:0.1.0 resource_class: small environment: <<: *common_job_environment @@ -951,6 +804,8 @@ jobs: equal: [ "https://github.com/apollographql/router", << pipeline.project.git_url >> ] steps: - checkout + - setup_environment: + platform: amd_linux_build - setup_remote_docker: # CircleCI Image Policy # https://circleci.com/docs/remote-docker-images-support-policy/ @@ -958,7 +813,6 @@ jobs: docker_layer_caching: true - attach_workspace: at: artifacts - - gh/setup - run: command: > cd artifacts && sha256sum *.tar.gz > sha256sums.txt @@ -1003,8 +857,6 @@ jobs: - run: name: Helm build command: | - # Install Helm - curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash # Package up the helm chart helm package helm/chart/router # Make sure we have the newest chart diff --git a/.config/mise/config.ci-mac.toml b/.config/mise/config.ci-mac.toml new file mode 100644 index 0000000000..d4c01f837b --- /dev/null +++ b/.config/mise/config.ci-mac.toml @@ -0,0 +1,3 @@ +[tools] +# renovate-automation: rustc version +rust = { version = "1.85.1", targets = "x86_64-apple-darwin,aarch64-apple-darwin" } diff --git a/.config/mise/config.ci.toml b/.config/mise/config.ci.toml new file mode 100644 index 0000000000..c0b0b248fe --- /dev/null +++ b/.config/mise/config.ci.toml @@ -0,0 +1,3 @@ +[tools] +"cargo:cargo-fuzz" = "0.12.0" +kubeconform = "0.6.7" diff --git a/.config/mise/config.toml b/.config/mise/config.toml index d2ab2c583e..a87be631d6 100644 --- a/.config/mise/config.toml +++ b/.config/mise/config.toml @@ -1,15 +1,18 @@ [tools] # renovate-automation: rustc version -rust = "1.85.0" +rust = 
"1.85.1" +"aqua:cargo-bins/cargo-binstall" = "1.12.3" "cargo:cargo-nextest" = "0.9.70" "cargo:cargo-deny" = "0.18.2" "cargo:cargo-edit" = "0.12.2" "cargo:cargo-about" = "0.6.6" "cargo:cargo-insta" = "1.38.0" "cargo:htmlq" = "0.4.0" -"cargo:cargo-upgrades" = "2.1.1" "cargo:cargo-watch" = "8.5.3" -"cargo:graphql_client_cli" = "0.14.0" -"cargo:cargo-llvm-cov" = "0.6.10" -"cargo:cargo-fuzz" = "0.12.0" "cargo:typos-cli" = "1.31.1" +protoc = "21.8" +gh = "2.72.0" +helm = "3.17.3" +helm-docs = "1.14.2" +yq = "4.45.1" +jq = "1.7.1" diff --git a/.config/mise/config.windows.toml b/.config/mise/config.windows.toml new file mode 100644 index 0000000000..750983275c --- /dev/null +++ b/.config/mise/config.windows.toml @@ -0,0 +1,2 @@ +[tools] +cmake = "3.31.1" diff --git a/.gitignore b/.gitignore index 6129111330..0624c9b1de 100644 --- a/.gitignore +++ b/.gitignore @@ -37,4 +37,9 @@ dockerfiles/federation2-demo dhat-heap.json # env file -.env \ No newline at end of file +.env + +# generated fuzz/ files +fuzz/crash-* +fuzz/slow-unit-* +fuzz/timeout-* \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 97581dd50d..cb23dc82ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,116 @@ This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [2.2.1] - 2025-05-13 + +## 🐛 Fixes + +### Redis connection leak on schema changes ([PR #7319](https://github.com/apollographql/router/pull/7319)) + +The router performs a 'hot reload' whenever it detects a schema update. During this reload, it effectively instantiates a new internal router, warms it up (optional), redirects all traffic to this new router, and drops the old internal router. + +This change fixes a bug in that "drop" process where the Redis connections are never told to terminate, even though the Redis client pool is dropped. This leads to an ever-increasing number of inactive Redis connections as each new schema comes in and goes out of service, which eats up memory. 
+ +The solution adds a new up-down counter metric, `apollo.router.cache.redis.connections`, to track the number of open Redis connections. This metric includes a `kind` label to discriminate between different Redis connection pools, which mirrors the `kind` label on other cache metrics (ie `apollo.router.cache.hit.time`). + +By [@carodewig](https://github.com/carodewig) in https://github.com/apollographql/router/pull/7319 + +### Propagate client name and version modifications through telemetry ([PR #7369](https://github.com/apollographql/router/pull/7369)) + +The router accepts modifications to the client name and version (`apollo::telemetry::client_name` and `apollo::telemetry::client_version`), but those modifications are not currently propagated through the telemetry layers to update spans and traces. + +This PR moves where the client name and version are bound to the span, so that the modifications from plugins **on the `router` service** are propagated. + +By [@carodewig](https://github.com/carodewig) in https://github.com/apollographql/router/pull/7369 + +### Progressive overrides are not disabled when connectors are used ([PR #7351](https://github.com/apollographql/router/pull/7351)) + +Prior to this fix, introducing a connector disabled the progressive override plugin. + +By [@lennyburdette](https://github.com/lennyburdette) in https://github.com/apollographql/router/pull/7351 + +### Avoid unnecessary cloning in the deduplication plugin ([PR #7347](https://github.com/apollographql/router/pull/7347)) + +The deduplication plugin always cloned responses, even if there were not multiple simultaneous requests that would benefit from the cloned response. + +We now check to see if deduplication will provide a benefit before we clone the subgraph response. + +There was also an undiagnosed race condition which meant that a notification could be missed. 
This would have resulted in additional work being performed as the missed notification would have led to another subgraph request. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/7347 + +### Spans should only include path in `http.route` ([PR #7390](https://github.com/apollographql/router/pull/7390)) + +Per the [OpenTelemetry spec](https://opentelemetry.io/docs/specs/semconv/attributes-registry/http/#http-route), the `http.route` should only include "the matched route, that is, the path template used in the format used by the respective server framework." + +The router currently sends the full URI in `http.route`, which can be high cardinality (ie `/graphql?operation=one_of_many_values`). After this change, the router will only include the path (`/graphql`). + +By [@carodewig](https://github.com/carodewig) in https://github.com/apollographql/router/pull/7390 + +### Decrease log level for JWT authentication failure ([PR #7396](https://github.com/apollographql/router/pull/7396)) + +A recent change inadvertently increased the log level of JWT authentication failures from `info` to `error`. This reverts that change returning it to the previous behavior. + +By [@carodewig](https://github.com/carodewig) in https://github.com/apollographql/router/pull/7396 + +### Avoid fractional decimals when generating `apollo.router.operations.batching.size` metrics for GraphQL request batch sizes ([PR #7306](https://github.com/apollographql/router/pull/7306)) + +Corrects the calculation of the `apollo.router.operations.batching.size` metric to reflect accurate batch sizes rather than occasionally returning fractional numbers. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/7306 + +## 📃 Configuration + +### Log warnings for deprecated coprocessor `context` configuration usage ([PR #7349](https://github.com/apollographql/router/pull/7349)) + +`context: true` is an alias for `context: deprecated` but should not be used. The router now logs a runtime warning on startup if you do use it. + +Instead of: + +```yaml +coprocessor: + supergraph: + request: + context: true # ❌ +``` + +Explicitly use `deprecated` or `all`: + +```yaml +coprocessor: + supergraph: + request: + context: deprecated # ✅ +``` + +See [the 2.x upgrade guide](https://www.apollographql.com/docs/graphos/routing/upgrade/from-router-v1#context-keys-for-coprocessors) for more detailed upgrade steps. + +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/7349 + +## 🛠 Maintenance + +### Linux: Compatibility with glibc 2.28 or newer ([PR #7355](https://github.com/apollographql/router/pull/7355)) + +The default build images provided in our CI environment have a relatively modern version of `glibc` (2.35). This means that on some distributions, notably those based around RedHat, it wasn't possible to use our binaries since the version of `glibc` was older than 2.35. + +We now maintain a build image which is based on a distribution with `glibc` 2.28. This is old enough that recent releases of either of the main Linux distribution families (Debian and RedHat) can make use of our binary releases. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/7355 + +### Reject `@skip`/`@include` on subscription root fields in validation ([PR #7338](https://github.com/apollographql/router/pull/7338)) + +This implements a [GraphQL spec RFC](https://github.com/graphql/graphql-spec/pull/860), rejecting subscriptions in validation that can be invalid during execution. 
+ +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/7338 + +## 📚 Documentation + +### Query planning best practices ([PR #7263](https://github.com/apollographql/router/pull/7263)) + +Added a new page under Routing docs about [Query Planning Best Practices](https://www.apollographql.com/docs/graphos/routing/query-planning/query-planning-best-practices). + +By [@smyrick](https://github.com/smyrick) in https://github.com/apollographql/router/pull/7263 + # [2.2.0] - 2025-04-28 ## 🚀 Features diff --git a/Cargo.lock b/Cargo.lock index 183bb67ac3..a1f1dd9ebb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -162,9 +162,9 @@ checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "apollo-compiler" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bb8a0d8a54b31d8a9efcc25d4be3d949d823e8105a710861d6d4a4ef811b5f2" +checksum = "b3eb9f97e5cc573361cdeb65204fbb7c459c9a9d5a6bec48ee37355c642a06ad" dependencies = [ "ahash", "apollo-parser", @@ -173,7 +173,7 @@ dependencies = [ "rowan", "serde", "serde_json_bytes", - "thiserror 1.0.69", + "thiserror 2.0.10", "triomphe", "typed-arena", ] @@ -191,12 +191,13 @@ dependencies = [ [[package]] name = "apollo-federation" -version = "2.2.0" +version = "2.2.1" dependencies = [ "apollo-compiler", "apollo-federation", "derive_more", "dhat", + "diff", "either", "hashbrown 0.15.2", "hex", @@ -223,7 +224,7 @@ dependencies = [ "strum 0.27.1", "strum_macros 0.27.1", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.10", "time", "tracing", "url", @@ -255,7 +256,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "2.2.0" +version = "2.2.1" dependencies = [ "ahash", "anyhow", @@ -267,6 +268,8 @@ dependencies = [ "aws-config", "aws-credential-types", "aws-sigv4", + "aws-smithy-async", + "aws-smithy-http-client", "aws-smithy-runtime-api", "aws-types", "axum 0.8.3", @@ -309,7 +312,7 @@ 
dependencies = [ "humantime", "humantime-serde", "hyper 1.6.0", - "hyper-rustls 0.27.5", + "hyper-rustls", "hyper-util", "hyperlocal", "indexmap 2.9.0", @@ -360,15 +363,15 @@ dependencies = [ "rand 0.8.5", "rand_core 0.6.4", "regex", - "reqwest 0.12.15", + "reqwest", "rhai", "rmp", "rstack", "rstest", "rust-embed", - "rustls 0.23.26", + "rustls", "rustls-native-certs 0.8.1", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "ryu", "schemars", "semver", @@ -389,11 +392,11 @@ dependencies = [ "sysinfo", "tempfile", "test-log", - "thiserror 1.0.69", + "thiserror 2.0.10", "tikv-jemallocator", "time", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls", "tokio-stream", "tokio-tungstenite", "tokio-util", @@ -406,6 +409,7 @@ dependencies = [ "tracing", "tracing-core", "tracing-futures", + "tracing-mock", "tracing-opentelemetry", "tracing-serde 0.1.3", "tracing-subscriber", @@ -424,7 +428,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "2.2.0" +version = "2.2.1" dependencies = [ "apollo-parser", "apollo-router", @@ -821,8 +825,6 @@ checksum = "8c39646d1a6b51240a1a23bb57ea4eebede7e16fbc237fdc876980233dcecb4f" dependencies = [ "aws-credential-types", "aws-runtime", - "aws-sdk-sso", - "aws-sdk-ssooidc", "aws-sdk-sts", "aws-smithy-async", "aws-smithy-http", @@ -833,14 +835,11 @@ dependencies = [ "aws-types", "bytes", "fastrand 2.3.0", - "hex", "http 1.3.1", - "ring", "time", "tokio", "tracing", "url", - "zeroize", ] [[package]] @@ -855,31 +854,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "aws-lc-rs" -version = "1.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" -dependencies = [ - "aws-lc-sys", - "paste", - "zeroize", -] - -[[package]] -name = "aws-lc-sys" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" -dependencies = [ - 
"bindgen", - "cc", - "cmake", - "dunce", - "fs_extra", - "paste", -] - [[package]] name = "aws-runtime" version = "1.5.6" @@ -905,52 +879,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "aws-sdk-sso" -version = "1.64.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d4bdb0e5f80f0689e61c77ab678b2b9304af329616af38aef5b6b967b8e736" -dependencies = [ - "aws-credential-types", - "aws-runtime", - "aws-smithy-async", - "aws-smithy-http", - "aws-smithy-json", - "aws-smithy-runtime", - "aws-smithy-runtime-api", - "aws-smithy-types", - "aws-types", - "bytes", - "fastrand 2.3.0", - "http 0.2.12", - "once_cell", - "regex-lite", - "tracing", -] - -[[package]] -name = "aws-sdk-ssooidc" -version = "1.65.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbbb3ce8da257aedbccdcb1aadafbbb6a5fe9adf445db0e1ea897bdc7e22d08" -dependencies = [ - "aws-credential-types", - "aws-runtime", - "aws-smithy-async", - "aws-smithy-http", - "aws-smithy-json", - "aws-smithy-runtime", - "aws-smithy-runtime-api", - "aws-smithy-types", - "aws-types", - "bytes", - "fastrand 2.3.0", - "http 0.2.12", - "once_cell", - "regex-lite", - "tracing", -] - [[package]] name = "aws-sdk-sts" version = "1.65.0" @@ -1042,10 +970,10 @@ dependencies = [ "h2 0.4.7", "http 1.3.1", "hyper 1.6.0", - "hyper-rustls 0.27.5", + "hyper-rustls", "hyper-util", "pin-project-lite", - "rustls 0.23.26", + "rustls", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", @@ -1090,7 +1018,6 @@ checksum = "0152749e17ce4d1b47c7747bdfec09dac1ccafdcbc741ebf9daa2a373356730f" dependencies = [ "aws-smithy-async", "aws-smithy-http", - "aws-smithy-http-client", "aws-smithy-observability", "aws-smithy-runtime-api", "aws-smithy-types", @@ -1191,7 +1118,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower 0.5.2", "tower-layer", "tower-service", @@ -1225,7 +1152,7 @@ dependencies = [ "serde_path_to_error", 
"serde_urlencoded", "sha1", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tokio-tungstenite", "tower 0.5.2", @@ -1249,7 +1176,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", ] @@ -1268,7 +1195,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -1377,29 +1304,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.69.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" -dependencies = [ - "bitflags 2.6.0", - "cexpr", - "clang-sys", - "itertools 0.11.0", - "lazy_static", - "lazycell", - "log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash 1.1.0", - "shlex", - "syn 2.0.90", - "which", -] - [[package]] name = "bit-set" version = "0.5.3" @@ -1622,15 +1526,6 @@ dependencies = [ "shlex", ] -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - [[package]] name = "cfg-if" version = "1.0.0" @@ -1694,17 +1589,6 @@ dependencies = [ "half", ] -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", - "libloading", -] - [[package]] name = "clap" version = "4.5.36" @@ -1745,15 +1629,6 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" -[[package]] -name = "cmake" -version = "0.1.52" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" -dependencies = [ - "cc", -] - [[package]] name = "colorchoice" version = "1.0.3" @@ -2674,12 +2549,12 @@ dependencies = [ "parking_lot", "rand 0.8.5", "redis-protocol", - "rustls 0.23.26", + "rustls", "rustls-native-certs 0.7.3", "semver", "socket2", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls", "tokio-stream", "tokio-util", "url", @@ -2707,12 +2582,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "fsio" version = "0.4.0" @@ -3247,15 +3116,6 @@ dependencies = [ "digest", ] -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "hostname" version = "0.3.1" @@ -3433,20 +3293,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.31", - "rustls 0.21.12", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-rustls" version = "0.27.5" @@ -3457,14 +3303,13 @@ dependencies = [ "http 1.3.1", "hyper 1.6.0", "hyper-util", - "log", - "rustls 0.23.26", + "rustls", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls", "tower-service", - "webpki-roots 0.26.7", + "webpki-roots", ] [[package]] @@ -3995,12 +3840,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -[[package]] -name = "lazycell" 
-version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.171" @@ -4017,16 +3856,6 @@ dependencies = [ "cc", ] -[[package]] -name = "libloading" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" -dependencies = [ - "cfg-if", - "windows-targets 0.52.6", -] - [[package]] name = "libredox" version = "0.1.3" @@ -4598,7 +4427,7 @@ dependencies = [ "opentelemetry-http", "opentelemetry-semantic-conventions", "opentelemetry_sdk 0.24.1", - "reqwest 0.12.15", + "reqwest", "rmp", "ryu", "thiserror 1.0.69", @@ -4615,7 +4444,7 @@ dependencies = [ "bytes", "http 1.3.1", "opentelemetry 0.24.0", - "reqwest 0.12.15", + "reqwest", ] [[package]] @@ -4641,7 +4470,7 @@ dependencies = [ "opentelemetry-proto 0.7.0", "opentelemetry_sdk 0.24.1", "prost 0.13.4", - "reqwest 0.12.15", + "reqwest", "thiserror 1.0.69", "tokio", "tonic 0.12.3", @@ -4723,7 +4552,7 @@ dependencies = [ "opentelemetry-http", "opentelemetry-semantic-conventions", "opentelemetry_sdk 0.24.1", - "reqwest 0.12.15", + "reqwest", "serde", "serde_json", "thiserror 1.0.69", @@ -5275,7 +5104,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.0", - "rustls 0.23.26", + "rustls", "socket2", "thiserror 2.0.10", "tokio", @@ -5293,7 +5122,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustc-hash 2.1.0", - "rustls 0.23.26", + "rustls", "rustls-pki-types", "slab", "thiserror 2.0.10", @@ -5536,51 +5365,6 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" 
-dependencies = [ - "async-compression", - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.31", - "hyper-rustls 0.24.2", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.12", - "rustls-native-certs 0.6.3", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-rustls 0.24.1", - "tokio-util", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "webpki-roots 0.25.4", - "winreg", -] - [[package]] name = "reqwest" version = "0.12.15" @@ -5597,7 +5381,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.6.0", - "hyper-rustls 0.27.5", + "hyper-rustls", "hyper-util", "ipnet", "js-sys", @@ -5608,16 +5392,16 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.26", + "rustls", "rustls-native-certs 0.8.1", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls", "tokio-util", "tower 0.5.2", "tower-service", @@ -5626,7 +5410,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.26.7", + "webpki-roots", "windows-registry", ] @@ -5799,7 +5583,7 @@ dependencies = [ "libfuzzer-sys", "log", "rand 0.8.5", - "reqwest 0.11.27", + "reqwest", "schemars", "serde", "serde_json", @@ -5950,46 +5734,21 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.23.26" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" dependencies = [ - "aws-lc-rs", "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.0", + "rustls-webpki", "subtle", "zeroize", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" -dependencies = [ - "openssl-probe", - "rustls-pemfile 1.0.4", - "schannel", - "security-framework 2.11.1", -] - [[package]] name = "rustls-native-certs" version = "0.7.3" @@ -5997,7 +5756,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework 2.11.1", @@ -6015,15 +5774,6 @@ dependencies = [ "security-framework 3.0.1", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -6042,23 +5792,12 @@ dependencies = [ "web-time", ] -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.103.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0aa4eeac2588ffff23e9d7a7e9b3f971c5fb5b7ebc9452745e0c232c64f83b2f" dependencies = [ - "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -6125,16 +5864,6 @@ version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sec1" version = "0.7.3" @@ -6595,12 +6324,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "sync_wrapper" version = "1.0.2" @@ -6644,27 +6367,6 @@ dependencies = [ "windows 0.57.0", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.4", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "tempfile" version = "3.19.1" @@ -6916,23 +6618,13 @@ dependencies = [ "syn 2.0.90", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.26", + "rustls", "tokio", ] @@ -6969,11 
+6661,11 @@ checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" dependencies = [ "futures-util", "log", - "rustls 0.23.26", + "rustls", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls", "tungstenite", ] @@ -7053,10 +6745,10 @@ dependencies = [ "pin-project", "prost 0.13.4", "rustls-native-certs 0.8.1", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "socket2", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -7110,7 +6802,7 @@ dependencies = [ "indexmap 2.9.0", "pin-project-lite", "slab", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tokio-util", "tower-layer", @@ -7231,6 +6923,16 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-mock" +version = "0.1.0-beta.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff59989fc8854bc58d3daeadbccf5d7fb0b722af043cf839c7785f1ff0daf0e" +dependencies = [ + "tracing", + "tracing-core", +] + [[package]] name = "tracing-opentelemetry" version = "0.25.0" @@ -7360,7 +7062,7 @@ dependencies = [ "httparse", "log", "rand 0.9.0", - "rustls 0.23.26", + "rustls", "rustls-pki-types", "sha1", "thiserror 2.0.10", @@ -7694,12 +7396,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.7" @@ -7709,18 +7405,6 @@ dependencies = [ "rustls-pki-types", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.42", -] - [[package]] name = "widestring" version = "1.1.0" diff --git a/Cargo.toml b/Cargo.toml index c0d5ab35ac..eba725b7b5 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ debug = 1 # Dependencies used in more than one place are specified here in order to keep versions in sync: # https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table [workspace.dependencies] -apollo-compiler = "1.27.0" +apollo-compiler = "1.28.0" apollo-parser = "0.8.4" apollo-smith = "0.15.0" async-trait = "0.1.77" @@ -60,13 +60,7 @@ insta = { version = "1.38.0", features = [ "glob", ] } once_cell = "1.19.0" -reqwest = { version = "0.11.0", default-features = false, features = [ - "rustls-tls", - "rustls-native-certs", - "gzip", - "json", - "stream", -] } +reqwest = { version = "0.12.0", default-features = false } schemars = { version = "0.8.22", features = ["url"] } serde = { version = "1.0.198", features = ["derive", "rc"] } diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml index 90346f8dfc..cdaeb997e9 100644 --- a/apollo-federation/Cargo.toml +++ b/apollo-federation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-federation" -version = "2.2.0" +version = "2.2.1" authors = ["The Apollo GraphQL Contributors"] edition = "2024" description = "Apollo Federation" @@ -17,7 +17,6 @@ autotests = false # Integration tests are m snapshot_tracing = ["ron"] # `correctness` feature enables the `correctness` module. 
correctness = [] -"connect_v0.2" = [] [dependencies] apollo-compiler.workspace = true @@ -42,7 +41,7 @@ serde_json.workspace = true serde_json_bytes.workspace = true strum = "0.27.0" strum_macros = "0.27.0" -thiserror = "1.0" +thiserror = "2.0" url = "2" either = "1.13.0" tracing = "0.1.40" @@ -50,6 +49,7 @@ ron = { version = "0.8.1", optional = true } shape = "0.5.1" [dev-dependencies] +diff = "0.1.13" hex.workspace = true insta.workspace = true sha1.workspace = true diff --git a/apollo-federation/cli/src/main.rs b/apollo-federation/cli/src/main.rs index 805151a6b3..5d4ff54cbe 100644 --- a/apollo-federation/cli/src/main.rs +++ b/apollo-federation/cli/src/main.rs @@ -18,6 +18,7 @@ use apollo_federation::query_plan::query_planner::QueryPlannerConfig; use apollo_federation::sources::connect::expand::ExpansionResult; use apollo_federation::sources::connect::expand::expand_connectors; use apollo_federation::subgraph; +use apollo_federation::subgraph::typestate; use clap::Parser; use tracing_subscriber::prelude::*; @@ -92,6 +93,11 @@ enum Command { /// Path(s) to subgraph schemas. 
schemas: Vec, }, + /// Expand and validate a subgraph schema and print the result + Subgraph { + /// The path to the subgraph schema file, or `-` for stdin + subgraph_schema: PathBuf, + }, /// Extract subgraph schemas from a supergraph schema to stdout (or in a directory if specified) Extract { /// The path to the supergraph schema file, or `-` for stdin @@ -171,6 +177,7 @@ fn main() -> ExitCode { planner, } => cmd_plan(&query, &schemas, planner), Command::Validate { schemas } => cmd_validate(&schemas), + Command::Subgraph { subgraph_schema } => cmd_subgraph(&subgraph_schema), Command::Compose { schemas } => cmd_compose(&schemas), Command::Extract { supergraph_schema, @@ -320,6 +327,22 @@ fn cmd_validate(file_paths: &[PathBuf]) -> Result<(), FederationError> { Ok(()) } +fn cmd_subgraph(file_path: &Path) -> Result<(), FederationError> { + let doc_str = read_input(file_path); + let name = file_path + .file_name() + .and_then(|name| name.to_str().map(|x| x.to_string())); + let name = name.unwrap_or("subgraph".to_string()); + let subgraph = typestate::Subgraph::parse(&name, &format!("http://{name}"), &doc_str) + .expect("valid schema") + .expand_links() + .expect("expanded subgraph to be valid") + .validate(true) + .map_err(|e| e.into_inner())?; + println!("{}", subgraph.schema_string()); + Ok(()) +} + fn cmd_compose(file_paths: &[PathBuf]) -> Result<(), FederationError> { let supergraph = compose_files(file_paths)?; println!("{}", supergraph.schema.schema()); diff --git a/apollo-federation/src/error/mod.rs b/apollo-federation/src/error/mod.rs index 713008d7fb..59c3c812f2 100644 --- a/apollo-federation/src/error/mod.rs +++ b/apollo-federation/src/error/mod.rs @@ -338,6 +338,16 @@ pub enum SingleFederationError { PlanningCancelled, #[error("No plan was found when subgraphs were disabled")] NoPlanFoundWithDisabledSubgraphs, + #[error("@cost cannot be applied to interface \"{interface}.{field}\"")] + CostAppliedToInterfaceField { interface: Name, field: Name }, + 
#[error("{message}")] + ListSizeAppliedToNonList { message: String }, + #[error("{message}")] + ListSizeInvalidAssumedSize { message: String }, + #[error("{message}")] + ListSizeInvalidSlicingArgument { message: String }, + #[error("{message}")] + ListSizeInvalidSizedField { message: String }, } impl SingleFederationError { @@ -536,9 +546,28 @@ impl SingleFederationError { SingleFederationError::NoPlanFoundWithDisabledSubgraphs => { ErrorCode::NoPlanFoundWithDisabledSubgraphs } + SingleFederationError::CostAppliedToInterfaceField { .. } => { + ErrorCode::CostAppliedToInterfaceField + } + SingleFederationError::ListSizeAppliedToNonList { .. } => { + ErrorCode::ListSizeAppliedToNonList + } + SingleFederationError::ListSizeInvalidAssumedSize { .. } => { + ErrorCode::ListSizeInvalidAssumedSize + } + SingleFederationError::ListSizeInvalidSlicingArgument { .. } => { + ErrorCode::ListSizeInvalidSlicingArgument + } + SingleFederationError::ListSizeInvalidSizedField { .. } => { + ErrorCode::ListSizeInvalidSizedField + } } } + pub fn code_string(&self) -> String { + self.code().definition().code().to_string() + } + pub(crate) fn root_already_used( operation_type: OperationType, expected_name: Name, @@ -586,7 +615,7 @@ impl From for FederationError { #[derive(Debug, Clone, thiserror::Error, Default)] pub struct MultipleFederationErrors { - pub errors: Vec, + pub(crate) errors: Vec, } impl MultipleFederationErrors { @@ -714,6 +743,14 @@ impl FederationError { result.push(other); result.into() } + + pub fn errors(&self) -> Vec<&SingleFederationError> { + match self { + FederationError::SingleFederationError(e) => vec![e], + FederationError::MultipleFederationErrors(e) => e.errors.iter().collect(), + FederationError::AggregateFederationError(e) => e.causes.iter().collect(), + } + } } // Similar to `multi_try` crate, but with `FederationError` instead of `Vec`. 
@@ -1609,6 +1646,61 @@ static NO_PLAN_FOUND_WITH_DISABLED_SUBGRAPHS: LazyLock = La ) }); +static COST_APPLIED_TO_INTERFACE_FIELD: LazyLock = LazyLock::new(|| { + ErrorCodeDefinition::new( + "COST_APPLIED_TO_INTERFACE_FIELD".to_owned(), + "The `@cost` directive must be applied to concrete types".to_owned(), + Some(ErrorCodeMetadata { + added_in: "2.9.2", + replaces: &[], + }), + ) +}); + +static LIST_SIZE_APPLIED_TO_NON_LIST: LazyLock = LazyLock::new(|| { + ErrorCodeDefinition::new( + "LIST_SIZE_APPLIED_TO_NON_LIST".to_owned(), + "The `@listSize` directive must be applied to list types".to_owned(), + Some(ErrorCodeMetadata { + added_in: "2.9.2", + replaces: &[], + }), + ) +}); + +static LIST_SIZE_INVALID_ASSUMED_SIZE: LazyLock = LazyLock::new(|| { + ErrorCodeDefinition::new( + "LIST_SIZE_INVALID_ASSUMED_SIZE".to_owned(), + "The `@listSize` directive assumed size cannot be negative".to_owned(), + Some(ErrorCodeMetadata { + added_in: "2.9.2", + replaces: &[], + }), + ) +}); + +static LIST_SIZE_INVALID_SLICING_ARGUMENT: LazyLock = LazyLock::new(|| { + ErrorCodeDefinition::new( + "LIST_SIZE_INVALID_SLICING_ARGUMENT".to_owned(), + "The `@listSize` directive must have existing integer slicing arguments".to_owned(), + Some(ErrorCodeMetadata { + added_in: "2.9.2", + replaces: &[], + }), + ) +}); + +static LIST_SIZE_INVALID_SIZED_FIELD: LazyLock = LazyLock::new(|| { + ErrorCodeDefinition::new( + "LIST_SIZE_INVALID_SIZED_FIELD".to_owned(), + "The `@listSize` directive must reference existing list fields as sized fields".to_owned(), + Some(ErrorCodeMetadata { + added_in: "2.9.2", + replaces: &[], + }), + ) +}); + #[derive(Debug, strum_macros::EnumIter)] pub enum ErrorCode { Internal, @@ -1692,6 +1784,11 @@ pub enum ErrorCode { UnsupportedFederationDirective, QueryPlanComplexityExceededError, NoPlanFoundWithDisabledSubgraphs, + CostAppliedToInterfaceField, + ListSizeAppliedToNonList, + ListSizeInvalidAssumedSize, + ListSizeInvalidSlicingArgument, + ListSizeInvalidSizedField, } 
impl ErrorCode { @@ -1793,6 +1890,11 @@ impl ErrorCode { ErrorCode::UnsupportedFederationDirective => &UNSUPPORTED_FEDERATION_DIRECTIVE, ErrorCode::QueryPlanComplexityExceededError => &QUERY_PLAN_COMPLEXITY_EXCEEDED, ErrorCode::NoPlanFoundWithDisabledSubgraphs => &NO_PLAN_FOUND_WITH_DISABLED_SUBGRAPHS, + ErrorCode::CostAppliedToInterfaceField => &COST_APPLIED_TO_INTERFACE_FIELD, + ErrorCode::ListSizeAppliedToNonList => &LIST_SIZE_APPLIED_TO_NON_LIST, + ErrorCode::ListSizeInvalidAssumedSize => &LIST_SIZE_INVALID_ASSUMED_SIZE, + ErrorCode::ListSizeInvalidSlicingArgument => &LIST_SIZE_INVALID_SLICING_ARGUMENT, + ErrorCode::ListSizeInvalidSizedField => &LIST_SIZE_INVALID_SIZED_FIELD, } } } diff --git a/apollo-federation/src/lib.rs b/apollo-federation/src/lib.rs index 2d3f0bc911..de2502ea96 100644 --- a/apollo-federation/src/lib.rs +++ b/apollo-federation/src/lib.rs @@ -61,6 +61,7 @@ use crate::link::spec_definition::SpecDefinitions; use crate::merge::MergeFailure; use crate::merge::merge_subgraphs; use crate::schema::ValidFederationSchema; +use crate::sources::connect::ConnectSpec; use crate::subgraph::ValidSubgraph; pub use crate::supergraph::ValidFederationSubgraph; pub use crate::supergraph::ValidFederationSubgraphs; @@ -116,6 +117,9 @@ pub(crate) fn validate_supergraph( } }) }).transpose()?; + if let Some(connect_link) = metadata.for_identity(&ConnectSpec::identity()) { + ConnectSpec::try_from(&connect_link.url.version)?; + } Ok(( link_spec_definition, join_spec_definition, @@ -123,6 +127,7 @@ pub(crate) fn validate_supergraph( )) } +#[derive(Debug)] pub struct Supergraph { pub schema: ValidFederationSchema, } @@ -177,3 +182,40 @@ const _: () = { pub(crate) fn is_leaf_type(schema: &Schema, ty: &NamedType) -> bool { schema.get_scalar(ty).is_some() || schema.get_enum(ty).is_some() } + +#[cfg(test)] +mod test_supergraph { + use pretty_assertions::assert_str_eq; + + use super::*; + + #[test] + fn validates_connect_spec_is_known() { + let res = Supergraph::new( + r#" + 
extend schema @link(url: "https://specs.apollo.dev/connect/v99.99") + + # Required stuff for the supergraph to parse at all, not what we're testing + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + scalar link__Import + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + type Query {required: ID!} + "#, + ) + .expect_err("Unknown spec version did not cause error"); + assert_str_eq!(res.to_string(), "Unknown connect version: 99.99"); + } +} diff --git a/apollo-federation/src/link/cost_spec_definition.rs b/apollo-federation/src/link/cost_spec_definition.rs index 05945351d7..26b040e468 100644 --- a/apollo-federation/src/link/cost_spec_definition.rs +++ b/apollo-federation/src/link/cost_spec_definition.rs @@ -1,4 +1,3 @@ -use std::collections::HashSet; use std::sync::LazyLock; use apollo_compiler::Name; @@ -6,11 +5,15 @@ use apollo_compiler::Node; use apollo_compiler::ast::Argument; use apollo_compiler::ast::Directive; use apollo_compiler::ast::DirectiveList; +use apollo_compiler::ast::DirectiveLocation; use apollo_compiler::ast::FieldDefinition; use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::schema::ExtendedType; +use apollo_compiler::schema::Value; +use apollo_compiler::ty; +use indexmap::IndexSet; use crate::error::FederationError; use crate::internal_error; @@ -21,9 +24,13 @@ use crate::link::spec::Version; use crate::link::spec_definition::SpecDefinition; use crate::link::spec_definition::SpecDefinitions; use crate::schema::FederationSchema; +use 
crate::schema::argument_composition_strategies::ArgumentCompositionStrategy; use crate::schema::position::EnumTypeDefinitionPosition; use crate::schema::position::ObjectTypeDefinitionPosition; use crate::schema::position::ScalarTypeDefinitionPosition; +use crate::schema::type_and_directive_specification::ArgumentSpecification; +use crate::schema::type_and_directive_specification::DirectiveArgumentSpecification; +use crate::schema::type_and_directive_specification::DirectiveSpecification; use crate::schema::type_and_directive_specification::TypeAndDirectiveSpecification; const COST_DIRECTIVE_NAME: Name = name!("cost"); @@ -171,7 +178,9 @@ impl CostSpecDefinition { /// Returns the name of the `@cost` directive in the given schema, accounting for import aliases or specification name /// prefixes such as `@federation__cost`. This checks the linked cost specification, if there is one, and falls back /// to the federation spec. - fn cost_directive_name(schema: &FederationSchema) -> Result, FederationError> { + pub(crate) fn cost_directive_name( + schema: &FederationSchema, + ) -> Result, FederationError> { if let Some(spec) = Self::for_federation_schema(schema) { spec.directive_name_in_schema(schema, &COST_DIRECTIVE_NAME) } else if let Ok(fed_spec) = get_federation_spec_definition_from_subgraph(schema) { @@ -184,7 +193,7 @@ impl CostSpecDefinition { /// Returns the name of the `@listSize` directive in the given schema, accounting for import aliases or specification name /// prefixes such as `@federation__listSize`. This checks the linked cost specification, if there is one, and falls back /// to the federation spec. 
- fn list_size_directive_name( + pub(crate) fn list_size_directive_name( schema: &FederationSchema, ) -> Result, FederationError> { if let Some(spec) = Self::for_federation_schema(schema) { @@ -235,6 +244,79 @@ impl CostSpecDefinition { Ok(None) } } + + fn cost_directive_specification() -> DirectiveSpecification { + DirectiveSpecification::new( + COST_DIRECTIVE_NAME, + &[DirectiveArgumentSpecification { + base_spec: ArgumentSpecification { + name: COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME, + get_type: |_, _| Ok(ty!(Int!)), + default_value: None, + }, + composition_strategy: Some(ArgumentCompositionStrategy::Max), + }], + false, + &[ + DirectiveLocation::ArgumentDefinition, + DirectiveLocation::Enum, + DirectiveLocation::FieldDefinition, + DirectiveLocation::InputFieldDefinition, + DirectiveLocation::Object, + DirectiveLocation::Scalar, + ], + false, + // TODO: Set up supergraph spec later, but the type is hard to work with at the moment + None, + None, + ) + } + + fn list_size_directive_specification() -> DirectiveSpecification { + DirectiveSpecification::new( + LIST_SIZE_DIRECTIVE_NAME, + &[ + DirectiveArgumentSpecification { + base_spec: ArgumentSpecification { + name: LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME, + get_type: |_, _| Ok(ty!(Int)), + default_value: None, + }, + composition_strategy: Some(ArgumentCompositionStrategy::Max), + }, + DirectiveArgumentSpecification { + base_spec: ArgumentSpecification { + name: LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME, + get_type: |_, _| Ok(ty!([String!])), + default_value: None, + }, + composition_strategy: Some(ArgumentCompositionStrategy::Union), + }, + DirectiveArgumentSpecification { + base_spec: ArgumentSpecification { + name: LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME, + get_type: |_, _| Ok(ty!([String!])), + default_value: None, + }, + composition_strategy: Some(ArgumentCompositionStrategy::Union), + }, + DirectiveArgumentSpecification { + base_spec: ArgumentSpecification { + name: 
LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME, + get_type: |_, _| Ok(ty!(Boolean)), + default_value: Some(Value::Boolean(true)), + }, + composition_strategy: Some(ArgumentCompositionStrategy::Max), + }, + ], + false, + &[DirectiveLocation::FieldDefinition], + false, + // TODO: Set up supergraph spec later, but the type is hard to work with at the moment + None, + None, + ) + } } impl SpecDefinition for CostSpecDefinition { @@ -243,11 +325,14 @@ impl SpecDefinition for CostSpecDefinition { } fn directive_specs(&self) -> Vec> { - todo!() + vec![ + Box::new(Self::cost_directive_specification()), + Box::new(Self::list_size_directive_specification()), + ] } fn type_specs(&self) -> Vec> { - todo!() + vec![] } } @@ -267,7 +352,10 @@ impl CostDirective { self.weight as f64 } - fn from_directives(directive_name: &Name, directives: &DirectiveList) -> Option { + pub(crate) fn from_directives( + directive_name: &Name, + directives: &DirectiveList, + ) -> Option { directives .get(directive_name)? .specified_argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME)? @@ -275,7 +363,7 @@ impl CostDirective { .map(|weight| Self { weight }) } - fn from_schema_directives( + pub(crate) fn from_schema_directives( directive_name: &Name, directives: &apollo_compiler::schema::DirectiveList, ) -> Option { @@ -289,8 +377,8 @@ impl CostDirective { pub struct ListSizeDirective { pub assumed_size: Option, - pub slicing_argument_names: Option>, - pub sized_fields: Option>, + pub slicing_argument_names: Option>, + pub sized_fields: Option>, pub require_one_slicing_argument: bool, } @@ -320,7 +408,7 @@ impl ListSizeDirective { .to_i32() } - fn slicing_argument_names(directive: &Directive) -> Option> { + fn slicing_argument_names(directive: &Directive) -> Option> { let names = directive .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME)? .as_list()? 
@@ -331,7 +419,7 @@ impl ListSizeDirective { Some(names) } - fn sized_fields(directive: &Directive) -> Option> { + fn sized_fields(directive: &Directive) -> Option> { let fields = directive .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME)? .as_list()? diff --git a/apollo-federation/src/link/federation_spec_definition.rs b/apollo-federation/src/link/federation_spec_definition.rs index 0b9c78cfd1..8602048546 100644 --- a/apollo-federation/src/link/federation_spec_definition.rs +++ b/apollo-federation/src/link/federation_spec_definition.rs @@ -121,6 +121,13 @@ impl FederationSpecDefinition { Self::for_version(latest_version).unwrap() } + /// Some users rely on auto-expanding fed v1 graphs with fed v2 directives. While technically + /// we should only expand @tag directive from v2 definitions, we will continue expanding other + /// directives (up to v2.4) to ensure backwards compatibility. + pub(crate) fn auto_expanded_federation_spec() -> &'static Self { + Self::for_version(&Version { major: 2, minor: 4 }).unwrap() + } + pub(crate) fn is_fed1(&self) -> bool { self.version().satisfies(&Version { major: 1, minor: 0 }) } @@ -363,10 +370,7 @@ impl FederationSpecDefinition { application: &'doc Node, ) -> Result, FederationError> { Ok(TagDirectiveArguments { - name: directive_required_string_argument( - application, - &FEDERATION_FIELDS_ARGUMENT_NAME, - )?, + name: directive_required_string_argument(application, &FEDERATION_NAME_ARGUMENT_NAME)?, }) } @@ -753,6 +757,25 @@ impl FederationSpecDefinition { ) } + fn tag_directive_specification(&self) -> DirectiveSpecification { + DirectiveSpecification::new( + FEDERATION_TAG_DIRECTIVE_NAME_IN_SPEC, + &[], + self.version().ge(&Version { major: 2, minor: 0 }), + &[ + DirectiveLocation::ArgumentDefinition, + DirectiveLocation::Scalar, + DirectiveLocation::Enum, + DirectiveLocation::EnumValue, + DirectiveLocation::InputObject, + DirectiveLocation::InputFieldDefinition, + ], + false, // TODO: Fix this + 
None, + None, + ) + } + fn override_directive_specification(&self) -> DirectiveSpecification { let mut args = vec![DirectiveArgumentSpecification { base_spec: ArgumentSpecification { @@ -801,6 +824,18 @@ impl FederationSpecDefinition { None, ) } + + fn interface_object_directive_directive_specification() -> DirectiveSpecification { + DirectiveSpecification::new( + FEDERATION_INTERFACEOBJECT_DIRECTIVE_NAME_IN_SPEC, + &[], + false, + &[DirectiveLocation::Object], + false, + None, + None, + ) + } } fn field_set_type(schema: &FederationSchema) -> Result { @@ -825,16 +860,24 @@ impl SpecDefinition for FederationSpecDefinition { ]; if self.is_fed1() { specs.push(Box::new(Self::extends_directive_specification())); + specs.push(Box::new(self.tag_directive_specification())); return specs; } specs.push(Box::new(self.shareable_directive_specification())); specs.push(Box::new(self.override_directive_specification())); + specs.push(Box::new(self.tag_directive_specification())); if self.version().satisfies(&Version { major: 2, minor: 1 }) { specs.push(Box::new(Self::compose_directive_directive_specification())); } + if self.version().satisfies(&Version { major: 2, minor: 3 }) { + specs.push(Box::new( + Self::interface_object_directive_directive_specification(), + )); + } + // TODO: The remaining directives added in later versions are implemented in separate specs, // which still need to be ported over @@ -921,7 +964,6 @@ pub(crate) fn get_federation_spec_definition_from_subgraph( } /// Adds a bootstrap fed 1 link directive to the schema. 
-#[allow(dead_code)] pub(crate) fn add_fed1_link_to_schema( schema: &mut FederationSchema, ) -> Result<(), FederationError> { diff --git a/apollo-federation/src/link/link_spec_definition.rs b/apollo-federation/src/link/link_spec_definition.rs index e884aff004..5415a6a019 100644 --- a/apollo-federation/src/link/link_spec_definition.rs +++ b/apollo-federation/src/link/link_spec_definition.rs @@ -4,11 +4,13 @@ use std::sync::LazyLock; use apollo_compiler::Name; use apollo_compiler::Node; use apollo_compiler::ast; +use apollo_compiler::ast::Directive; use apollo_compiler::ast::DirectiveLocation; use apollo_compiler::ast::Type; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::ty; +use itertools::Itertools; use crate::bail; use crate::error::FederationError; @@ -17,7 +19,10 @@ use crate::error::MultiTryAll; use crate::error::SingleFederationError; use crate::link::DEFAULT_IMPORT_SCALAR_NAME; use crate::link::DEFAULT_PURPOSE_ENUM_NAME; +use crate::link::Import; use crate::link::Link; +use crate::link::argument::directive_optional_list_argument; +use crate::link::argument::directive_optional_string_argument; use crate::link::spec::Identity; use crate::link::spec::Url; use crate::link::spec::Version; @@ -127,13 +132,12 @@ impl LinkSpecDefinition { /// Add `self` (the @link spec definition) and a directive application of it to the schema. // Note: we may want to allow some `import` as argument to this method. When we do, we need to // watch for imports of `Purpose` and `Import` and add the types under their imported name. - #[allow(dead_code)] pub(crate) fn add_to_schema( &self, schema: &mut FederationSchema, alias: Option, ) -> Result<(), FederationError> { - self.add_definitions_to_schema(schema, alias.clone())?; + self.add_definitions_to_schema(schema, alias.clone(), vec![])?; // This adds `@link(url: "https://specs.apollo.dev/link/v1.0")` to the "schema" definition. 
// And we have a choice to add it either the main definition, or to an `extend schema`. @@ -175,15 +179,46 @@ impl LinkSpecDefinition { })); } SchemaDefinitionPosition - .insert_directive(schema, Component::new(ast::Directive { name, arguments }))?; + .insert_directive(schema, Component::new(Directive { name, arguments }))?; Ok(()) } + pub(crate) fn extract_alias_and_imports_on_missing_link_directive_definition( + application: &Node, + ) -> Result<(Option, Vec>), FederationError> { + // PORT_NOTE: This is really logic encapsulated from onMissingDirectiveDefinition() in the + // JS codebase's FederationBlueprint, but moved here since it's all link-specific. The logic + // itself has a lot of problems, but we're porting it as-is for now, and we'll address the + // problems with it in a later version bump. + let url = + directive_optional_string_argument(application, &LINK_DIRECTIVE_URL_ARGUMENT_NAME)?; + if let Some(url) = url { + if url.starts_with(&LinkSpecDefinition::latest().url.identity.to_string()) { + let alias = directive_optional_string_argument( + application, + &LINK_DIRECTIVE_AS_ARGUMENT_NAME, + )? + .map(Name::new) + .transpose()?; + let imports = directive_optional_list_argument( + application, + &LINK_DIRECTIVE_IMPORT_ARGUMENT_NAME, + )? 
+ .into_iter() + .flatten() + .map(|value| Ok::<_, FederationError>(Arc::new(Import::from_value(value)?))) + .process_results(|r| r.collect::>())?; + return Ok((alias, imports)); + } + } + Ok((None, vec![])) + } + pub(crate) fn add_definitions_to_schema( &self, schema: &mut FederationSchema, alias: Option, - // imports: Vec, // Used by `onMissingDirectiveDefinition` in JS (FED-428) + imports: Vec>, ) -> Result<(), FederationError> { if let Some(metadata) = schema.metadata() { let link_spec_def = metadata.link_spec_definition()?; @@ -208,7 +243,7 @@ impl LinkSpecDefinition { let mock_link = Arc::new(Link { url: self.url.clone(), spec_alias: alias, - imports: vec![], // TODO (FED-428) + imports, purpose: None, }); Ok(()) @@ -221,7 +256,7 @@ impl LinkSpecDefinition { ) } - #[allow(dead_code)] + #[allow(unused)] pub(crate) fn fed1_latest() -> &'static Self { // Note: The `unwrap()` calls won't panic, since `CORE_VERSIONS` will always have at // least one version. diff --git a/apollo-federation/src/merge.rs b/apollo-federation/src/merge.rs index ccf3b8d17e..2c8378706c 100644 --- a/apollo-federation/src/merge.rs +++ b/apollo-federation/src/merge.rs @@ -595,7 +595,7 @@ impl Merger { let existing_type = types .entry(object_name.clone()) .or_insert(copy_object_type_stub( - object_name.clone(), + object_name, object, is_interface_object, )); @@ -822,10 +822,9 @@ impl Merger { union_name: NamedType, union: &Node, ) { - let existing_type = types.entry(union_name.clone()).or_insert(copy_union_type( - union_name.clone(), - union.description.clone(), - )); + let existing_type = types + .entry(union_name.clone()) + .or_insert(copy_union_type(union_name, union.description.clone())); if let ExtendedType::Union(u) = existing_type { let join_type_directives = @@ -1337,7 +1336,7 @@ fn add_core_feature_join( supergraph: &mut Schema, subgraphs_and_enum_values: &Vec<(&ValidFederationSubgraph, EnumValue)>, ) { - // @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) + // 
@link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) supergraph .schema_definition .make_mut() @@ -1347,7 +1346,7 @@ fn add_core_feature_join( arguments: vec![ Node::new(Argument { name: name!("url"), - value: "https://specs.apollo.dev/join/v0.3".into(), + value: "https://specs.apollo.dev/join/v0.5".into(), }), Node::new(Argument { name: name!("for"), diff --git a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__basic.snap b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__basic.snap index 4927d92d17..443c7765b0 100644 --- a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__basic.snap +++ b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__basic.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/merge/tests.rs expression: schema.serialize() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query mutation: Mutation } diff --git a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__inaccessible.snap b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__inaccessible.snap index 7b99d48c15..3e1f7a8e84 100644 --- a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__inaccessible.snap +++ b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__inaccessible.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/merge/tests.rs expression: schema.serialize() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: 
"https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) { query: Query } diff --git a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__input_types.snap b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__input_types.snap index 89fde35ed5..fe8f199dac 100644 --- a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__input_types.snap +++ b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__input_types.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/merge/tests.rs expression: schema.serialize() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_implementing_interface.snap b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_implementing_interface.snap index ee73fd4093..8e6f4fd4e1 100644 --- a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_implementing_interface.snap +++ b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_implementing_interface.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/merge/tests.rs expression: schema.serialize() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_object.snap b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_object.snap index 6e6cb1d576..d94538a344 100644 --- 
a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_object.snap +++ b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__interface_object.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/merge/tests.rs expression: schema.serialize() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__steel_thread.snap b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__steel_thread.snap index 22f6d7eda6..4bb3354a8b 100644 --- a/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__steel_thread.snap +++ b/apollo-federation/src/merge/snapshots/apollo_federation__merge__tests__steel_thread.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/merge/tests.rs expression: schema.serialize() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) { query: Query } diff --git a/apollo-federation/src/operation/contains.rs b/apollo-federation/src/operation/contains.rs index c49675b554..7b855bf94b 100644 --- a/apollo-federation/src/operation/contains.rs +++ b/apollo-federation/src/operation/contains.rs @@ -225,7 +225,7 @@ mod tests { let schema = ValidFederationSchema::new(schema).unwrap(); let left = Operation::parse(schema.clone(), left, "left.graphql") .expect("operation is valid and can be parsed"); - let right = Operation::parse(schema.clone(), right, "right.graphql") + let 
right = Operation::parse(schema, right, "right.graphql") .expect("operation is valid and can be parsed"); left.selection_set.containment( diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index 2655339827..155c7f2525 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -2566,6 +2566,7 @@ impl InlineFragmentSelection { }; let mut remove_defer = false; + #[expect(clippy::redundant_clone)] let mut args_copy = args.clone(); if let Some(BooleanOrVariable::Boolean(b)) = &args.if_ { if *b { diff --git a/apollo-federation/src/operation/simplify.rs b/apollo-federation/src/operation/simplify.rs index b6f4e7ed07..cfa7f55902 100644 --- a/apollo-federation/src/operation/simplify.rs +++ b/apollo-federation/src/operation/simplify.rs @@ -147,7 +147,7 @@ impl InlineFragmentSelection { // Note: Rebasing after flattening, since rebasing before that can error out. // Or, `flatten_unnecessary_fragments` could `rebase` at the same time. let selection_set = if useless_fragment { - selection_set.clone() + selection_set } else { selection_set.rebase_on(parent_type, schema)? 
}; @@ -248,7 +248,7 @@ impl InlineFragmentSelection { let rebased_inline_fragment = self.inline_fragment.rebase_on(parent_type, schema)?; - let mut nonliftable_selections = selection_set.selections.clone(); + let mut nonliftable_selections = selection_set.selections; Arc::make_mut(&mut nonliftable_selections) .retain(|k, _| !liftable_selections.contains_key(k)); diff --git a/apollo-federation/src/operation/tests/mod.rs b/apollo-federation/src/operation/tests/mod.rs index cdeb08b507..3adaf62993 100644 --- a/apollo-federation/src/operation/tests/mod.rs +++ b/apollo-federation/src/operation/tests/mod.rs @@ -981,7 +981,7 @@ fn converting_operation_types() { .unwrap(); let schema = ValidFederationSchema::new(schema).unwrap(); insta::assert_snapshot!(Operation::parse( - schema.clone(), + schema, r#" { intf { @@ -1345,7 +1345,7 @@ fn add_at_path_merge_subselections() { &path_to_c, Some( &SelectionSet::parse( - schema.clone(), + schema, ObjectTypeDefinitionPosition::new(name!("C")).into(), "e(arg: 1)", ) diff --git a/apollo-federation/src/query_graph/build_query_graph.rs b/apollo-federation/src/query_graph/build_query_graph.rs index dfe8a9af9f..3c01d519f5 100644 --- a/apollo-federation/src/query_graph/build_query_graph.rs +++ b/apollo-federation/src/query_graph/build_query_graph.rs @@ -134,7 +134,7 @@ impl BaseQueryGraphBuilder { .insert(source.clone(), IndexMap::default()); query_graph .root_kinds_to_nodes_by_source - .insert(source.clone(), IndexMap::default()); + .insert(source, IndexMap::default()); Self { query_graph } } @@ -382,12 +382,12 @@ impl SchemaQueryGraphBuilder { if self.subgraph.is_some() { self.maybe_add_interface_fields_edges(pos.clone(), node)?; } - self.add_abstract_type_edges(pos.clone().into(), node)?; + self.add_abstract_type_edges(pos.into(), node)?; } OutputTypeDefinitionPosition::Union(pos) => { // Add the special-case __typename edge for unions. 
self.add_edge_for_field(pos.introspection_typename_field().into(), node, false)?; - self.add_abstract_type_edges(pos.clone().into(), node)?; + self.add_abstract_type_edges(pos.into(), node)?; } // Any other case (scalar or enum; input objects are not possible here) is terminal and // has no edges to consider. @@ -1170,6 +1170,7 @@ impl FederatedQueryGraphBuilder { schema, type_pos.type_name().clone(), application.fields, + true, )?); // Note that each subgraph has a key edge to itself (when head == tail below). @@ -1295,6 +1296,7 @@ impl FederatedQueryGraphBuilder { .type_name() .clone(), application.fields, + true, ) else { // Ignored on purpose: it just means the key is not usable on this // subgraph. @@ -1361,6 +1363,7 @@ impl FederatedQueryGraphBuilder { &self.supergraph_schema, field_definition_position.parent().type_name().clone(), application.fields, + true, )?; all_conditions.push(conditions); } @@ -1709,6 +1712,7 @@ impl FederatedQueryGraphBuilder { schema, field_type_pos.type_name().clone(), application.fields, + true, )?; all_conditions.push(conditions); } @@ -2127,6 +2131,10 @@ impl FederatedQueryGraphBuilder { schema, type_in_supergraph_pos.type_name.clone(), "__typename", + // We don't validate here because __typename queried against a composite type is + // guaranteed to be valid. If the field set becomes non-trivial in the future, + // this should be updated accordingly. 
+ false, )?); for implementation_type_in_supergraph_pos in self .supergraph_schema @@ -2517,7 +2525,7 @@ mod tests { field_pos.get(schema.schema())?; let expected_field_transition = QueryGraphEdgeTransition::FieldCollection { source: SCHEMA_NAME.into(), - field_definition_position: field_pos.clone().into(), + field_definition_position: field_pos.into(), is_part_of_provides: false, }; let mut tails = query_graph diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs index 44db681298..bf042f813f 100644 --- a/apollo-federation/src/query_graph/graph_path.rs +++ b/apollo-federation/src/query_graph/graph_path.rs @@ -3572,7 +3572,7 @@ impl OpGraphPath { type_condition_pos.clone().try_into().ok(); if let Some(type_condition_pos) = abstract_type_condition_pos { if supergraph_schema - .possible_runtime_types(type_condition_pos.clone().into())? + .possible_runtime_types(type_condition_pos.into())? .contains(tail_type_pos) { debug!("Type is a super-type of the current type. 
No edge to take"); diff --git a/apollo-federation/src/query_graph/mod.rs b/apollo-federation/src/query_graph/mod.rs index 0787a24a22..1d225a9567 100644 --- a/apollo-federation/src/query_graph/mod.rs +++ b/apollo-federation/src/query_graph/mod.rs @@ -742,6 +742,7 @@ impl QueryGraph { subgraph_schema, composite_type_position.type_name().clone(), key_value.fields, + true, ) }) .find_ok(|selection| !external_metadata.selects_any_external_field(selection)) @@ -979,7 +980,7 @@ impl QueryGraph { key.specified_argument_by_name("fields") .and_then(|arg| arg.as_str()) }) - .map(|value| parse_field_set(schema, ty.name().clone(), value)) + .map(|value| parse_field_set(schema, ty.name().clone(), value, true)) .find_ok(|selection| { !metadata .external_metadata() diff --git a/apollo-federation/src/query_graph/path_tree.rs b/apollo-federation/src/query_graph/path_tree.rs index 79147d647a..78400a0575 100644 --- a/apollo-federation/src/query_graph/path_tree.rs +++ b/apollo-federation/src/query_graph/path_tree.rs @@ -730,8 +730,7 @@ mod tests { (&path1, Some(&selection_set)), (&path2, Some(&selection_set)), ]; - let path_tree = - OpPathTree::from_op_paths(query_graph.to_owned(), NodeIndex::new(0), &paths).unwrap(); + let path_tree = OpPathTree::from_op_paths(query_graph, NodeIndex::new(0), &paths).unwrap(); let computed = path_tree.to_string(); let expected = r#"Query(Test): -> [3] t = T(Test): diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index d83f38cb2a..fd41c8139e 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -915,7 +915,7 @@ impl FetchDependencyGraph { parent_node_id, child_id, Arc::new(FetchDependencyGraphEdge { - path: path_in_parent.clone(), + path: path_in_parent, }), ); } @@ -3034,7 +3034,7 @@ fn operation_for_entities_fetch( .into()); } - let entities = 
FieldDefinitionPosition::Object(query_type.field(ENTITIES_QUERY.clone())); + let entities = FieldDefinitionPosition::Object(query_type.field(ENTITIES_QUERY)); let entities_call = Selection::from_element( OpPathElement::Field(Field { @@ -4233,7 +4233,7 @@ fn wrap_selection_with_type_and_conditions( InlineFragment { schema: supergraph_schema.clone(), parent_type_position: wrapping_type.clone(), - type_condition_position: Some(type_condition.clone()), + type_condition_position: Some(type_condition), directives: Default::default(), // None selection_id: SelectionId::new(), }, @@ -4461,7 +4461,7 @@ fn handle_conditions_tree( }; let defer_ref = fetch_node.defer_ref.clone(); let copied_node_id = - dependency_graph.new_key_node(&subgraph_name, merge_at, defer_ref.clone())?; + dependency_graph.new_key_node(&subgraph_name, merge_at, defer_ref)?; dependency_graph.add_parent(copied_node_id, parent.clone()); dependency_graph.copy_inputs(copied_node_id, fetch_node_id)?; Some((copied_node_id, parent)) @@ -4738,7 +4738,7 @@ fn create_post_requires_node( // should already have a key in its inputs, so we don't need to add that). 
let inputs = inputs_for_require( dependency_graph, - entity_type_position.clone(), + entity_type_position, entity_type_schema, query_graph_edge_id, context, @@ -4816,7 +4816,7 @@ fn create_post_requires_node( created_nodes.insert(post_requires_node_id); let initial_fetch_path = create_fetch_initial_path( &dependency_graph.supergraph_schema, - &entity_type_position.clone().into(), + &entity_type_position.into(), context, )?; let new_path = fetch_node_path.for_new_key_fetch(initial_fetch_path); @@ -4874,7 +4874,7 @@ fn create_post_requires_node( created_nodes.insert(post_requires_node_id); let initial_fetch_path = create_fetch_initial_path( &dependency_graph.supergraph_schema, - &entity_type_position.clone().into(), + &entity_type_position.into(), context, )?; let new_path = fetch_node_path.for_new_key_fetch(initial_fetch_path); @@ -5118,7 +5118,7 @@ mod tests { ) .unwrap(); - let valid_schema = ValidFederationSchema::new(schema.clone()).unwrap(); + let valid_schema = ValidFederationSchema::new(schema).unwrap(); let foo = object_field_element(&valid_schema, name!("Query"), name!("foo")); let frag = inline_fragment_element(&valid_schema, name!("Foo"), Some(name!("Foo_1"))); @@ -5180,7 +5180,7 @@ mod tests { ) .unwrap(); - let valid_schema = ValidFederationSchema::new(schema.clone()).unwrap(); + let valid_schema = ValidFederationSchema::new(schema).unwrap(); let foo = object_field_element(&valid_schema, name!("Query"), name!("foo")); let frag = inline_fragment_element(&valid_schema, name!("Foo"), Some(name!("Foo_1"))); diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs index 1d4251ed8e..00ec60f267 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs @@ -457,7 +457,7 @@ fn flat_wrap_nodes( let mut iter = nodes.into_iter().flatten(); let first = iter.next()?; let 
Some(second) = iter.next() else { - return Some(first.clone()); + return Some(first); }; let mut nodes = Vec::new(); for node in [first, second].into_iter().chain(iter) { diff --git a/apollo-federation/src/schema/blueprint.rs b/apollo-federation/src/schema/blueprint.rs index f045267f98..02fc5e5f46 100644 --- a/apollo-federation/src/schema/blueprint.rs +++ b/apollo-federation/src/schema/blueprint.rs @@ -1,11 +1,13 @@ use std::collections::HashMap; use apollo_compiler::Name; +use apollo_compiler::Node; use apollo_compiler::Schema; use apollo_compiler::ast::Directive; use apollo_compiler::ast::NamedType; use apollo_compiler::ast::OperationType; use apollo_compiler::ty; +use itertools::Itertools; use crate::bail; use crate::error::FederationError; @@ -14,23 +16,35 @@ use crate::error::SingleFederationError; use crate::link::DEFAULT_LINK_NAME; use crate::link::Import; use crate::link::Purpose; +use crate::link::cost_spec_definition::COST_VERSIONS; use crate::link::federation_spec_definition::FEDERATION_FIELDS_ARGUMENT_NAME; use crate::link::federation_spec_definition::FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC; use crate::link::federation_spec_definition::FEDERATION_PROVIDES_DIRECTIVE_NAME_IN_SPEC; use crate::link::federation_spec_definition::FEDERATION_REQUIRES_DIRECTIVE_NAME_IN_SPEC; use crate::link::federation_spec_definition::get_federation_spec_definition_from_subgraph; use crate::link::link_spec_definition::LinkSpecDefinition; +use crate::link::spec::Identity; use crate::link::spec::Url; use crate::link::spec_definition::SpecDefinition; use crate::schema::FederationSchema; +use crate::schema::ValidFederationSchema; use crate::schema::compute_subgraph_metadata; +use crate::schema::field_set::parse_field_set; use crate::schema::position::DirectiveDefinitionPosition; +use crate::schema::position::InterfaceTypeDefinitionPosition; +use crate::schema::subgraph_metadata::SubgraphMetadata; +use crate::schema::validators::cost::validate_cost_directives; +use 
crate::schema::validators::external::validate_external_directives; use crate::schema::validators::key::validate_key_directives; +use crate::schema::validators::list_size::validate_list_size_directives; use crate::schema::validators::provides::validate_provides_directives; use crate::schema::validators::requires::validate_requires_directives; use crate::supergraph::GRAPHQL_MUTATION_TYPE_NAME; use crate::supergraph::GRAPHQL_QUERY_TYPE_NAME; use crate::supergraph::GRAPHQL_SUBSCRIPTION_TYPE_NAME; +use crate::utils::human_readable::HumanReadableListOptions; +use crate::utils::human_readable::HumanReadableListPrefix; +use crate::utils::human_readable::human_readable_list; #[allow(dead_code)] struct CoreFeature { @@ -55,11 +69,14 @@ impl FederationBlueprint { pub(crate) fn on_missing_directive_definition( schema: &mut FederationSchema, - directive: &Directive, + directive: &Node, ) -> Result, FederationError> { if directive.name == DEFAULT_LINK_NAME { - // TODO (FED-428): pass `alias` and `imports` - LinkSpecDefinition::latest().add_definitions_to_schema(schema, /*alias*/ None)?; + let (alias, imports) = + LinkSpecDefinition::extract_alias_and_imports_on_missing_link_directive_definition( + directive, + )?; + LinkSpecDefinition::latest().add_definitions_to_schema(schema, alias, imports)?; Ok(schema.get_directive_definition(&directive.name)) } else { Ok(None) @@ -98,8 +115,8 @@ impl FederationBlueprint { pub(crate) fn on_validation( &self, - schema: &mut FederationSchema, - ) -> Result<(), FederationError> { + mut schema: FederationSchema, + ) -> Result { let mut error_collector = MultipleFederationErrors { errors: Vec::new() }; if self.with_root_type_renaming { let mut operation_types_to_rename = HashMap::new(); @@ -120,10 +137,13 @@ impl FederationBlueprint { } } for (current_name, new_name) in operation_types_to_rename { - schema.get_type(current_name)?.rename(schema, new_name)?; + schema + .get_type(current_name)? 
+ .rename(&mut schema, new_name)?; } } + let schema = schema.validate_or_return_self().map_err(|e| e.1)?; let Some(meta) = schema.subgraph_metadata() else { bail!("Federation schema should have had its metadata set on construction"); }; @@ -131,16 +151,26 @@ impl FederationBlueprint { // accepted, and some of those issues are fixed by `SchemaUpgrader`. So insofar as any fed 1 schma is ultimately converted // to a fed 2 one before composition, then skipping some validation on fed 1 schema is fine. if !meta.is_fed_2_schema() { - return error_collector.into_result(); + return error_collector.into_result().map(|_| schema); } - validate_key_directives(schema, &mut error_collector)?; - validate_provides_directives(schema, meta, &mut error_collector)?; - validate_requires_directives(schema, meta, &mut error_collector)?; + validate_key_directives(&schema, meta, &mut error_collector)?; + validate_provides_directives(&schema, meta, &mut error_collector)?; + validate_requires_directives(&schema, meta, &mut error_collector)?; + validate_external_directives(&schema, meta, &mut error_collector)?; // TODO: Remaining validations + Self::validate_keys_on_interfaces_are_also_on_all_implementations( + &schema, + meta, + &mut error_collector, + )?; + Self::validate_interface_objects_are_on_entities(&schema, meta, &mut error_collector)?; - error_collector.into_result() + validate_cost_directives(&schema, &mut error_collector)?; + validate_list_size_directives(&schema, &mut error_collector)?; + + error_collector.into_result().map(|_| schema) } fn on_apollo_rs_validation_error( @@ -227,9 +257,166 @@ impl FederationBlueprint { return Ok(()); }; - for _link in links_metadata.links.clone() { + for link in links_metadata.links.clone() { // TODO: Pick out known features by link identity and call `add_elements_to_schema`. // JS calls coreFeatureDefinitionIfKnown here, but we don't have a feature registry yet. 
+ + if link.url.identity == Identity::cost_identity() { + let spec = COST_VERSIONS + .find(&link.url.version) + .ok_or_else(|| SingleFederationError::UnknownLinkVersion { + message: format!("Detected unsupported cost specification version {}. Please upgrade to a composition version which supports that version, or select one of the following supported versions: {}.", link.url.version, COST_VERSIONS.versions().join(", ")) + })?; + spec.add_elements_to_schema(schema)?; + } + } + Ok(()) + } + + fn validate_keys_on_interfaces_are_also_on_all_implementations( + schema: &ValidFederationSchema, + metadata: &SubgraphMetadata, + error_collector: &mut MultipleFederationErrors, + ) -> Result<(), FederationError> { + let key_directive_definition_name = &metadata + .federation_spec_definition() + .key_directive_definition(schema)? + .name; + for type_pos in schema.get_types() { + let Ok(type_pos): Result = type_pos.try_into() + else { + continue; + }; + let implementation_types = schema.possible_runtime_types(type_pos.clone().into())?; + let type_ = type_pos.get(schema.schema())?; + for application in type_.directives.get_all(key_directive_definition_name) { + let arguments = metadata + .federation_spec_definition() + .key_directive_arguments(application)?; + // Note that we will have validated all @key field sets by this point, so we skip + // re-validating here. 
+ let fields = parse_field_set(schema, type_.name.clone(), arguments.fields, false)?; + let mut implementations_with_non_resolvable_keys = vec![]; + let mut implementations_with_missing_keys = vec![]; + for implementation_type_pos in &implementation_types { + let implementation_type = implementation_type_pos.get(schema.schema())?; + let mut matching_application_arguments = None; + for implementation_application in implementation_type + .directives + .get_all(key_directive_definition_name) + { + let implementation_arguments = metadata + .federation_spec_definition() + .key_directive_arguments(implementation_application)?; + let implementation_fields = parse_field_set( + schema, + implementation_type.name.clone(), + implementation_arguments.fields, + false, + )?; + if implementation_fields == fields { + matching_application_arguments = Some(implementation_arguments); + break; + } + } + if let Some(matching_application_arguments) = matching_application_arguments { + // TODO: This code assumes there's at most one matching application for a + // given fieldset, but I'm not sure whether other validation code guarantees + // this. 
+ if arguments.resolvable && !matching_application_arguments.resolvable { + implementations_with_non_resolvable_keys.push(implementation_type_pos); + } + } else { + implementations_with_missing_keys.push(implementation_type_pos); + } + + if !implementations_with_missing_keys.is_empty() { + let types_list = human_readable_list( + implementations_with_missing_keys + .iter() + .map(|pos| format!("\"{}\"", pos)), + HumanReadableListOptions { + prefix: Some(HumanReadableListPrefix { + singular: "type", + plural: "types", + }), + ..Default::default() + }, + ); + error_collector.errors.push( + SingleFederationError::InterfaceKeyNotOnImplementation { + message: format!( + "Key {} on interface type \"{}\" is missing on implementation {}", + application.serialize(), + type_pos, + types_list, + ) + } + ) + } else if !implementations_with_non_resolvable_keys.is_empty() { + let types_list = human_readable_list( + implementations_with_non_resolvable_keys + .iter() + .map(|pos| format!("\"{}\"", pos)), + HumanReadableListOptions { + prefix: Some(HumanReadableListPrefix { + singular: "type", + plural: "types", + }), + ..Default::default() + }, + ); + error_collector.errors.push( + SingleFederationError::InterfaceKeyNotOnImplementation { + message: format!( + "Key {} on interface type \"{}\" should be resolvable on all implementation types, but is declared with argument \"@key(resolvable:)\" set to false in {}", + application.serialize(), + type_pos, + types_list, + ) + } + ) + } + } + } + } + Ok(()) + } + + fn validate_interface_objects_are_on_entities( + schema: &ValidFederationSchema, + metadata: &SubgraphMetadata, + error_collector: &mut MultipleFederationErrors, + ) -> Result<(), FederationError> { + let Some(interface_object_directive_definition) = &metadata + .federation_spec_definition() + .interface_object_directive_definition(schema)? 
+ else { + return Ok(()); + }; + let key_directive_definition_name = &metadata + .federation_spec_definition() + .key_directive_definition(schema)? + .name; + for type_pos in &schema + .referencers + .get_directive(&interface_object_directive_definition.name)? + .object_types + { + if !type_pos + .get(schema.schema())? + .directives + .has(key_directive_definition_name) + { + error_collector.errors.push( + SingleFederationError::InterfaceObjectUsageError { + message: format!( + "The @interfaceObject directive can only be applied to entity types but type \"{}\" has no @key in this subgraph.", + type_pos + ) + } + ) + } } Ok(()) } diff --git a/apollo-federation/src/schema/field_set.rs b/apollo-federation/src/schema/field_set.rs index b61960259f..d8420080c1 100644 --- a/apollo-federation/src/schema/field_set.rs +++ b/apollo-federation/src/schema/field_set.rs @@ -57,15 +57,25 @@ pub(crate) fn parse_field_set( schema: &ValidFederationSchema, parent_type_name: NamedType, field_set: &str, + validate: bool, ) -> Result { // Note this parsing takes care of adding curly braces ("{" and "}") if they aren't in the // string. - let field_set = FieldSet::parse_and_validate( - schema.schema(), - parent_type_name, - field_set, - "field_set.graphql", - )?; + let field_set = if validate { + FieldSet::parse_and_validate( + schema.schema(), + parent_type_name, + field_set, + "field_set.graphql", + )? + } else { + Valid::assume_valid(FieldSet::parse( + schema.schema(), + parent_type_name, + field_set, + "field_set.graphql", + )?) + }; // A field set should not contain any named fragments. let fragments = Default::default(); @@ -75,7 +85,9 @@ pub(crate) fn parse_field_set( Ok(()))?; // Validate that the field set has no aliases. 
- check_absence_of_aliases(&selection_set)?; + if validate { + check_absence_of_aliases(&selection_set)?; + } Ok(selection_set) } @@ -225,8 +237,9 @@ mod tests { let supergraph = Supergraph::new(schema_str).expect("Expected supergraph schema to parse"); // Note: `Supergraph::new` does not error out on aliases in field sets. // We call `parse_field_set` directly to test the alias error. - let err = super::parse_field_set(&supergraph.schema, Name::new("T").unwrap(), "r1: r") - .expect_err("Expected alias error"); + let err = + super::parse_field_set(&supergraph.schema, Name::new("T").unwrap(), "r1: r", true) + .expect_err("Expected alias error"); assert_eq!( err.to_string(), r#"Cannot use alias "r1" in "r1: r": aliases are not currently supported in the used directive"# diff --git a/apollo-federation/src/schema/mod.rs b/apollo-federation/src/schema/mod.rs index 0f53a8f60a..ac394b0b9f 100644 --- a/apollo-federation/src/schema/mod.rs +++ b/apollo-federation/src/schema/mod.rs @@ -7,12 +7,12 @@ use apollo_compiler::Name; use apollo_compiler::Node; use apollo_compiler::Schema; use apollo_compiler::ast::Directive; +use apollo_compiler::ast::FieldDefinition; use apollo_compiler::collections::IndexSet; use apollo_compiler::executable::FieldSet; use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use apollo_compiler::validation::WithErrors; -use position::ObjectFieldDefinitionPosition; use position::ObjectOrInterfaceTypeDefinitionPosition; use position::TagDirectiveTargetPosition; use referencer::Referencers; @@ -23,6 +23,8 @@ use crate::error::SingleFederationError; use crate::internal_error; use crate::link::Link; use crate::link::LinksMetadata; +use crate::link::cost_spec_definition; +use crate::link::cost_spec_definition::CostSpecDefinition; use crate::link::federation_spec_definition::ContextDirectiveArguments; use crate::link::federation_spec_definition::FEDERATION_ENTITY_TYPE_NAME_IN_SPEC; use 
crate::link::federation_spec_definition::FEDERATION_FIELDSET_TYPE_NAME_IN_SPEC; @@ -41,6 +43,7 @@ use crate::schema::position::DirectiveDefinitionPosition; use crate::schema::position::EnumTypeDefinitionPosition; use crate::schema::position::InputObjectTypeDefinitionPosition; use crate::schema::position::InterfaceTypeDefinitionPosition; +use crate::schema::position::ObjectOrInterfaceFieldDefinitionPosition; use crate::schema::position::ObjectTypeDefinitionPosition; use crate::schema::position::ScalarTypeDefinitionPosition; use crate::schema::position::TypeDefinitionPosition; @@ -100,10 +103,6 @@ impl FederationSchema { &self.referencers } - pub(crate) fn subgraph_metadata(&self) -> Option<&SubgraphMetadata> { - self.subgraph_metadata.as_deref() - } - /// Returns all the types in the schema, minus builtins. pub(crate) fn get_types(&self) -> impl Iterator { self.schema @@ -490,7 +489,8 @@ impl FederationSchema { .get_directive(&provides_directive_definition.name)?; let mut applications: Vec> = Vec::new(); - for field_definition_position in &provides_directive_referencers.object_fields { + for field_definition_position in provides_directive_referencers.object_or_interface_fields() + { match field_definition_position.get(self.schema()) { Ok(field_definition) => { let directives = &field_definition.directives; @@ -501,7 +501,7 @@ impl FederationSchema { .provides_directive_arguments(provides_directive_application); applications.push(arguments.map(|args| ProvidesDirective { arguments: args, - target: field_definition_position, + target: field_definition_position.clone(), target_return_type: field_definition.ty.inner_named_type(), })); } @@ -522,18 +522,19 @@ impl FederationSchema { .get_directive(&requires_directive_definition.name)?; let mut applications = Vec::new(); - for field_definition_position in &requires_directive_referencers.object_fields { + for field_definition_position in requires_directive_referencers.object_or_interface_fields() + { match 
field_definition_position.get(self.schema()) { Ok(field_definition) => { let directives = &field_definition.directives; - for provides_directive_application in + for directive_application in directives.get_all(&requires_directive_definition.name) { - let arguments = federation_spec - .requires_directive_arguments(provides_directive_application); + let arguments = + federation_spec.requires_directive_arguments(directive_application); applications.push(arguments.map(|args| RequiresDirective { arguments: args, - target: field_definition_position, + target: field_definition_position.clone(), })); } } @@ -597,6 +598,47 @@ impl FederationSchema { Ok(applications) } + pub(crate) fn list_size_directive_applications( + &self, + ) -> FallibleDirectiveIterator { + let Some(list_size_directive_name) = CostSpecDefinition::list_size_directive_name(self)? + else { + return Ok(Vec::new()); + }; + let Ok(list_size_directive_referencers) = self + .referencers() + .get_directive(list_size_directive_name.as_str()) + else { + return Ok(Vec::new()); + }; + + let mut applications = Vec::new(); + for field_definition_position in + list_size_directive_referencers.object_or_interface_fields() + { + let field_definition = field_definition_position.get(self.schema())?; + match CostSpecDefinition::list_size_directive_from_field_definition( + self, + field_definition, + ) { + Ok(Some(list_size_directive)) => { + applications.push(Ok(ListSizeDirective { + directive: list_size_directive, + parent_type: field_definition_position.type_name().clone(), + target: field_definition, + })); + } + Ok(None) => { + // No listSize directive found, continue + } + Err(error) => { + applications.push(Err(error)); + } + } + } + Ok(applications) + } + pub(crate) fn is_interface(&self, type_name: &Name) -> bool { self.referencers().interface_types.contains_key(type_name) } @@ -643,11 +685,22 @@ impl KeyDirective<'_> { } } +pub(crate) struct ListSizeDirective<'schema> { + /// The parsed directive + directive: 
cost_spec_definition::ListSizeDirective, + /// The parent type of `target` + parent_type: Name, + /// The schema position to which this directive is applied + target: &'schema FieldDefinition, +} + pub(crate) struct ProvidesDirective<'schema> { /// The parsed arguments of this `@provides` application arguments: ProvidesDirectiveArguments<'schema>, /// The schema position to which this directive is applied - target: &'schema ObjectFieldDefinitionPosition, + /// - Although the directive is not allowed on interfaces, we still need to collect them + /// for validation purposes. + target: ObjectOrInterfaceFieldDefinitionPosition, /// The return type of the target field target_return_type: &'schema Name, } @@ -668,7 +721,9 @@ pub(crate) struct RequiresDirective<'schema> { /// The parsed arguments of this `@requires` application arguments: RequiresDirectiveArguments<'schema>, /// The schema position to which this directive is applied - target: &'schema ObjectFieldDefinitionPosition, + /// - Although the directive is not allowed on interfaces, we still need to collect them + /// for validation purposes. 
+ target: ObjectOrInterfaceFieldDefinitionPosition, } impl HasFields for RequiresDirective<'_> { @@ -677,7 +732,7 @@ impl HasFields for RequiresDirective<'_> { } fn target_type(&self) -> &Name { - &self.target.type_name + self.target.type_name() } } diff --git a/apollo-federation/src/schema/position.rs b/apollo-federation/src/schema/position.rs index 5a14abb7ca..a94d73afb2 100644 --- a/apollo-federation/src/schema/position.rs +++ b/apollo-federation/src/schema/position.rs @@ -260,7 +260,7 @@ impl TypeDefinitionPosition { } if let Some(existing_type) = schema.schema.types.swap_remove(self.type_name()) { - schema.schema.types.insert(new_name.clone(), existing_type); + schema.schema.types.insert(new_name, existing_type); } Ok(()) @@ -279,6 +279,110 @@ impl TypeDefinitionPosition { TypeDefinitionPosition::InputObject(type_) => type_.remove_extensions(schema), } } + + pub(crate) fn has_applied_directive( + &self, + schema: &FederationSchema, + directive_name: &Name, + ) -> bool { + match self { + TypeDefinitionPosition::Scalar(type_) => { + type_.has_applied_directive(schema, directive_name) + } + TypeDefinitionPosition::Object(type_) => { + type_.has_applied_directive(schema, directive_name) + } + TypeDefinitionPosition::Interface(type_) => { + type_.has_applied_directive(schema, directive_name) + } + TypeDefinitionPosition::Union(type_) => { + type_.has_applied_directive(schema, directive_name) + } + TypeDefinitionPosition::Enum(type_) => { + type_.has_applied_directive(schema, directive_name) + } + TypeDefinitionPosition::InputObject(type_) => { + type_.has_applied_directive(schema, directive_name) + } + } + } + + pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Component> { + match self { + TypeDefinitionPosition::Scalar(type_) => { + type_.get_applied_directives(schema, directive_name) + } + TypeDefinitionPosition::Object(type_) => { + type_.get_applied_directives(schema, 
directive_name) + } + TypeDefinitionPosition::Interface(type_) => { + type_.get_applied_directives(schema, directive_name) + } + TypeDefinitionPosition::Union(type_) => { + type_.get_applied_directives(schema, directive_name) + } + TypeDefinitionPosition::Enum(type_) => { + type_.get_applied_directives(schema, directive_name) + } + TypeDefinitionPosition::InputObject(type_) => { + type_.get_applied_directives(schema, directive_name) + } + } + } + + /// Remove a directive application. + #[allow(unused)] + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Component, + ) { + match self { + TypeDefinitionPosition::Scalar(type_) => type_.remove_directive(schema, directive), + TypeDefinitionPosition::Object(type_) => type_.remove_directive(schema, directive), + TypeDefinitionPosition::Interface(type_) => type_.remove_directive(schema, directive), + TypeDefinitionPosition::Union(type_) => type_.remove_directive(schema, directive), + TypeDefinitionPosition::Enum(type_) => type_.remove_directive(schema, directive), + TypeDefinitionPosition::InputObject(type_) => type_.remove_directive(schema, directive), + } + } +} + +impl From<&ExtendedType> for TypeDefinitionPosition { + fn from(ty: &ExtendedType) -> Self { + match ty { + ExtendedType::Scalar(v) => { + TypeDefinitionPosition::Scalar(ScalarTypeDefinitionPosition { + type_name: v.name.clone(), + }) + } + ExtendedType::Object(v) => { + TypeDefinitionPosition::Object(ObjectTypeDefinitionPosition { + type_name: v.name.clone(), + }) + } + ExtendedType::Interface(v) => { + TypeDefinitionPosition::Interface(InterfaceTypeDefinitionPosition { + type_name: v.name.clone(), + }) + } + ExtendedType::Union(v) => TypeDefinitionPosition::Union(UnionTypeDefinitionPosition { + type_name: v.name.clone(), + }), + ExtendedType::Enum(v) => TypeDefinitionPosition::Enum(EnumTypeDefinitionPosition { + type_name: v.name.clone(), + }), + ExtendedType::InputObject(v) => { + 
TypeDefinitionPosition::InputObject(InputObjectTypeDefinitionPosition { + type_name: v.name.clone(), + }) + } + } + } } fallible_conversions!(TypeDefinitionPosition::Scalar -> ScalarTypeDefinitionPosition); @@ -644,6 +748,33 @@ impl FieldDefinitionPosition { } } + pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Node> { + match self { + FieldDefinitionPosition::Object(field) => { + field.get_applied_directives(schema, directive_name) + } + FieldDefinitionPosition::Interface(field) => { + field.get_applied_directives(schema, directive_name) + } + FieldDefinitionPosition::Union(_) => vec![], + } + } + + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Node, + ) { + match self { + FieldDefinitionPosition::Object(field) => field.remove_directive(schema, directive), + FieldDefinitionPosition::Interface(field) => field.remove_directive(schema, directive), + FieldDefinitionPosition::Union(_) => (), + } + } pub(crate) fn get<'schema>( &self, schema: &'schema Schema, @@ -1455,6 +1586,54 @@ impl ScalarTypeDefinitionPosition { } Ok(()) } + + pub(crate) fn has_applied_directive( + &self, + schema: &FederationSchema, + directive_name: &Name, + ) -> bool { + if let Some(type_) = self.try_get(schema.schema()) { + return type_ + .directives + .iter() + .any(|directive| &directive.name == directive_name); + } + false + } + + pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Component> { + if let Some(field) = self.try_get(&schema.schema) { + field + .directives + .iter() + .filter(|directive| &directive.name == directive_name) + .collect() + } else { + Vec::new() + } + } + + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Component, + ) { + let Some(obj) = self.try_make_mut(&mut schema.schema) else { + return; + }; + if 
!obj.directives.iter().any(|other_directive| { + (other_directive.name == directive.name) && !other_directive.ptr_eq(directive) + }) { + self.remove_directive_name_references(&mut schema.referencers, &directive.name); + } + obj.make_mut() + .directives + .retain(|other_directive| !other_directive.ptr_eq(directive)); + } } impl Display for ScalarTypeDefinitionPosition { @@ -1969,6 +2148,54 @@ impl ObjectTypeDefinitionPosition { } Ok(()) } + + pub(crate) fn has_applied_directive( + &self, + schema: &FederationSchema, + directive_name: &Name, + ) -> bool { + if let Some(type_) = self.try_get(schema.schema()) { + return type_ + .directives + .iter() + .any(|directive| &directive.name == directive_name); + } + false + } + + pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Component> { + if let Some(field) = self.try_get(&schema.schema) { + field + .directives + .iter() + .filter(|directive| &directive.name == directive_name) + .collect() + } else { + Vec::new() + } + } + + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Component, + ) { + let Some(obj) = self.try_make_mut(&mut schema.schema) else { + return; + }; + if !obj.directives.iter().any(|other_directive| { + (other_directive.name == directive.name) && !other_directive.ptr_eq(directive) + }) { + self.remove_directive_name_references(&mut schema.referencers, &directive.name); + } + obj.make_mut() + .directives + .retain(|other_directive| !other_directive.ptr_eq(directive)); + } } impl Display for ObjectTypeDefinitionPosition { @@ -3124,6 +3351,54 @@ impl InterfaceTypeDefinitionPosition { } Ok(()) } + + pub(crate) fn has_applied_directive( + &self, + schema: &FederationSchema, + directive_name: &Name, + ) -> bool { + if let Some(type_) = self.try_get(schema.schema()) { + return type_ + .directives + .iter() + .any(|directive| &directive.name == directive_name); + } + false + } + + 
pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Component> { + if let Some(field) = self.try_get(&schema.schema) { + field + .directives + .iter() + .filter(|directive| &directive.name == directive_name) + .collect() + } else { + Vec::new() + } + } + + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Component, + ) { + let Some(obj) = self.try_make_mut(&mut schema.schema) else { + return; + }; + if !obj.directives.iter().any(|other_directive| { + (other_directive.name == directive.name) && !other_directive.ptr_eq(directive) + }) { + self.remove_directive_name_references(&mut schema.referencers, &directive.name); + } + obj.make_mut() + .directives + .retain(|other_directive| !other_directive.ptr_eq(directive)); + } } impl Display for InterfaceTypeDefinitionPosition { @@ -3492,8 +3767,8 @@ impl InterfaceFieldDefinitionPosition { ) -> Result<(), FederationError> { let field = self.make_mut(&mut schema.schema)?.make_mut(); match field.ty.clone() { - ast::Type::Named(_) => field.ty = ast::Type::Named(new_name.clone()), - ast::Type::NonNullNamed(_) => field.ty = ast::Type::NonNullNamed(new_name.clone()), + ast::Type::Named(_) => field.ty = ast::Type::Named(new_name), + ast::Type::NonNullNamed(_) => field.ty = ast::Type::NonNullNamed(new_name), ast::Type::List(_) => todo!(), ast::Type::NonNullList(_) => todo!(), } @@ -3759,9 +4034,9 @@ impl InterfaceFieldArgumentDefinitionPosition { ) -> Result<(), FederationError> { let argument = self.make_mut(&mut schema.schema)?.make_mut(); match argument.ty.as_ref() { - ast::Type::Named(_) => *argument.ty.make_mut() = ast::Type::Named(new_name.clone()), + ast::Type::Named(_) => *argument.ty.make_mut() = ast::Type::Named(new_name), ast::Type::NonNullNamed(_) => { - *argument.ty.make_mut() = ast::Type::NonNullNamed(new_name.clone()) + *argument.ty.make_mut() = ast::Type::NonNullNamed(new_name) } 
ast::Type::List(_) => todo!(), ast::Type::NonNullList(_) => todo!(), @@ -4139,6 +4414,54 @@ impl UnionTypeDefinitionPosition { .collect(); Ok(()) } + + pub(crate) fn has_applied_directive( + &self, + schema: &FederationSchema, + directive_name: &Name, + ) -> bool { + if let Some(type_) = self.try_get(schema.schema()) { + return type_ + .directives + .iter() + .any(|directive| &directive.name == directive_name); + } + false + } + + pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Component> { + if let Some(field) = self.try_get(&schema.schema) { + field + .directives + .iter() + .filter(|directive| &directive.name == directive_name) + .collect() + } else { + Vec::new() + } + } + + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Component, + ) { + let Some(obj) = self.try_make_mut(&mut schema.schema) else { + return; + }; + if !obj.directives.iter().any(|other_directive| { + (other_directive.name == directive.name) && !other_directive.ptr_eq(directive) + }) { + self.remove_directive_name_references(&mut schema.referencers, &directive.name); + } + obj.make_mut() + .directives + .retain(|other_directive| !other_directive.ptr_eq(directive)); + } } impl Display for UnionTypeDefinitionPosition { @@ -4513,6 +4836,54 @@ impl EnumTypeDefinitionPosition { } Ok(()) } + + pub(crate) fn has_applied_directive( + &self, + schema: &FederationSchema, + directive_name: &Name, + ) -> bool { + if let Some(type_) = self.try_get(schema.schema()) { + return type_ + .directives + .iter() + .any(|directive| &directive.name == directive_name); + } + false + } + + pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Component> { + if let Some(field) = self.try_get(&schema.schema) { + field + .directives + .iter() + .filter(|directive| &directive.name == directive_name) + .collect() 
+ } else { + Vec::new() + } + } + + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Component, + ) { + let Some(obj) = self.try_make_mut(&mut schema.schema) else { + return; + }; + if !obj.directives.iter().any(|other_directive| { + (other_directive.name == directive.name) && !other_directive.ptr_eq(directive) + }) { + self.remove_directive_name_references(&mut schema.referencers, &directive.name); + } + obj.make_mut() + .directives + .retain(|other_directive| !other_directive.ptr_eq(directive)); + } } impl Display for EnumTypeDefinitionPosition { @@ -5023,6 +5394,54 @@ impl InputObjectTypeDefinitionPosition { } Ok(()) } + + pub(crate) fn has_applied_directive( + &self, + schema: &FederationSchema, + directive_name: &Name, + ) -> bool { + if let Some(type_) = self.try_get(schema.schema()) { + return type_ + .directives + .iter() + .any(|directive| &directive.name == directive_name); + } + false + } + + pub(crate) fn get_applied_directives<'schema>( + &self, + schema: &'schema FederationSchema, + directive_name: &Name, + ) -> Vec<&'schema Component> { + if let Some(field) = self.try_get(&schema.schema) { + field + .directives + .iter() + .filter(|directive| &directive.name == directive_name) + .collect() + } else { + Vec::new() + } + } + + pub(crate) fn remove_directive( + &self, + schema: &mut FederationSchema, + directive: &Component, + ) { + let Some(obj) = self.try_make_mut(&mut schema.schema) else { + return; + }; + if !obj.directives.iter().any(|other_directive| { + (other_directive.name == directive.name) && !other_directive.ptr_eq(directive) + }) { + self.remove_directive_name_references(&mut schema.referencers, &directive.name); + } + obj.make_mut() + .directives + .retain(|other_directive| !other_directive.ptr_eq(directive)); + } } impl Display for InputObjectTypeDefinitionPosition { @@ -5310,7 +5729,7 @@ impl InputObjectFieldDefinitionPosition { schema: &mut FederationSchema, new_name: Name, ) -> Result<(), 
FederationError> { - self.make_mut(&mut schema.schema)?.make_mut().name = new_name.clone(); + self.make_mut(&mut schema.schema)?.make_mut().name = new_name; Ok(()) } } diff --git a/apollo-federation/src/schema/referencer.rs b/apollo-federation/src/schema/referencer.rs index 5cb03c5354..e50c8c6bd6 100644 --- a/apollo-federation/src/schema/referencer.rs +++ b/apollo-federation/src/schema/referencer.rs @@ -15,6 +15,7 @@ use crate::schema::position::InterfaceFieldDefinitionPosition; use crate::schema::position::InterfaceTypeDefinitionPosition; use crate::schema::position::ObjectFieldArgumentDefinitionPosition; use crate::schema::position::ObjectFieldDefinitionPosition; +use crate::schema::position::ObjectOrInterfaceFieldDefinitionPosition; use crate::schema::position::ObjectTypeDefinitionPosition; use crate::schema::position::ScalarTypeDefinitionPosition; use crate::schema::position::SchemaDefinitionPosition; @@ -151,3 +152,18 @@ pub(crate) struct DirectiveReferencers { pub(crate) input_object_fields: IndexSet, pub(crate) directive_arguments: IndexSet, } + +impl DirectiveReferencers { + pub(crate) fn object_or_interface_fields( + &self, + ) -> impl Iterator { + self.object_fields + .iter() + .map(|pos| ObjectOrInterfaceFieldDefinitionPosition::Object(pos.clone())) + .chain( + self.interface_fields + .iter() + .map(|pos| ObjectOrInterfaceFieldDefinitionPosition::Interface(pos.clone())), + ) + } +} diff --git a/apollo-federation/src/schema/schema_upgrader.rs b/apollo-federation/src/schema/schema_upgrader.rs index 7139fa9727..c849382884 100644 --- a/apollo-federation/src/schema/schema_upgrader.rs +++ b/apollo-federation/src/schema/schema_upgrader.rs @@ -5,32 +5,41 @@ use apollo_compiler::Node; use apollo_compiler::ast::Directive; use apollo_compiler::ast::Value; use apollo_compiler::collections::HashMap; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; +use apollo_compiler::name; +use apollo_compiler::schema::Component; use 
apollo_compiler::schema::ExtendedType; +use apollo_compiler::validation::Valid; use super::FederationSchema; use super::TypeDefinitionPosition; +use super::compute_subgraph_metadata; +use super::field_set::collect_target_fields_from_field_set; +use super::position::DirectiveDefinitionPosition; use super::position::FieldDefinitionPosition; use super::position::InterfaceFieldDefinitionPosition; use super::position::InterfaceTypeDefinitionPosition; -use super::position::ObjectFieldDefinitionPosition; use super::position::ObjectTypeDefinitionPosition; use crate::error::FederationError; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; +use crate::internal_error; use crate::schema::SubgraphMetadata; use crate::schema::position::ObjectOrInterfaceFieldDefinitionPosition; use crate::schema::position::ObjectOrInterfaceTypeDefinitionPosition; use crate::subgraph::typestate::Expanded; +use crate::subgraph::typestate::Raw; use crate::subgraph::typestate::Subgraph; +use crate::supergraph::GRAPHQL_SUBSCRIPTION_TYPE_NAME; use crate::supergraph::remove_inactive_requires_and_provides_from_subgraph; use crate::utils::FallibleIterator; -#[derive(Clone, Debug)] +#[derive(Debug)] struct SchemaUpgrader<'a> { schema: FederationSchema, - original_subgraph: &'a Subgraph, - subgraphs: &'a [&'a mut Subgraph], - #[allow(unused)] + expanded_info: ExpandedSubgraphInfo, + subgraphs: &'a IndexMap>, object_type_map: &'a HashMap>, } @@ -41,18 +50,28 @@ struct TypeInfo { metadata: SubgraphMetadata, } +#[derive(Debug)] +struct ExpandedSubgraphInfo { + subgraph_name: String, + subgraph_url: String, + key_directive_name: Option, + requires_directive_name: Option, + provides_directive_name: Option, + extends_directive_name: Option, +} + #[allow(unused)] // PORT_NOTE: In JS, this returns upgraded subgraphs along with a set of messages about what changed. // However, those messages were never used, so we have omitted them here. 
pub(crate) fn upgrade_subgraphs_if_necessary( - subgraphs: &[&mut Subgraph], -) -> Result<(), FederationError> { + subgraphs: Vec>, +) -> Result>, FederationError> { // if all subgraphs are fed 2, there is no upgrade to be done if subgraphs .iter() .all(|subgraph| subgraph.metadata().is_fed_2_schema()) { - return Ok(()); + return Ok(subgraphs); } let mut object_type_map: HashMap> = Default::default(); @@ -75,14 +94,69 @@ pub(crate) fn upgrade_subgraphs_if_necessary( } } } - for subgraph in subgraphs.iter() { - if !subgraph.schema().is_fed_2() { - let mut upgrader = SchemaUpgrader::new(subgraph, subgraphs, &object_type_map)?; + + // insertion order is preserved with IndexMap + let subgraphs: IndexMap> = subgraphs + .into_iter() + .map(|subgraph| (subgraph.name.clone(), subgraph)) + .collect(); + let mut subgraphs_using_interface_object: IndexSet = Default::default(); + + let mut upgraded: HashMap> = Default::default(); + for (name, subgraph) in subgraphs.iter() { + if !subgraph.metadata().is_fed_2_schema() { + let mut upgrader = SchemaUpgrader::new(subgraph, &subgraphs, &object_type_map)?; upgrader.upgrade()?; + let new_subgraph = Subgraph::::new( + subgraph.name.as_str(), + subgraph.url.as_str(), + upgrader.schema.schema().clone(), + ) + .assume_expanded()?; + upgraded.insert(subgraph.name.clone(), new_subgraph); + } else if let Some(interface_object_def) = subgraph + .metadata() + .federation_spec_definition() + .interface_object_directive_definition(subgraph.schema())? 
+ { + let referencers = subgraph + .schema() + .referencers() + .get_directive(interface_object_def.name.as_str())?; + if !referencers.object_types.is_empty() { + subgraphs_using_interface_object.insert(name.clone()); + } } } - // TODO: Return federation_subgraphs - todo!(); + + if !subgraphs_using_interface_object.is_empty() { + // TODO: Make this a composition error and make the strings "human readable" + let cond_1_str = subgraphs_using_interface_object + .iter() + .cloned() + .map(|k| format!("\"{k}\"")) + .collect::>() + .join(" "); + let cond_2_str = upgraded + .keys() + .map(|k| format!("\"{k}\"")) + .collect::>() + .join(" "); + return Err(internal_error!( + "The @interfaceObject directive can only be used if all subgraphs have federation 2 subgraph schema (schema with a `@link` to \"https://specs.apollo.dev/federation\" version 2.0 or newer): @interfaceObject is used in subgraph {} but subgraph {} is not a federation 2 subgraph schema.", + cond_1_str, + cond_2_str + )); + } + Ok(subgraphs + .into_iter() + .map(|(name, subgraph)| { + if !subgraph.metadata().is_fed_2_schema() { + return upgraded.remove(&name).unwrap(); + } + subgraph + }) + .collect()) } // Extensions for FederationError to provide additional error types @@ -97,19 +171,117 @@ impl<'a> SchemaUpgrader<'a> { #[allow(unused)] fn new( original_subgraph: &'a Subgraph, - subgraphs: &'a [&'a mut Subgraph], + subgraphs: &'a IndexMap>, object_type_map: &'a HashMap>, ) -> Result { + let schema = original_subgraph.schema().clone(); Ok(SchemaUpgrader { - schema: original_subgraph.schema().clone(), // TODO: Don't think we should be cloning here - original_subgraph, + schema, + expanded_info: ExpandedSubgraphInfo { + subgraph_name: original_subgraph.name.clone(), + subgraph_url: original_subgraph.url.clone(), + key_directive_name: original_subgraph.key_directive_name()?.clone(), + requires_directive_name: original_subgraph.requires_directive_name()?.clone(), + provides_directive_name: 
original_subgraph.provides_directive_name()?.clone(), + extends_directive_name: original_subgraph.extends_directive_name()?.clone(), + }, subgraphs, object_type_map, }) } + // because the schema may have been changed since the last time metadata was calculated, we need to create it every time it's needed. + fn subgraph_metadata(&self) -> Result { + compute_subgraph_metadata(&self.schema)?.ok_or_else(|| { + internal_error!( + "Unable to detect federation version used in subgraph '{}'", + self.expanded_info.subgraph_name + ) + }) + } + + fn remove_links_and_reexpand(&mut self) -> Result<(), FederationError> { + // for @core, we want to remove both the definition and all references, but for other + // federation directives, just remove the definitions + let directives_to_remove = [ + name!("extends"), + name!("key"), + name!("provides"), + name!("requires"), + name!("external"), + name!("tag"), + ]; + + let definitions: Vec = + self.schema.get_directive_definitions().collect(); + for definition in &definitions { + if directives_to_remove.contains(&definition.directive_name) { + self.schema + .schema + .directive_definitions + .shift_remove(&definition.directive_name); + self.schema + .referencers + .directives + .shift_remove(&definition.directive_name) + .ok_or_else(|| SingleFederationError::Internal { + message: format!( + "Schema missing referencers for directive \"{}\"", + &definition.directive_name + ), + })?; + } else if definition.directive_name == name!("core") { + definition.remove(&mut self.schema)?; + } + } + + // now remove other federation types + let schema = &mut self.schema; + if let Some(TypeDefinitionPosition::Enum(enum_obj)) = + schema.try_get_type(name!("core__Purpose")) + { + enum_obj.remove(schema)?; + } + if let Some(TypeDefinitionPosition::Scalar(scalar_obj)) = + schema.try_get_type(name!("core__Import")) + { + scalar_obj.remove(schema)?; + } + if let Some(TypeDefinitionPosition::Scalar(scalar_obj)) = + schema.try_get_type(name!("_FieldSet")) + 
{ + scalar_obj.remove(schema)?; + } + if let Some(TypeDefinitionPosition::Scalar(scalar_obj)) = schema.try_get_type(name!("_Any")) + { + scalar_obj.remove(schema)?; + } + if let Some(TypeDefinitionPosition::Object(obj)) = schema.try_get_type(name!("_Service")) { + obj.remove(schema)?; + } + if let Some(TypeDefinitionPosition::Union(union_obj)) = + schema.try_get_type(name!("_Entity")) + { + union_obj.remove(schema)?; + } + let subgraph = Subgraph::new( + self.expanded_info.subgraph_name.as_str(), + self.expanded_info.subgraph_url.as_str(), + schema.schema.clone(), // TODO: It's unfortunate that we have to do multiple clones here. Ideally we'd allow subgraph to accept and release ownership + ) + .into_fed2_subgraph()? + .expand_links()?; + self.schema = subgraph.schema().clone(); + Ok(()) + } + + // function to get subgraph from list of subgraphs by name. Right now it will just iterate, but perhaps the struct should be a HashMap eventually + fn get_subgraph_by_name(&self, name: &String) -> Option<&Subgraph> { + self.subgraphs.get(name) + } + #[allow(unused)] - fn upgrade(&mut self) -> Result, FederationError> { + fn upgrade(&mut self) -> Result<(), FederationError> { // Run pre-upgrade validations to check for issues that would prevent upgrade self.pre_upgrade_validations()?; @@ -117,6 +289,8 @@ impl<'a> SchemaUpgrader<'a> { // Note: Implementation simplified for compilation purposes self.fix_federation_directives_arguments()?; + self.remove_links_and_reexpand()?; + self.remove_external_on_interface(); self.remove_external_on_object_types(); @@ -141,7 +315,7 @@ impl<'a> SchemaUpgrader<'a> { self.remove_tag_on_external()?; - todo!(); + Ok(()) } // integrates checkForExtensionWithNoBase from the JS code @@ -168,10 +342,14 @@ impl<'a> SchemaUpgrader<'a> { .iter() .filter(|(subgraph_name, _)| { // Fixed: dereference the string for comparison - subgraph_name.as_str() != self.original_subgraph.name.as_str() + subgraph_name.as_str() != 
self.expanded_info.subgraph_name.as_str() }) - .fallible_any(|(_, type_info)| { - let extended_type = type_info.pos.get(schema.schema())?; + .fallible_any(|(other_name, type_info)| { + let Some(other_subgraph) = self.get_subgraph_by_name(other_name) else { + return Ok(false); + }; + let extended_type = + type_info.pos.get(other_subgraph.schema().schema())?; Ok::(Self::has_non_extension_elements( extended_type, )) @@ -217,18 +395,13 @@ impl<'a> SchemaUpgrader<'a> { for field in &referencers.object_fields.clone() { let field_type = field.make_mut(&mut schema.schema)?.make_mut(); - for directive in field_type.directives.0.iter_mut() { - if directive.name == directive_name { - for arg in directive.make_mut().arguments.iter_mut() { - if arg.name == "fields" { - if let Some(new_fields_string) = - Self::make_fields_string_if_not(&arg.value)? - { - *arg.make_mut().value.make_mut() = - Value::String(new_fields_string); - } - break; - } + for directive in field_type.directives.get_all_mut(directive_name) { + if let Some(arg) = directive + .make_mut() + .specified_argument_by_name_mut("fields") + { + if let Some(new_fields_string) = Self::make_fields_string_if_not(arg)? { + *arg.make_mut() = Value::String(new_fields_string); } } } @@ -241,17 +414,13 @@ impl<'a> SchemaUpgrader<'a> { for field in &referencers.object_types.clone() { let field_type = field.make_mut(&mut schema.schema)?.make_mut(); - for directive in field_type.directives.0.iter_mut() { - if directive.name == "key" { - for arg in directive.make_mut().arguments.iter_mut() { - if arg.name == "fields" { - if let Some(new_fields_string) = - Self::make_fields_string_if_not(&arg.value)? - { - *arg.make_mut().value.make_mut() = Value::String(new_fields_string); - } - break; - } + for directive in field_type.directives.iter_mut().filter(|d| d.name == "key") { + if let Some(arg) = directive + .make_mut() + .specified_argument_by_name_mut("fields") + { + if let Some(new_fields_string) = Self::make_fields_string_if_not(arg)? 
{ + *arg.make_mut() = Value::String(new_fields_string); } } } @@ -305,22 +474,16 @@ impl<'a> SchemaUpgrader<'a> { else { return Ok(()); }; - let mut to_delete: Vec<(ObjectFieldDefinitionPosition, Node)> = vec![]; - for (obj_name, ty) in schema.schema().types.iter() { - let ExtendedType::Object(obj) = ty else { + let mut to_delete: Vec<(ObjectTypeDefinitionPosition, Component)> = vec![]; + for (obj_name, ty) in &schema.schema().types { + let ExtendedType::Object(_) = ty else { continue; }; + let object_pos = ObjectTypeDefinitionPosition::new(obj_name.clone()); - for (field_name, field) in &obj.fields { - let pos = object_pos.field(field_name.clone()); - let external_directive = field - .node - .directives - .iter() - .find(|d| d.name == external_directive.name); - if let Some(external_directive) = external_directive { - to_delete.push((pos, external_directive.clone())); - } + let directives = object_pos.get_applied_directives(schema, &external_directive.name); + if !directives.is_empty() { + to_delete.push((object_pos, directives[0].clone())); } } for (pos, directive) in to_delete { @@ -330,12 +493,110 @@ impl<'a> SchemaUpgrader<'a> { } fn remove_external_on_type_extensions(&mut self) -> Result<(), FederationError> { - todo!(); + let Some(metadata) = &self.schema.subgraph_metadata else { + return Ok(()); + }; + let types: Vec<_> = self.schema.get_types().collect(); + let key_directive = metadata + .federation_spec_definition() + .key_directive_definition(&self.schema)?; + let external_directive = metadata + .federation_spec_definition() + .external_directive_definition(&self.schema)?; + + let mut to_remove = vec![]; + for ty in &types { + if !ty.is_composite_type() + || (!self.is_federation_type_extension(ty)? 
&& !self.is_root_type_extension(ty)) + { + continue; + } + + let key_applications = ty.get_applied_directives(&self.schema, &key_directive.name); + if !key_applications.is_empty() { + for directive in key_applications { + let args = metadata + .federation_spec_definition() + .key_directive_arguments(directive)?; + for field in collect_target_fields_from_field_set( + Valid::assume_valid_ref(self.schema.schema()), + ty.type_name().clone(), + args.fields, + false, + )? { + let external = + field.get_applied_directives(&self.schema, &external_directive.name); + if !external.is_empty() { + to_remove.push((field.clone(), external[0].clone())); + } + } + } + } else { + // ... but if the extension does _not_ have a key, then if the extension has a field that is + // part of the _1st_ key on the subgraph owning the type, then this field is not considered + // external (yes, it's pretty damn random, and it's even worst in that even if the extension + // does _not_ have the "field of the _1st_ key on the subgraph owning the type", then the + // query planner will still request it to the subgraph, generating an invalid query; but + // we ignore that here). Note however that because other subgraphs may have already been + // upgraded, we don't know which is the "type owner", so instead we look up at the first + // key of every other subgraph. It's not 100% what fed1 does, but we're in very-strange + // case territory in the first place, so this is probably good enough (that is, there is + // customer schema for which what we do here matter but not that I know of for which it's + // not good enough). 
+ let Some(entries) = self.object_type_map.get(ty.type_name()) else { + continue; + }; + for (subgraph_name, info) in entries.iter() { + if subgraph_name == self.expanded_info.subgraph_name.as_str() { + continue; + } + let Some(other_schema) = self.get_subgraph_by_name(subgraph_name) else { + continue; + }; + let keys_in_other = info.pos.get_applied_directives( + other_schema.schema(), + &info + .metadata + .federation_spec_definition() + .key_directive_definition(other_schema.schema())? + .name, + ); + if keys_in_other.is_empty() { + continue; + } + let directive = keys_in_other[0]; + let args = metadata + .federation_spec_definition() + .key_directive_arguments(directive)?; + for field in collect_target_fields_from_field_set( + Valid::assume_valid_ref(self.schema.schema()), + ty.type_name().clone(), + args.fields, + false, + )? { + if TypeDefinitionPosition::from(field.parent()) != info.pos { + continue; + } + let external = + field.get_applied_directives(&self.schema, &external_directive.name); + if !external.is_empty() { + to_remove.push((field.clone(), external[0].clone())); + } + } + } + } + } + + for (pos, directive) in &to_remove { + pos.remove_directive(&mut self.schema, directive); + } + Ok(()) } fn fix_inactive_provides_and_requires(&mut self) -> Result<(), FederationError> { + let cloned_schema = self.schema.clone(); remove_inactive_requires_and_provides_from_subgraph( - self.original_subgraph.schema(), + &cloned_schema, // TODO: I don't know what this value should be &mut self.schema, ) } @@ -367,8 +628,9 @@ impl<'a> SchemaUpgrader<'a> { ) -> Result { let type_ = ty.get(self.schema.schema())?; let has_extend = self - .original_subgraph - .extends_directive_name()? 
+ .expanded_info + .extends_directive_name + .as_ref() .is_some_and(|extends| type_.directives().has(extends.as_str())); Ok((Self::has_extension_elements(type_) || has_extend) && (type_.is_object() || type_.is_interface()) @@ -418,25 +680,26 @@ impl<'a> SchemaUpgrader<'a> { /// Whether the type is a root type but is declared only as an extension, which federation 1 actually accepts. fn is_root_type_extension(&self, pos: &TypeDefinitionPosition) -> bool { - if !matches!(pos, TypeDefinitionPosition::Object(_)) || !self.is_root_type(pos) { + if !matches!(pos, TypeDefinitionPosition::Object(_)) + || !Self::is_root_type(&self.schema, pos) + { return false; } let Ok(ty) = pos.get(self.schema.schema()) else { return false; }; let has_extends_directive = self - .original_subgraph - .extends_directive_name() - .ok() - .flatten() + .expanded_info + .extends_directive_name + .as_ref() .is_some_and(|extends| ty.directives().has(extends.as_str())); has_extends_directive || (Self::has_extension_elements(ty) && !Self::has_non_extension_elements(ty)) } - fn is_root_type(&self, ty: &TypeDefinitionPosition) -> bool { - self.schema + fn is_root_type(schema: &FederationSchema, ty: &TypeDefinitionPosition) -> bool { + schema .schema() .schema_definition .iter_root_operations() @@ -444,23 +707,23 @@ impl<'a> SchemaUpgrader<'a> { } fn remove_directives_on_interface(&mut self) -> Result<(), FederationError> { - if let Some(key) = self.original_subgraph.key_directive_name()? { + if let Some(key) = &self.expanded_info.key_directive_name { for pos in &self .schema .referencers() - .get_directive(&key)? + .get_directive(key)? .interface_types .clone() { - pos.remove_directive_name(&mut self.schema, &key); + pos.remove_directive_name(&mut self.schema, key); let fields: Vec<_> = pos.fields(self.schema.schema())?.collect(); for field in fields { - if let Some(provides) = self.original_subgraph.provides_directive_name()? 
{ - field.remove_directive_name(&mut self.schema, &provides); + if let Some(provides) = &self.expanded_info.provides_directive_name { + field.remove_directive_name(&mut self.schema, provides); } - if let Some(requires) = self.original_subgraph.requires_directive_name()? { - field.remove_directive_name(&mut self.schema, &requires); + if let Some(requires) = &self.expanded_info.requires_directive_name { + field.remove_directive_name(&mut self.schema, requires); } } } @@ -510,11 +773,9 @@ impl<'a> SchemaUpgrader<'a> { for field in pos.fields(self.schema.schema())? { has_fields = true; let field_def = FieldDefinitionPosition::from(field.clone()); - if self - .original_subgraph - .metadata() - .is_field_external(&field_def) - && !self.original_subgraph.metadata().is_field_used(&field_def) + + let metadata = self.subgraph_metadata()?; + if metadata.is_field_external(&field_def) && !metadata.is_field_used(&field_def) { fields_to_remove.insert(field); } @@ -557,7 +818,103 @@ impl<'a> SchemaUpgrader<'a> { } fn add_shareable(&mut self) -> Result<(), FederationError> { - todo!(); + let Some(metadata) = &self.schema.subgraph_metadata else { + return Ok(()); + }; + + let Some(key_directive_name) = &self.expanded_info.key_directive_name else { + return Ok(()); + }; + + let shareable_directive_name = metadata + .federation_spec_definition() + .shareable_directive_definition(&self.schema)? 
+ .name + .clone(); + + let mut fields_to_add_shareable = vec![]; + let mut types_to_add_shareable = vec![]; + for type_pos in self.schema.get_types() { + let has_key_directive = + type_pos.has_applied_directive(&self.schema, key_directive_name); + let is_root_type = Self::is_root_type(&self.schema, &type_pos); + let TypeDefinitionPosition::Object(obj_pos) = type_pos else { + continue; + }; + let obj_name = &obj_pos.type_name; + // Skip Subscription root type - no shareable needed + if obj_pos.type_name == GRAPHQL_SUBSCRIPTION_TYPE_NAME { + continue; + } + if has_key_directive || is_root_type { + for field in obj_pos.fields(self.schema.schema())? { + let obj_field = FieldDefinitionPosition::Object(field.clone()); + if metadata.is_field_shareable(&obj_field) { + continue; + } + let Some(entries) = self.object_type_map.get(obj_name) else { + continue; + }; + let type_in_other_subgraphs = entries.iter().any(|(subgraph_name, info)| { + let field_exists = self + .get_subgraph_by_name(subgraph_name) + .unwrap() + .schema() + .schema() + .type_field(&field.type_name, &field.field_name) + .is_ok(); + + if (subgraph_name != self.expanded_info.subgraph_name.as_str()) + && field_exists + && (!info.metadata.is_field_external(&obj_field) + || info.metadata.is_field_partially_external(&obj_field)) + { + return true; + } + false + }); + if type_in_other_subgraphs + && !obj_field.has_applied_directive(&self.schema, &shareable_directive_name) + { + fields_to_add_shareable.push(field.clone()); + } + } + } else { + let Some(entries) = self.object_type_map.get(obj_name) else { + continue; + }; + let type_in_other_subgraphs = entries.iter().any(|(subgraph_name, _info)| { + if subgraph_name != self.expanded_info.subgraph_name.as_str() { + return true; + } + false + }); + if type_in_other_subgraphs + && !obj_pos.has_applied_directive(&self.schema, &shareable_directive_name) + { + types_to_add_shareable.push(obj_pos.clone()); + } + } + } + for pos in &fields_to_add_shareable { + 
pos.insert_directive( + &mut self.schema, + Node::new(Directive { + name: shareable_directive_name.clone(), + arguments: vec![], + }), + )?; + } + for pos in &types_to_add_shareable { + pos.insert_directive( + &mut self.schema, + Component::new(Directive { + name: shareable_directive_name.clone(), + arguments: vec![], + }), + )?; + } + Ok(()) } fn remove_tag_on_external(&mut self) -> Result<(), FederationError> { @@ -568,7 +925,7 @@ impl<'a> SchemaUpgrader<'a> { applications .iter() .try_for_each(|application| -> Result<(), FederationError> { - if let Ok(application) = (*application).as_ref() { + if let Ok(application) = application { if let Ok(target) = FieldDefinitionPosition::try_from(application.target.clone()) { if metadata .external_metadata() @@ -576,8 +933,8 @@ impl<'a> SchemaUpgrader<'a> { { let used_in_other_definitions = self.subgraphs.iter().fallible_any( - |subgraph| -> Result { - if self.original_subgraph.name != subgraph.name { + |(name, subgraph)| -> Result { + if &self.expanded_info.subgraph_name != name { // check to see if the field is external in the other subgraphs if let Some(other_metadata) = &subgraph.schema().subgraph_metadata @@ -617,7 +974,7 @@ impl<'a> SchemaUpgrader<'a> { if used_in_other_definitions? 
{ // remove @tag to_delete.push(( - target.clone(), + target, application.directive.clone(), )); } @@ -648,12 +1005,11 @@ impl<'a> SchemaUpgrader<'a> { mod tests { use super::*; - const FEDERATION2_LINK_WITH_AUTO_EXPANDED_IMPORTS_UPGRADED: &str = r#"@link(url: "https://specs.apollo.dev/federation/v2.4", import: ["@key", "@requires", "@provides", "@external", "@tag", "@extends", "@shareable", "@inaccessible", "@override", "@composeDirective", "@interfaceObject"])"#; + const FEDERATION2_LINK_WITH_AUTO_EXPANDED_IMPORTS_UPGRADED: &str = r#"@link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.4", import: ["@key", "@requires", "@provides", "@external", "@tag", "@extends", "@shareable", "@inaccessible", "@override", "@composeDirective", "@interfaceObject"])"#; - #[ignore = "not yet implemented"] #[test] fn upgrades_complex_schema() { - let mut s1 = Subgraph::parse( + let s1 = Subgraph::parse( "s1", "", r#" @@ -675,7 +1031,7 @@ mod tests { # A type with a genuine 'graphqQL' extension, to ensure the extend don't get removed. type Random { - x: Int @provides(fields: "x") + x: Int } extend type Random { @@ -689,7 +1045,7 @@ mod tests { // Note that no changes are really expected on that 2nd schema: it is just there to make the example not throw due to // then Product type extension having no "base". 
- let mut s2 = Subgraph::parse( + let s2 = Subgraph::parse( "s2", "", r#" @@ -704,53 +1060,92 @@ mod tests { .expand_links() .expect("expands schema"); - upgrade_subgraphs_if_necessary(&[&mut s1, &mut s2]).expect("upgrades schema"); + let [s1, _s2]: [Subgraph; 2] = upgrade_subgraphs_if_necessary(vec![s1, s2]) + .expect("upgrades schema") + .try_into() + .expect("Expected 2 elements"); insta::assert_snapshot!( - s1.schema().schema().to_string(), - r#" - schema - FEDERATION2_LINK_WITH_AUTO_EXPANDED_IMPORTS_UPGRADED - { - query: Query + s1.schema().schema().to_string(), @r###" + schema @link(url: "https://specs.apollo.dev/federation/v2.4", import: ["@key", "@requires", "@provides", "@external", "@shareable", "@override", "@tag", "@composeDirective", "@interfaceObject"]) @link(url: "https://specs.apollo.dev/link/v1.0") { + query: Query } + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + + directive @requires(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @provides(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @external(reason: String) on OBJECT | FIELD_DEFINITION + + directive @shareable repeatable on OBJECT | FIELD_DEFINITION + + directive @override(from: String!) on FIELD_DEFINITION + + directive @tag repeatable on ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + directive @composeDirective(name: String!) repeatable on SCHEMA + + directive @interfaceObject on OBJECT + type Query { - products: [Product!]! @provides(fields: "description") + products: [Product!]! @provides(fields: "description") + _entities(representations: [_Any!]!): [_Entity]! @shareable + _service: _Service! @shareable } interface I { - upc: ID! - description: String - } - - type Product implements I - @key(fields: "upc") - { - upc: ID! 
- inventory: Int - description: String @external + upc: ID! + description: String } type Random { - x: Int + x: Int } extend type Random { - y: Int + y: Int } - "# - .replace( - "FEDERATION2_LINK_WITH_AUTO_EXPANDED_IMPORTS_UPGRADED", - FEDERATION2_LINK_WITH_AUTO_EXPANDED_IMPORTS_UPGRADED - ) + + type Product implements I @key(fields: "upc") { + upc: ID! + inventory: Int + description: String @external + } + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + scalar link__Import + + scalar federation__FieldSet + + scalar _Any + + type _Service @shareable { + sdl: String + } + + union _Entity = Product + "### ); } - #[ignore = "not yet implemented"] #[test] + #[ignore] fn update_federation_directive_non_string_arguments() { - let mut s = Subgraph::parse( + let s = Subgraph::parse( "s", "", r#" @@ -768,7 +1163,10 @@ mod tests { .expand_links() .expect("expands schema"); - upgrade_subgraphs_if_necessary(&[&mut s]).expect("upgrades schema"); + let [s]: [Subgraph; 1] = upgrade_subgraphs_if_necessary(vec![s]) + .expect("upgrades schema") + .try_into() + .expect("Expected 1 element"); insta::assert_snapshot!( s.schema().schema().to_string(), @@ -795,10 +1193,9 @@ mod tests { ); } - #[ignore = "not yet implemented"] #[test] fn remove_tag_on_external_field_if_found_on_definition() { - let mut s1 = Subgraph::parse( + let s1 = Subgraph::parse( "s1", "", r#" @@ -817,7 +1214,7 @@ mod tests { .expand_links() .expect("expands schema"); - let mut s2 = Subgraph::parse( + let s2 = Subgraph::parse( "s2", "", r#" @@ -831,10 +1228,27 @@ mod tests { .expand_links() .expect("expands schema"); - upgrade_subgraphs_if_necessary(&[&mut s1, &mut s2]).expect("upgrades schema"); + let [s1, s2]: [Subgraph; 2] = upgrade_subgraphs_if_necessary(vec![s1, s2]) + .expect("upgrades schema") + .try_into() + .expect("Expected 2 
elements"); - let type_a_in_s1 = s1.schema().schema().get_object("A").unwrap(); - let type_a_in_s2 = s2.schema().schema().get_object("A").unwrap(); + let type_a_in_s1 = s1 + .schema() + .schema() + .get_object("A") + .unwrap() + .fields + .get("y") + .unwrap(); + let type_a_in_s2 = s2 + .schema() + .schema() + .get_object("A") + .unwrap() + .fields + .get("y") + .unwrap(); assert_eq!(type_a_in_s1.directives.get_all("tag").count(), 0); assert_eq!( @@ -847,7 +1261,6 @@ mod tests { ); } - #[ignore = "not yet implemented"] #[test] fn reject_interface_object_usage_if_not_all_subgraphs_are_fed2() { // Note that this test both validates the rejection of fed1 subgraph when @interfaceObject is used somewhere, but also @@ -856,7 +1269,7 @@ mod tests { // work, it would be really confusing to not reject the example below right away, since it "looks" like it the @key on // the interface in the 2nd subgraph should work, but it actually won't. - let mut s1 = Subgraph::parse("s1", "", r#" + let s1 = Subgraph::parse("s1", "", r#" extend schema @link(url: "https://specs.apollo.dev/federation/v2.3", import: [ "@key", "@interfaceObject"]) @@ -873,7 +1286,7 @@ mod tests { .expand_links() .expect("expands schema"); - let mut s2 = Subgraph::parse( + let s2 = Subgraph::parse( "s2", "", r#" @@ -892,18 +1305,19 @@ mod tests { .expand_links() .expect("expands schema"); - let errors = upgrade_subgraphs_if_necessary(&[&mut s1, &mut s2]).expect_err("should fail"); + let errors = upgrade_subgraphs_if_necessary(vec![s1, s2]).expect_err("should fail"); assert_eq!( errors.to_string(), - r#"The @interfaceObject directive can only be used if all subgraphs have federation 2 subgraph schema (schema with a `@link` to "https://specs.apollo.dev/federation" version 2.0 or newer): @interfaceObject is used in subgraph "s1" but subgraph "s2" is not a federation 2 subgraph schema."# + r#"An internal error has occurred, please report this bug to Apollo. 
+ +Details: The @interfaceObject directive can only be used if all subgraphs have federation 2 subgraph schema (schema with a `@link` to "https://specs.apollo.dev/federation" version 2.0 or newer): @interfaceObject is used in subgraph "s1" but subgraph "s2" is not a federation 2 subgraph schema."# ); } - #[ignore = "not yet implemented"] #[test] fn handles_addition_of_shareable_when_external_is_used_on_type() { - let mut s1 = Subgraph::parse( + let s1 = Subgraph::parse( "s1", "", r#" @@ -921,7 +1335,7 @@ mod tests { .expand_links() .expect("expands schema"); - let mut s2 = Subgraph::parse( + let s2 = Subgraph::parse( "s2", "", r#" @@ -938,12 +1352,14 @@ mod tests { .expand_links() .expect("expands schema"); - upgrade_subgraphs_if_necessary(&[&mut s1, &mut s2]).expect("upgrades schema"); + let [s1, s2]: [Subgraph; 2] = upgrade_subgraphs_if_necessary(vec![s1, s2]) + .expect("upgrades schema") + .try_into() + .expect("Expected 2 elements"); // 2 things must happen here: // 1. the @external on type `T` in s2 should be removed, as @external on types were no-ops in fed1 (but not in fed2 anymore, hence the removal) // 2. field `T.x` in s1 must be marked @shareable since it is resolved by s2 (since again, it's @external annotation is ignored). - assert!( s2.schema() .schema() @@ -959,10 +1375,9 @@ mod tests { ); } - #[ignore = "not yet implemented"] #[test] fn fully_upgrades_schema_with_no_link_directives() { - let mut subgraph = Subgraph::parse( + let subgraph = Subgraph::parse( "subgraph", "", r#" @@ -975,7 +1390,10 @@ mod tests { .expand_links() .expect("expands schema"); - upgrade_subgraphs_if_necessary(&[&mut subgraph]).expect("upgrades schema"); + let [subgraph]: [Subgraph; 1] = upgrade_subgraphs_if_necessary(vec![subgraph]) + .expect("upgrades schema") + .try_into() + .expect("Expected 1 element"); // Note: this test mostly exists for dev awareness. 
By design, this will // always require updating when the fed spec version is updated, so hopefully // you're reading this comment. Existing schemas which don't include a @link @@ -995,21 +1413,63 @@ mod tests { // - Trevor insta::assert_snapshot!( subgraph.schema().schema().to_string(), - r#" - schema - @link(url: "https://specs.apollo.dev/link/v1.0") - @link(url: "https://specs.apollo.dev/federation/v2.4", import: ["@key", "@requires", "@provides", "@external", "@tag", "@extends", "@shareable", "@inaccessible", "@override", "@composeDirective", "@interfaceObject"]) - { - query: Query + @r###" + schema @link(url: "https://specs.apollo.dev/federation/v2.4", import: ["@key", "@requires", "@provides", "@external", "@shareable", "@override", "@tag", "@composeDirective", "@interfaceObject"]) @link(url: "https://specs.apollo.dev/link/v1.0") { + query: Query } - "# + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + + directive @requires(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @provides(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @external(reason: String) on OBJECT | FIELD_DEFINITION + + directive @shareable repeatable on OBJECT | FIELD_DEFINITION + + directive @override(from: String!) on FIELD_DEFINITION + + directive @tag repeatable on ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + directive @composeDirective(name: String!) repeatable on SCHEMA + + directive @interfaceObject on OBJECT + + type Query { + hello: String + _service: _Service! + } + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION + } + + scalar link__Import + + scalar federation__FieldSet + + scalar _Any + + type _Service { + sdl: String + } + "### ); } - #[ignore = "not yet implemented"] #[test] fn does_not_add_shareable_to_subscriptions() { - let mut subgraph1 = Subgraph::parse( + let subgraph1 = Subgraph::parse( "subgraph1", "", r#" @@ -1026,7 +1486,7 @@ mod tests { .expand_links() .expect("expands schema"); - let mut subgraph2 = Subgraph::parse( + let subgraph2 = Subgraph::parse( "subgraph2", "", r#" @@ -1043,8 +1503,11 @@ mod tests { .expand_links() .expect("expands schema"); - upgrade_subgraphs_if_necessary(&[&mut subgraph1, &mut subgraph2]).expect("upgrades schema"); - + let [subgraph1, subgraph2]: [Subgraph; 2] = + upgrade_subgraphs_if_necessary(vec![subgraph1, subgraph2]) + .expect("upgrades schema") + .try_into() + .expect("Expected 2 elements"); assert!( !subgraph1 .schema() diff --git a/apollo-federation/src/schema/subgraph_metadata.rs b/apollo-federation/src/schema/subgraph_metadata.rs index 44e689d415..932fc7a05c 100644 --- a/apollo-federation/src/schema/subgraph_metadata.rs +++ b/apollo-federation/src/schema/subgraph_metadata.rs @@ -173,7 +173,7 @@ impl SubgraphMetadata { for requires_directive in applications.into_iter().filter_map(|d| d.ok()) { required_fields.extend(collect_target_fields_from_field_set( unwrap_schema(schema), - requires_directive.target.type_name.clone(), + requires_directive.target.type_name().clone(), requires_directive.arguments.fields, false, )?); diff --git a/apollo-federation/src/schema/type_and_directive_specification.rs b/apollo-federation/src/schema/type_and_directive_specification.rs index c92af72f73..136ed0d790 100644 --- a/apollo-federation/src/schema/type_and_directive_specification.rs +++ b/apollo-federation/src/schema/type_and_directive_specification.rs @@ -287,7 +287,7 @@ impl TypeAndDirectiveSpecification for UnionTypeSpecification { } let type_pos = UnionTypeDefinitionPosition { - type_name: actual_name.clone(), + 
type_name: actual_name, }; type_pos.pre_insert(schema)?; type_pos.insert( @@ -339,22 +339,22 @@ impl TypeAndDirectiveSpecification for EnumTypeSpecification { .iter() .map(|val| val.0.clone()) .collect(); - let actual_value_set: IndexSet = + let expected_value_set: IndexSet = self.values.iter().map(|val| val.name.clone()).collect(); - if existing_value_set != actual_value_set { + if existing_value_set != expected_value_set { let enum_type_name = &self.name; - let expected_value_names: Vec = existing_value_set + let expected_value_names: Vec = expected_value_set .iter() .sorted_by(|a, b| a.cmp(b)) .map(|name| name.to_string()) .collect(); - let actual_value_names: Vec = actual_value_set + let actual_value_names: Vec = existing_value_set .iter() .sorted_by(|a, b| a.cmp(b)) .map(|name| name.to_string()) .collect(); return Err(SingleFederationError::TypeDefinitionInvalid { - message: format!("Invalid definition of type {enum_type_name}: expected values [{}] but found [{}].", + message: format!(r#"Invalid definition for type "{enum_type_name}": expected values [{}] but found [{}]."#, expected_value_names.join(", "), actual_value_names.join(", ")) }.into()); } @@ -599,11 +599,9 @@ impl TypeAndDirectiveSpecification for DirectiveSpecification { ////////////////////////////////////////////////////////////////////////////// // Helper functions for TypeSpecification implementations // Argument naming conventions: -// - `expected`: the expected definition either by the Federation assumption or the existing -// definition in the schema. -// - `actual`: the definition from the TypeAndDirectiveSpecification, which is being checked. -// PORT_NOTE: The JS code uses the terms `actual`, `expected` and `existing` slightly differently. -// But, the new convention seems easier to understand. +// - `existing` or `actual`: the existing definition as defined in the schema. 
+// - `expected`: the expected definition either by the Federation assumption or from the +// TypeAndDirectiveSpecification. // TODO: Consider moving this to the schema module. #[derive(Clone, PartialEq, Eq, Hash, derive_more::Display)] @@ -705,8 +703,8 @@ fn default_value_message(value: Option<&Value>) -> String { } fn ensure_same_arguments( - expected: &[Node], - actual: &[ResolvedArgumentSpecification], + expected: &[ResolvedArgumentSpecification], + actual: &[Node], schema: &FederationSchema, what: &str, generate_error: fn(&str) -> SingleFederationError, @@ -730,7 +728,7 @@ fn ensure_same_arguments( // ensure expected argument and actual argument have the same type. // TODO: Make it easy to get a cloned (inner) type from a Node. - let mut actual_type = actual_arg.ty.clone(); + let mut actual_type = actual_arg.ty.as_ref().clone(); if actual_type.is_non_null() && !expected_arg.ty.is_non_null() { // It's ok to redefine an optional argument as mandatory. For instance, if you want to force people on your team to provide a "deprecation reason", you can // redefine @deprecated as `directive @deprecated(reason: String!)...` to get validation. 
In other words, you are allowed to always pass an argument that @@ -739,22 +737,22 @@ fn ensure_same_arguments( } // ensure argument type is compatible with the expected one and // argument's default value (if any) is compatible with the expected one - if *expected_arg.ty != actual_type - && is_valid_input_type_redefinition(&expected_arg.ty, &actual_type, schema) + if expected_arg.ty != actual_type + && !is_valid_input_type_redefinition(&expected_arg.ty, &actual_type, schema) { let arg_name = &expected_arg.name; let expected_type = &expected_arg.ty; errors.push(generate_error(&format!( - r#"Invalid definition for {what}: Argument "{arg_name}" should have type {expected_type} but found type {actual_type}"# + r#"Invalid definition for {what}: argument "{arg_name}" should have type "{expected_type}" but found type "{actual_type}""# ))); } else if !actual_type.is_non_null() - && expected_arg.default_value.as_deref() != actual_arg.default_value.as_ref() + && expected_arg.default_value.as_ref() != actual_arg.default_value.as_deref() { let arg_name = &expected_arg.name; - let expected_value = default_value_message(expected_arg.default_value.as_deref()); - let actual_value = default_value_message(actual_arg.default_value.as_ref()); + let expected_value = default_value_message(expected_arg.default_value.as_ref()); + let actual_value = default_value_message(actual_arg.default_value.as_deref()); errors.push(generate_error(&format!( - r#"Invalid definition for {what}: Argument "{arg_name}" should have {expected_value} but found {actual_value}"# + r#"Invalid definition for {what}: argument "{arg_name}" should have {expected_value} but found {actual_value}"# ))); } } @@ -774,23 +772,26 @@ fn ensure_same_arguments( errors } +// The `existing_obj_type` is the definition that is defined in the schema. +// And the `expected_fields` are the expected fields from the specification. +// The existing (= actual) field definitions must be compatible with the expected ones. 
fn ensure_same_fields( existing_obj_type: &ObjectType, - actual_fields: &[FieldSpecification], + expected_fields: &[FieldSpecification], schema: &FederationSchema, ) -> Vec { let obj_type_name = existing_obj_type.name.clone(); let mut errors = vec![]; - // ensure all actual fields are a subset of the existing object type's fields. - for actual_field_def in actual_fields { - let actual_field_name = &actual_field_def.name; - let expected_field = existing_obj_type.fields.get(actual_field_name); - let Some(expected_field) = expected_field else { + // ensure all expected fields are a subset of the existing object type's fields. + for expected_field_def in expected_fields { + let field_name = &expected_field_def.name; + let existing_field = existing_obj_type.fields.get(field_name); + let Some(existing_field) = existing_field else { errors.push(SingleFederationError::TypeDefinitionInvalid { message: format!( "Invalid definition of type {}: missing field {}", - obj_type_name, actual_field_name + obj_type_name, field_name ), }); continue; @@ -800,23 +801,23 @@ fn ensure_same_fields( // We allow adding non-nullability because we've seen redefinition of the federation // _Service type with type String! for the `sdl` field and we don't want to break backward // compatibility as this doesn't feel too harmful. 
- let mut expected_field_type = expected_field.ty.clone(); - if !actual_field_def.ty.is_non_null() && expected_field_type.is_non_null() { - expected_field_type = expected_field_type.nullable(); + let mut existing_field_type = existing_field.ty.clone(); + if !expected_field_def.ty.is_non_null() && existing_field_type.is_non_null() { + existing_field_type = existing_field_type.nullable(); } - if actual_field_def.ty != expected_field_type { - let actual_field_type = &actual_field_def.ty; + if expected_field_def.ty != existing_field_type { + let expected_field_type = &expected_field_def.ty; errors.push(SingleFederationError::TypeDefinitionInvalid { - message: format!("Invalid definition for field {actual_field_name} of type {obj_type_name}: should have type {expected_field_type} but found type {actual_field_type}") + message: format!("Invalid definition for field {field_name} of type {obj_type_name}: should have type {expected_field_type} but found type {existing_field_type}") }); } // ensure field arguments are as expected let mut arg_errors = ensure_same_arguments( - &expected_field.arguments, - &actual_field_def.arguments, + &expected_field_def.arguments, + &existing_field.arguments, schema, - &format!(r#"field "{}.{}""#, obj_type_name, expected_field.name), + &format!(r#"field "{}.{}""#, obj_type_name, existing_field.name), |s| SingleFederationError::TypeDefinitionInvalid { message: s.to_string(), }, @@ -827,6 +828,9 @@ fn ensure_same_fields( errors } +// The `existing_directive` is the definition that is defined in the schema. +// And the rest of arguments are the expected directive definition from the specification. +// The existing (= actual) definition must be compatible with the expected one. 
fn ensure_same_directive_structure( existing_directive: &DirectiveDefinition, name: &Name, @@ -837,20 +841,20 @@ fn ensure_same_directive_structure( ) -> Result<(), FederationError> { let directive_name = format!("@{name}"); let mut arg_errors = ensure_same_arguments( - &existing_directive.arguments, args, + &existing_directive.arguments, schema, - &format!(r#"directive {directive_name}"#), + &format!(r#"directive "{directive_name}""#), |s| SingleFederationError::DirectiveDefinitionInvalid { message: s.to_string(), }, ); // It's ok to say you'll never repeat a repeatable directive. It's not ok to repeat one that isn't. - if !existing_directive.repeatable && repeatable { + if existing_directive.repeatable && !repeatable { arg_errors.push(SingleFederationError::DirectiveDefinitionInvalid { message: format!( - "Invalid definition for directive {directive_name}: {directive_name} should not be repeatable" + r#"Invalid definition for directive "{directive_name}": "{directive_name}" should not be repeatable"# ), }); } @@ -858,11 +862,12 @@ fn ensure_same_directive_structure( // Similarly, it's ok to say that you will never use a directive in some locations, but not that // you will use it in places not allowed by what is expected. // Ensure `locations` is a subset of `existing_directive.locations`. 
- if !locations + if !existing_directive + .locations .iter() - .all(|loc| existing_directive.locations.contains(loc)) + .all(|loc| locations.contains(loc)) { - let actual_locations: Vec = locations.iter().map(|loc| loc.to_string()).collect(); + let expected_locations: Vec = locations.iter().map(|loc| loc.to_string()).collect(); let existing_locations: Vec = existing_directive .locations .iter() @@ -870,8 +875,8 @@ fn ensure_same_directive_structure( .collect(); arg_errors.push(SingleFederationError::DirectiveDefinitionInvalid { message: format!( - "Invalid definition for directive {directive_name}: {directive_name} should have locations [{}] but found [{}]", - existing_locations.join(", "), actual_locations.join(", ") + r#"Invalid definition for directive "{directive_name}": "{directive_name}" should have locations {}, but found (non-subset) {}"#, + expected_locations.join(", "), existing_locations.join(", ") ), }); } diff --git a/apollo-federation/src/schema/validators/cost.rs b/apollo-federation/src/schema/validators/cost.rs new file mode 100644 index 0000000000..8b7c813364 --- /dev/null +++ b/apollo-federation/src/schema/validators/cost.rs @@ -0,0 +1,30 @@ +use crate::error::FederationError; +use crate::error::MultipleFederationErrors; +use crate::error::SingleFederationError; +use crate::link::cost_spec_definition::CostSpecDefinition; +use crate::schema::FederationSchema; + +pub(crate) fn validate_cost_directives( + schema: &FederationSchema, + errors: &mut MultipleFederationErrors, +) -> Result<(), FederationError> { + let Some(cost_directive_name) = CostSpecDefinition::cost_directive_name(schema)? else { + return Ok(()); + }; + let Ok(cost_directive_referencers) = schema + .referencers() + .get_directive(cost_directive_name.as_str()) + else { + // This just returns an Err if the directive is not found, which is fine in this case. 
+ return Ok(()); + }; + for interface_field in &cost_directive_referencers.interface_fields { + errors + .errors + .push(SingleFederationError::CostAppliedToInterfaceField { + interface: interface_field.type_name.clone(), + field: interface_field.field_name.clone(), + }); + } + Ok(()) +} diff --git a/apollo-federation/src/schema/validators/external.rs b/apollo-federation/src/schema/validators/external.rs new file mode 100644 index 0000000000..e08de4e8bb --- /dev/null +++ b/apollo-federation/src/schema/validators/external.rs @@ -0,0 +1,72 @@ +// the `@external` directive validation + +use crate::error::FederationError; +use crate::error::MultipleFederationErrors; +use crate::error::SingleFederationError; +use crate::schema::FederationSchema; +use crate::schema::position::InterfaceTypeDefinitionPosition; +use crate::schema::position::ObjectOrInterfaceTypeDefinitionPosition; +use crate::schema::subgraph_metadata::SubgraphMetadata; + +pub(crate) fn validate_external_directives( + schema: &FederationSchema, + metadata: &SubgraphMetadata, + errors: &mut MultipleFederationErrors, +) -> Result<(), FederationError> { + validate_no_external_on_interface_fields(schema, metadata, errors)?; + validate_all_external_fields_used(schema, metadata, errors)?; + Ok(()) +} + +fn validate_no_external_on_interface_fields( + schema: &FederationSchema, + metadata: &SubgraphMetadata, + errors: &mut MultipleFederationErrors, +) -> Result<(), FederationError> { + for type_name in schema.referencers().interface_types.keys() { + let type_pos: InterfaceTypeDefinitionPosition = + schema.get_type(type_name.clone())?.try_into()?; + for field_pos in type_pos.fields(schema.schema())? 
{ + let is_external = metadata + .external_metadata() + .is_external(&field_pos.clone().into()); + if is_external { + errors.push(SingleFederationError::ExternalOnInterface { + message: format!( + r#"Interface type field "{field_pos}" is marked @external but @external is not allowed on interface fields."# + ), + }.into()) + } + } + } + Ok(()) +} + +// Checks that all fields marked @external is used in a federation directive (@key, @provides or +// @requires) _or_ to satisfy an interface implementation. Otherwise, the field declaration is +// somewhat useless. +fn validate_all_external_fields_used( + schema: &FederationSchema, + metadata: &SubgraphMetadata, + errors: &mut MultipleFederationErrors, +) -> Result<(), FederationError> { + for type_pos in schema.get_types() { + let Ok(type_pos): Result = type_pos.try_into() + else { + continue; + }; + type_pos.fields(schema.schema())? + .for_each(|field| { + let field = field.into(); + if !metadata.is_field_external(&field) || metadata.is_field_used(&field) { + return; + } + errors.push(SingleFederationError::ExternalUnused { + message: format!( + r#"Field "{field}" is marked @external but is not used in any federation directive (@key, @provides, @requires) or to satisfy an interface; the field declaration has no use and should be removed (or the field should not be @external)."# + ), + }.into()); + }); + } + Ok(()) +} diff --git a/apollo-federation/src/schema/validators/key.rs b/apollo-federation/src/schema/validators/key.rs index 20479ff640..b903d27a99 100644 --- a/apollo-federation/src/schema/validators/key.rs +++ b/apollo-federation/src/schema/validators/key.rs @@ -7,15 +7,26 @@ use itertools::Itertools; use crate::error::FederationError; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; +use crate::link::federation_spec_definition::FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC; +use crate::link::spec::Version; +use crate::link::spec_definition::SpecDefinition; use 
crate::schema::FederationSchema; use crate::schema::HasFields; +use crate::schema::subgraph_metadata::SubgraphMetadata; use crate::schema::validators::DenyFieldsWithDirectiveApplications; use crate::schema::validators::SchemaFieldSetValidator; +use crate::schema::validators::deny_unsupported_directive_on_interface_type; pub(crate) fn validate_key_directives( schema: &FederationSchema, + meta: &SubgraphMetadata, errors: &mut MultipleFederationErrors, ) -> Result<(), FederationError> { + let directive_name = meta + .federation_spec_definition() + .directive_name_in_schema(schema, &FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC)? + .unwrap_or(FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC); + let fieldset_rules: Vec> = vec![ Box::new(DenyUnionAndInterfaceFields::new(schema.schema())), Box::new(DenyAliases::new()), @@ -23,16 +34,29 @@ pub(crate) fn validate_key_directives( Box::new(DenyFieldsWithArguments::new()), ]; + let allow_on_interface = + meta.federation_spec_definition().version() >= &Version { major: 2, minor: 3 }; + for key_directive in schema.key_directive_applications()? 
{ match key_directive { - Ok(key) => match key.parse_fields(schema.schema()) { - Ok(fields) => { - for rule in fieldset_rules.iter() { - rule.visit(key.target.type_name(), &fields, errors); + Ok(key) => { + if !allow_on_interface { + deny_unsupported_directive_on_interface_type( + &directive_name, + &key, + schema, + errors, + ); + } + match key.parse_fields(schema.schema()) { + Ok(fields) => { + for rule in fieldset_rules.iter() { + rule.visit(key.target.type_name(), &fields, errors); + } } + Err(e) => errors.push(e.into()), } - Err(e) => errors.push(e.into()), - }, + } Err(e) => errors.push(e), } } diff --git a/apollo-federation/src/schema/validators/list_size.rs b/apollo-federation/src/schema/validators/list_size.rs new file mode 100644 index 0000000000..fb0da3caca --- /dev/null +++ b/apollo-federation/src/schema/validators/list_size.rs @@ -0,0 +1,154 @@ +use apollo_compiler::schema::ExtendedType; +use apollo_compiler::ty; + +use crate::error::FederationError; +use crate::error::MultipleFederationErrors; +use crate::error::SingleFederationError; +use crate::schema::FederationSchema; +use crate::schema::ListSizeDirective; + +pub(crate) fn validate_list_size_directives( + schema: &FederationSchema, + errors: &mut MultipleFederationErrors, +) -> Result<(), FederationError> { + for list_size_directive in schema.list_size_directive_applications()? 
{ + match list_size_directive { + Ok(list_size) => { + validate_applied_to_list(&list_size, errors); + validate_assumed_size_not_negative(&list_size, errors); + validate_slicing_arguments_are_valid_integers(&list_size, errors); + validate_sized_fields_are_valid_lists(schema, &list_size, errors); + } + Err(e) => errors.push(e), + } + } + Ok(()) +} + +/// Validate that `@listSize` is only applied to lists per +/// https://ibm.github.io/graphql-specs/cost-spec.html#sec-Valid-List-Size-Target +fn validate_applied_to_list(list_size: &ListSizeDirective, errors: &mut MultipleFederationErrors) { + let has_sized_fields = list_size + .directive + .sized_fields + .as_ref() + .is_some_and(|s| !s.is_empty()); + if !has_sized_fields && !list_size.target.ty.is_list() { + errors + .errors + .push(SingleFederationError::ListSizeAppliedToNonList { + message: format!( + "\"{}.{}\" is not a list", + list_size.parent_type, list_size.target.name + ), + }); + } +} + +/// Validate assumed size, but we differ from https://ibm.github.io/graphql-specs/cost-spec.html#sec-Valid-Assumed-Size. +/// Assumed size is used as a backup for slicing arguments in the event they are both specified. +/// The spec aims to rule out cases when the assumed size will never be used because there is always +/// a slicing argument. Two applications which are compliant with that validation rule can be merged +/// into an application which is not compliant, thus we need to handle this case gracefully at runtime regardless. +/// We omit this check to keep the validations to those that will otherwise cause runtime failures. +/// +/// With all that said, assumed size should not be negative. 
+fn validate_assumed_size_not_negative( + list_size: &ListSizeDirective, + errors: &mut MultipleFederationErrors, +) { + if let Some(size) = list_size.directive.assumed_size { + if size < 0 { + errors + .errors + .push(SingleFederationError::ListSizeInvalidAssumedSize { + message: format!( + "Assumed size of \"{}.{}\" cannot be negative", + list_size.parent_type, list_size.target.name + ), + }); + } + } +} + +/// Validate `slicingArguments` select valid integer arguments on the target type per +/// https://ibm.github.io/graphql-specs/cost-spec.html#sec-Valid-Slicing-Arguments-Target +fn validate_slicing_arguments_are_valid_integers( + list_size: &ListSizeDirective, + errors: &mut MultipleFederationErrors, +) { + let Some(slicing_argument_names) = list_size.directive.slicing_argument_names.as_ref() else { + return; + }; + for arg_name in slicing_argument_names { + if let Some(slicing_argument) = list_size.target.argument_by_name(arg_name.as_str()) { + if *slicing_argument.ty != ty!(Int) && *slicing_argument.ty != ty!(Int!) 
{ + errors + .errors + .push(SingleFederationError::ListSizeInvalidSlicingArgument { + message: format!( + "Slicing argument \"{}.{}({}:)\" must be Int or Int!", + list_size.parent_type, list_size.target.name, arg_name, + ), + }); + } + } else { + errors + .errors + .push(SingleFederationError::ListSizeInvalidSlicingArgument { + message: format!( + "Slicing argument \"{arg_name}\" is not an argument of \"{}.{}\"", + list_size.parent_type, list_size.target.name + ), + }); + } + } +} + +/// Validate `sizedFields` select valid list fields on the target type per +/// https://ibm.github.io/graphql-specs/cost-spec.html#sec-Valid-Sized-Fields-Target +fn validate_sized_fields_are_valid_lists( + schema: &FederationSchema, + list_size: &ListSizeDirective, + errors: &mut MultipleFederationErrors, +) { + let Some(sized_field_names) = list_size.directive.sized_fields.as_ref() else { + return; + }; + let target_type = list_size.target.ty.inner_named_type(); + let fields = match schema.schema().types.get(target_type) { + Some(ExtendedType::Object(obj)) => &obj.fields, + Some(ExtendedType::Interface(itf)) => &itf.fields, + _ => { + errors + .errors + .push(SingleFederationError::ListSizeInvalidSizedField { + message: format!( + "Sized fields cannot be used because \"{target_type}\" is not a composite type" + ), + }); + return; + } + }; + for field_name in sized_field_names { + if let Some(field) = fields.get(field_name.as_str()) { + if !field.ty.is_list() { + errors + .errors + .push(SingleFederationError::ListSizeAppliedToNonList { + message: format!( + "Sized field \"{target_type}.{field_name}\" is not a list" + ), + }); + } + } else { + errors + .errors + .push(SingleFederationError::ListSizeInvalidSizedField { + message: format!( + "Sized field \"{field_name}\" is not a field on type \"{target_type}\"" + ), + }) + } + } +} diff --git a/apollo-federation/src/schema/validators/mod.rs b/apollo-federation/src/schema/validators/mod.rs index 0af9a8f046..bd719b664e 100644 --- 
a/apollo-federation/src/schema/validators/mod.rs +++ b/apollo-federation/src/schema/validators/mod.rs @@ -9,12 +9,19 @@ use apollo_compiler::executable::SelectionSet; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; use crate::schema::FederationSchema; +use crate::schema::KeyDirective; +use crate::schema::ProvidesDirective; +use crate::schema::RequiresDirective; use crate::schema::position::FieldDefinitionPosition; use crate::schema::position::InterfaceFieldDefinitionPosition; use crate::schema::position::ObjectFieldDefinitionPosition; +use crate::schema::position::ObjectOrInterfaceFieldDefinitionPosition; use crate::schema::subgraph_metadata::SubgraphMetadata; +pub(crate) mod cost; +pub(crate) mod external; pub(crate) mod key; +pub(crate) mod list_size; pub(crate) mod provides; pub(crate) mod requires; @@ -135,3 +142,84 @@ trait DenyNonExternalLeafFields<'a>: SchemaFieldSetValidator { } } } + +pub(crate) trait AppliesOnType { + fn applied_type(&self) -> &Name; + fn unsupported_on_interface_error(message: String) -> SingleFederationError; +} + +impl AppliesOnType for KeyDirective<'_> { + fn applied_type(&self) -> &Name { + self.target.type_name() + } + + fn unsupported_on_interface_error(message: String) -> SingleFederationError { + SingleFederationError::KeyUnsupportedOnInterface { message } + } +} + +pub(crate) trait AppliesOnField { + fn applied_field(&self) -> &ObjectOrInterfaceFieldDefinitionPosition; + fn unsupported_on_interface_error(message: String) -> SingleFederationError; +} + +impl AppliesOnField for RequiresDirective<'_> { + fn applied_field(&self) -> &ObjectOrInterfaceFieldDefinitionPosition { + &self.target + } + + fn unsupported_on_interface_error(message: String) -> SingleFederationError { + SingleFederationError::RequiresUnsupportedOnInterface { message } + } +} + +impl AppliesOnField for ProvidesDirective<'_> { + fn applied_field(&self) -> &ObjectOrInterfaceFieldDefinitionPosition { + &self.target + } + + fn 
unsupported_on_interface_error(message: String) -> SingleFederationError { + SingleFederationError::ProvidesUnsupportedOnInterface { message } + } +} + +pub(crate) fn deny_unsupported_directive_on_interface_type( + directive_name: &Name, + directive_application: &D, + schema: &FederationSchema, + errors: &mut MultipleFederationErrors, +) { + let applied_type = directive_application.applied_type(); + if schema.is_interface(applied_type) { + let directive_display = format!("@{directive_name}"); + errors.push( + D::unsupported_on_interface_error( + format!( + r#"Cannot use {directive_display} on interface "{applied_type}": {directive_display} is not yet supported on interfaces"#, + ), + ) + .into(), + ); + } +} + +pub(crate) fn deny_unsupported_directive_on_interface_field( + directive_name: &Name, + directive_application: &D, + schema: &FederationSchema, + errors: &mut MultipleFederationErrors, +) { + let applied_field = directive_application.applied_field(); + let parent_type = applied_field.parent(); + if schema.is_interface(parent_type.type_name()) { + let directive_display = format!("@{directive_name}"); + errors.push( + D::unsupported_on_interface_error( + format!( + r#"Cannot use {directive_display} on field "{applied_field}" of parent type "{parent_type}": {directive_display} is not yet supported within interfaces"#, + ), + ) + .into(), + ); + } +} diff --git a/apollo-federation/src/schema/validators/provides.rs b/apollo-federation/src/schema/validators/provides.rs index 77d385cbfb..daff6535bd 100644 --- a/apollo-federation/src/schema/validators/provides.rs +++ b/apollo-federation/src/schema/validators/provides.rs @@ -10,11 +10,11 @@ use crate::link::federation_spec_definition::FEDERATION_PROVIDES_DIRECTIVE_NAME_ use crate::link::spec_definition::SpecDefinition; use crate::schema::FederationSchema; use crate::schema::HasFields; -use crate::schema::position::FieldDefinitionPosition; use crate::schema::subgraph_metadata::SubgraphMetadata; use 
crate::schema::validators::DenyFieldsWithDirectiveApplications; use crate::schema::validators::DenyNonExternalLeafFields; use crate::schema::validators::SchemaFieldSetValidator; +use crate::schema::validators::deny_unsupported_directive_on_interface_field; pub(crate) fn validate_provides_directives( schema: &FederationSchema, @@ -40,24 +40,30 @@ pub(crate) fn validate_provides_directives( for provides_directive in schema.provides_directive_applications()? { match provides_directive { Ok(provides) => { + deny_unsupported_directive_on_interface_field( + &provides_directive_name, + &provides, + schema, + errors, + ); + // PORT NOTE: In JS, these two checks are done inside the `targetTypeExtractor`. - if metadata - .is_field_external(&FieldDefinitionPosition::Object(provides.target.clone())) - { + if metadata.is_field_external(&provides.target.clone().into()) { errors.errors.push( SingleFederationError::ExternalCollisionWithAnotherDirective { message: format!( "Cannot have both @provides and @external on field \"{}.{}\"", - provides.target.type_name, provides.target.field_name + provides.target.type_name(), + provides.target.field_name() ), }, ) } if !schema - .get_type(provides.target.type_name.clone()) + .get_type(provides.target.type_name().clone()) .is_ok_and(|ty| ty.is_composite_type()) { - errors.errors.push(SingleFederationError::ProvidesOnNonObjectField { message: format!("Invalid @provides directive on field \"{}.{}\": field has type \"{}\"", provides.target.type_name, provides.target.field_name, provides.target_return_type) }) + errors.errors.push(SingleFederationError::ProvidesOnNonObjectField { message: format!("Invalid @provides directive on field \"{}.{}\": field has type \"{}\"", provides.target.type_name(), provides.target.field_name(), provides.target_return_type) }) } // PORT NOTE: Think of this as `validateFieldSet`, but the set of rules are already filtered to account for what were boolean flags in JS diff --git 
a/apollo-federation/src/schema/validators/requires.rs b/apollo-federation/src/schema/validators/requires.rs index 91e14d7b9f..e2c89c3fd0 100644 --- a/apollo-federation/src/schema/validators/requires.rs +++ b/apollo-federation/src/schema/validators/requires.rs @@ -14,6 +14,7 @@ use crate::schema::subgraph_metadata::SubgraphMetadata; use crate::schema::validators::DenyFieldsWithDirectiveApplications; use crate::schema::validators::DenyNonExternalLeafFields; use crate::schema::validators::SchemaFieldSetValidator; +use crate::schema::validators::deny_unsupported_directive_on_interface_field; pub(crate) fn validate_requires_directives( schema: &FederationSchema, @@ -37,14 +38,22 @@ pub(crate) fn validate_requires_directives( for requires_directive in schema.requires_directive_applications()? { match requires_directive { - Ok(requires) => match requires.parse_fields(schema.schema()) { - Ok(fields) => { - for rule in fieldset_rules.iter() { - rule.visit(&requires.target.type_name, &fields, errors); + Ok(requires) => { + deny_unsupported_directive_on_interface_field( + &requires_directive_name, + &requires, + schema, + errors, + ); + match requires.parse_fields(schema.schema()) { + Ok(fields) => { + for rule in fieldset_rules.iter() { + rule.visit(requires.target.type_name(), &fields, errors); + } } + Err(e) => errors.push(e.into()), } - Err(e) => errors.push(e.into()), - }, + } Err(e) => errors.push(e), } } diff --git a/apollo-federation/src/sources/connect/expand/carryover.rs b/apollo-federation/src/sources/connect/expand/carryover.rs index 92d8e0c893..fc3e34a4bb 100644 --- a/apollo-federation/src/sources/connect/expand/carryover.rs +++ b/apollo-federation/src/sources/connect/expand/carryover.rs @@ -1,3 +1,5 @@ +mod inputs; + use apollo_compiler::Name; use apollo_compiler::Node; use apollo_compiler::ast::Argument; @@ -5,6 +7,8 @@ use apollo_compiler::ast::Directive; use apollo_compiler::ast::Value; use apollo_compiler::collections::HashSet; use apollo_compiler::name; 
+use inputs::copy_input_types; +use multimap::MultiMap; use crate::error::FederationError; use crate::link::DEFAULT_LINK_NAME; @@ -45,6 +49,7 @@ pub(super) fn carryover_directives( from: &FederationSchema, to: &mut FederationSchema, specs: impl Iterator, + subgraph_name_replacements: &MultiMap<&str, String>, ) -> Result<(), FederationError> { let Some(metadata) = from.metadata() else { return Ok(()); @@ -56,6 +61,15 @@ pub(super) fn carryover_directives( SchemaDefinitionPosition.insert_directive(to, spec.join_directive_application().into())?; } + // @link for connect + if let Some(link) = metadata.for_identity(&ConnectSpec::identity()) { + SchemaDefinitionPosition.insert_directive(to, link.to_directive_application().into())?; + } + + // before copying over directive definitions, we need to ensure we copy over + // any input types (scalars, enums, input objects) they use + copy_input_types(from, to, subgraph_name_replacements)?; + // @inaccessible if let Some(link) = metadata.for_identity(&Identity::inaccessible_identity()) { @@ -131,21 +145,6 @@ pub(super) fn carryover_directives( SchemaDefinitionPosition .insert_directive(to, link.to_directive_application().into())?; - let scalar_type_pos = ScalarTypeDefinitionPosition { - type_name: link.type_name_in_schema(&name!(Scope)), - }; - - // The scalar might already exist if a subgraph defined it - if scalar_type_pos.get(to.schema()).is_err() { - scalar_type_pos - .get(from.schema()) - .map_err(From::from) - .and_then(|def| { - scalar_type_pos.pre_insert(to)?; - scalar_type_pos.insert(to, def.clone()) - })?; - } - copy_directive_definition(from, to, directive_name.clone())?; } referencers.copy_directives(from, to, &directive_name) @@ -166,21 +165,6 @@ pub(super) fn carryover_directives( SchemaDefinitionPosition .insert_directive(to, link.to_directive_application().into())?; - let scalar_type_pos = ScalarTypeDefinitionPosition { - type_name: link.type_name_in_schema(&name!(Policy)), - }; - - // The scalar might already 
exist if a subgraph defined it - if scalar_type_pos.get(to.schema()).is_err() { - scalar_type_pos - .get(from.schema()) - .map_err(From::from) - .and_then(|def| { - scalar_type_pos.pre_insert(to)?; - scalar_type_pos.insert(to, def.clone()) - })?; - } - copy_directive_definition(from, to, directive_name.clone())?; } referencers.copy_directives(from, to, &directive_name) @@ -665,7 +649,7 @@ mod tests { #[test] fn test_carryover() { - let sdl = include_str!("./tests/schemas/ignore/directives.graphql"); + let sdl = include_str!("./tests/schemas/expand/directives.graphql"); let schema = Schema::parse(sdl, "directives.graphql").expect("parse failed"); let supergraph_schema = FederationSchema::new(schema).expect("federation schema failed"); let subgraphs = extract_subgraphs_from_supergraph(&supergraph_schema, None) @@ -678,6 +662,7 @@ mod tests { &supergraph_schema, &mut schema, [ConnectSpec::V0_1].into_iter(), + &Default::default(), ) .expect("carryover failed"); assert_snapshot!(schema.schema().serialize().to_string()); diff --git a/apollo-federation/src/sources/connect/expand/carryover/inputs.rs b/apollo-federation/src/sources/connect/expand/carryover/inputs.rs new file mode 100644 index 0000000000..5ad5f15dea --- /dev/null +++ b/apollo-federation/src/sources/connect/expand/carryover/inputs.rs @@ -0,0 +1,323 @@ +use apollo_compiler::Name; +use apollo_compiler::Node; +use apollo_compiler::ast; +use apollo_compiler::ast::Value; +use apollo_compiler::collections::HashMap; +use apollo_compiler::name; +use apollo_compiler::schema::DirectiveList; +use apollo_compiler::schema::EnumType; +use apollo_compiler::schema::ExtendedType; +use apollo_compiler::schema::InputObjectType; +use apollo_compiler::schema::ScalarType; +use itertools::Itertools; +use multimap::MultiMap; + +use crate::error::FederationError; +use crate::schema::FederationSchema; +use crate::schema::position::EnumTypeDefinitionPosition; +use crate::schema::position::InputObjectTypeDefinitionPosition; +use 
crate::schema::position::ScalarTypeDefinitionPosition; + +/// merge.rs doesn't have any logic for `@composeDirective` directives, so we +/// need to carry those directives AND their associated input types over into +/// the new supergraph. +/// +/// However, we can't just copy the definitions as-is, because their join__* +/// directives may reference subgraphs that no longer exist (were replaced by +/// "expanded" subgraphs/connectors). Each time we encounter a join__* directive +/// with a `graph:` argument referring to a missing subgraph, we'll need to +/// replace it with **one or more** new directives, one for each "expanded" +/// subgraph. +pub(super) fn copy_input_types( + from: &FederationSchema, + to: &mut FederationSchema, + subgraph_name_replacements: &MultiMap<&str, String>, +) -> Result<(), FederationError> { + let from_join_graph_enum = from + .schema() + .get_enum(&name!(join__Graph)) + .ok_or_else(|| FederationError::internal("Cannot find join__graph enum"))?; + let to_join_graph_enum = to + .schema() + .get_enum(&name!(join__Graph)) + .ok_or_else(|| FederationError::internal("Cannot find join__graph enum"))?; + let subgraph_enum_replacements = subgraph_replacements( + from_join_graph_enum, + to_join_graph_enum, + subgraph_name_replacements, + ) + .map_err(|e| FederationError::internal(format!("Failed to get subgraph replacements: {e}")))?; + + for (name, ty) in &from.schema().types { + if to.schema().types.contains_key(name) { + continue; + } + match ty { + ExtendedType::Scalar(node) => { + let pos = ScalarTypeDefinitionPosition { + type_name: node.name.clone(), + }; + let node = + strip_invalid_join_directives_from_scalar(node, &subgraph_enum_replacements); + pos.pre_insert(to).ok(); + pos.insert(to, node).ok(); + } + ExtendedType::Enum(node) => { + let pos = EnumTypeDefinitionPosition { + type_name: node.name.clone(), + }; + let node = + strip_invalid_join_directives_from_enum(node, &subgraph_enum_replacements); + pos.pre_insert(to).ok(); + 
pos.insert(to, node).ok(); + } + ExtendedType::InputObject(node) => { + let pos = InputObjectTypeDefinitionPosition { + type_name: node.name.clone(), + }; + let node = strip_invalid_join_directives_from_input_type( + node, + &subgraph_enum_replacements, + ); + pos.pre_insert(to).ok(); + pos.insert(to, node).ok(); + } + _ => {} + } + } + + Ok(()) +} + +/// Given an original join__Graph enum: +/// ```graphql +/// enum join__Graph { +/// REGULAR_SUBGRAPH @join__graph(name: "regular-subgraph") +/// CONNECTORS_SUBGRAPH @join__graph(name: "connectors-subgraph") +/// } +/// ``` +/// +/// and a new join__Graph enum: +/// ```graphql +/// enum join__Graph { +/// REGULAR_SUBGRAPH @join__graph(name: "regular-subgraph") +/// CONNECTORS_SUBGRAPH_QUERY_USER_0 @join__graph(name: "connectors-subgraph_Query_user_0") +/// CONNECTORS_SUBGRAPH_QUERY_USERS_0 @join__graph(name: "connectors-subgraph_Query_users_0") +/// } +/// ``` +/// +/// and a map of original subgraph names to new subgraph names: +/// ``` +/// { +/// "connectors-subgraph" => vec!["connectors-subgraph_Query_user_0", "connectors-subgraph_Query_users_0"] +/// } +/// ``` +/// +/// Return a map of enum value replacements: +/// ``` +/// { +/// "CONNECTORS_SUBGRAPH" => vec!["CONNECTORS_SUBGRAPH_QUERY_USER_0", "CONNECTORS_SUBGRAPH_QUERY_USERS_0"], +/// } +/// ``` +fn subgraph_replacements( + from_join_graph_enum: &EnumType, + to_join_graph_enum: &EnumType, + replaced_subgraph_names: &MultiMap<&str, String>, +) -> Result, String> { + let mut replacements = MultiMap::new(); + + fn subgraph_names_to_enum_values(enum_type: &EnumType) -> Result, &str> { + enum_type + .values + .iter() + .map(|(name, value)| { + value + .directives + .iter() + .find(|d| d.name == name!(join__graph)) + .and_then(|d| { + d.arguments + .iter() + .find(|a| a.name == name!(name)) + .and_then(|a| a.value.as_str()) + }) + .ok_or("no name argument on join__graph") + .map(|new_subgraph_name| (new_subgraph_name, name)) + }) + .try_collect() + } + + let 
new_subgraph_names_to_enum_values = subgraph_names_to_enum_values(to_join_graph_enum)?; + + let original_subgraph_names_to_enum_values = + subgraph_names_to_enum_values(from_join_graph_enum)?; + + for (original_subgraph_name, new_subgraph_names) in replaced_subgraph_names.iter_all() { + if let Some(original_enum_value) = original_subgraph_names_to_enum_values + .get(original_subgraph_name) + .cloned() + { + for new_subgraph_name in new_subgraph_names { + if let Some(new_enum_value) = new_subgraph_names_to_enum_values + .get(new_subgraph_name.as_str()) + .cloned() + { + replacements.insert(original_enum_value.clone(), new_enum_value.clone()); + } + } + } + } + + Ok(replacements) +} + +/// Given a list of directives and a directive name like `@join__type` or `@join__enumValue`, +/// replace the `graph:` argument with a new directive for each subgraph name in the +/// `replaced_subgraph_names` map. +fn replace_join_enum( + directives: &DirectiveList, + directive_name: &Name, + replaced_subgraph_names: &MultiMap, +) -> DirectiveList { + let mut new_directives = DirectiveList::new(); + for d in directives.iter() { + if &d.name == directive_name { + let Some(graph_arg) = d + .arguments + .iter() + .find(|a| a.name == name!(graph)) + .and_then(|a| a.value.as_enum()) + else { + continue; + }; + + let Some(replacements) = replaced_subgraph_names.get_vec(graph_arg) else { + new_directives.push(d.clone()); + continue; + }; + + for replacement in replacements { + let mut new_directive = d.clone(); + let new_directive = new_directive.make_mut(); + if let Some(a) = new_directive + .arguments + .iter_mut() + .find(|a| a.name == name!(graph)) + { + let a = a.make_mut(); + a.value = Value::Enum(replacement.clone()).into(); + }; + new_directives.push(new_directive.clone()); + } + } else { + new_directives.push(d.clone()); + } + } + new_directives +} + +/// Unfortunately, there are two different DirectiveList types, so this +/// function is duplicated. 
+fn replace_join_enum_ast( + directives: &ast::DirectiveList, + directive_name: &Name, + replaced_subgraph_names: &MultiMap, +) -> ast::DirectiveList { + let mut new_directives = ast::DirectiveList::new(); + for d in directives.iter() { + if &d.name == directive_name { + let Some(graph_arg) = d + .arguments + .iter() + .find(|a| a.name == name!(graph)) + .and_then(|a| a.value.as_enum()) + else { + continue; + }; + + let Some(replacements) = replaced_subgraph_names.get_vec(graph_arg) else { + new_directives.push(d.clone()); + continue; + }; + + for replacement in replacements { + let mut new_directive = d.clone(); + let new_directive = new_directive.make_mut(); + if let Some(a) = new_directive + .arguments + .iter_mut() + .find(|a| a.name == name!(graph)) + { + let a = a.make_mut(); + a.value = Value::Enum(replacement.clone()).into(); + }; + new_directives.push(new_directive.clone()); + } + } else { + new_directives.push(d.clone()); + } + } + new_directives +} + +fn strip_invalid_join_directives_from_input_type( + node: &InputObjectType, + replaced_subgraph_names: &MultiMap, +) -> Node { + let mut node = node.clone(); + + node.directives = replace_join_enum( + &node.directives, + &name!(join__type), + replaced_subgraph_names, + ); + + for (_, field) in node.fields.iter_mut() { + let field = field.make_mut(); + field.directives = replace_join_enum_ast( + &field.directives, + &name!(join__field), + replaced_subgraph_names, + ); + } + + node.into() +} + +fn strip_invalid_join_directives_from_enum( + node: &EnumType, + replaced_subgraph_names: &MultiMap, +) -> Node { + let mut node = node.clone(); + + node.directives = replace_join_enum( + &node.directives, + &name!(join__type), + replaced_subgraph_names, + ); + + for (_, value) in node.values.iter_mut() { + let value = value.make_mut(); + value.directives = replace_join_enum_ast( + &value.directives, + &name!(join__enumValue), + replaced_subgraph_names, + ); + } + node.into() +} + +fn 
strip_invalid_join_directives_from_scalar( + node: &ScalarType, + replaced_subgraph_names: &MultiMap, +) -> Node { + let mut node = node.clone(); + + node.directives = replace_join_enum( + &node.directives, + &name!(join__type), + replaced_subgraph_names, + ); + + node.into() +} diff --git a/apollo-federation/src/sources/connect/expand/mod.rs b/apollo-federation/src/sources/connect/expand/mod.rs index 5a46cfc0ab..a2659169b0 100644 --- a/apollo-federation/src/sources/connect/expand/mod.rs +++ b/apollo-federation/src/sources/connect/expand/mod.rs @@ -6,6 +6,7 @@ use apollo_compiler::validation::Valid; use carryover::carryover_directives; use indexmap::IndexMap; use itertools::Itertools; +use multimap::MultiMap; use crate::ApiSchemaOptions; use crate::Supergraph; @@ -93,11 +94,22 @@ pub fn expand_connectors( FederationError::internal(format!("could not merge expanded subgraphs: {e:?}")) })?; + let subgraph_name_replacements = expanded_subgraphs + .iter() + .map(|(connector, _)| { + ( + connector.id.subgraph_name.as_str(), + connector.id.synthetic_name(), + ) + }) + .collect::>(); + let mut new_supergraph = FederationSchema::new(new_supergraph.schema.into_inner())?; carryover_directives( &supergraph.schema, &mut new_supergraph, spec_versions.into_iter(), + &subgraph_name_replacements, ) .map_err(|e| FederationError::internal(format!("could not carry over directives: {e:?}")))?; @@ -254,7 +266,7 @@ mod helpers { } impl<'a> Expander<'a> { - pub(super) fn new(link: &Link, subgraph: &'a ValidFederationSubgraph) -> Expander<'a> { + pub(super) fn new(link: &Link, subgraph: &'a ValidFederationSubgraph) -> Self { let connect_name = ConnectSpec::connect_directive_name(link); let source_name = ConnectSpec::source_directive_name(link); @@ -273,14 +285,16 @@ mod helpers { .schema .metadata() .and_then(|m| m.for_identity(&Identity::federation_identity())) - .map(|f| f.directive_name_in_schema(&KEY_DIRECTIVE_NAME)) - .unwrap_or(KEY_DIRECTIVE_NAME); + .map_or(KEY_DIRECTIVE_NAME, |f| 
{ + f.directive_name_in_schema(&KEY_DIRECTIVE_NAME) + }); let interface_object_name = subgraph .schema .metadata() .and_then(|m| m.for_identity(&Identity::federation_identity())) - .map(|f| f.directive_name_in_schema(&INTF_OBJECT_DIRECTIVE_NAME)) - .unwrap_or(INTF_OBJECT_DIRECTIVE_NAME); + .map_or(INTF_OBJECT_DIRECTIVE_NAME, |f| { + f.directive_name_in_schema(&INTF_OBJECT_DIRECTIVE_NAME) + }); let extra_excluded = [EXTERNAL_DIRECTIVE_NAME, REQUIRES_DIRECTIVE_NAME] .into_iter() .map(|d| { @@ -361,9 +375,15 @@ mod helpers { ) .walk(( object, - connector.selection.next_subselection().cloned().ok_or( - FederationError::internal("empty selections are not allowed"), - )?, + connector + .selection + .next_subselection() + .cloned() + .ok_or_else(|| { + FederationError::internal( + "empty selections are not allowed", + ) + })?, ))?; } @@ -431,9 +451,13 @@ mod helpers { ObjectTypeDefinitionPosition { type_name: type_def.name.clone(), }, - connector.selection.next_subselection().cloned().ok_or( - FederationError::internal("empty selections are not allowed"), - )?, + connector + .selection + .next_subselection() + .cloned() + .ok_or_else(|| { + FederationError::internal("empty selections are not allowed") + })?, ))?; // we need a Query root field to be valid @@ -508,7 +532,7 @@ mod helpers { }; let parent_type = self.original_schema.get_type(parent_type_name)?; - let output_type = to_schema.get_type(output_type_name.clone())?; + let output_type = to_schema.get_type(output_type_name)?; let key_for_type = match &connector.entity_resolver { Some(EntityResolver::Explicit) => output_type, _ => parent_type, @@ -537,9 +561,7 @@ mod helpers { parsed .next_subselection() .cloned() - .ok_or(FederationError::internal( - "empty selections are not allowed", - ))?, + .ok_or_else(|| FederationError::internal("empty selections are not allowed"))?, ))?; // This actually adds the key fields if necessary, which is only diff --git 
a/apollo-federation/src/sources/connect/expand/snapshots/apollo_federation__sources__connect__expand__carryover__tests__carryover.snap b/apollo-federation/src/sources/connect/expand/snapshots/apollo_federation__sources__connect__expand__carryover__tests__carryover.snap index 8de6c07c42..d132b4a2f9 100644 --- a/apollo-federation/src/sources/connect/expand/snapshots/apollo_federation__sources__connect__expand__carryover__tests__carryover.snap +++ b/apollo-federation/src/sources/connect/expand/snapshots/apollo_federation__sources__connect__expand__carryover__tests__carryover.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/sources/connect/expand/carryover.rs expression: schema.schema().serialize().to_string() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @link(url: "https://specs.apollo.dev/tag/v0.3") @link(url: "https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/requiresScopes/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/policy/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom1", "@custom2", {name: "@originalName", as: "@custom3"}]) @link(url: "http://bugfix/weird/v1.0", import: ["@weird"]) @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @link(url: 
"https://specs.apollo.dev/tag/v0.3") @link(url: "https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/requiresScopes/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/policy/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom1", "@custom2", {name: "@originalName", as: "@custom3"}]) @link(url: "http://bugfix/weird/v1.0", import: ["@weird"]) @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) { query: Query } @@ -103,6 +103,8 @@ type Z @join__type(graph: TWO, key: "id") @context(name: "two__ctx") { x: X @join__field(graph: TWO, type: "X") } -scalar requiresScopes__Scope +scalar context__ContextFieldValue scalar policy__Policy + +scalar requiresScopes__Scope diff --git a/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.graphql b/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.graphql index eac50490a4..dd48bf1e8b 100644 --- a/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.graphql +++ b/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.graphql @@ -8,7 +8,9 @@ schema @link(url: "https://specs.apollo.dev/policy/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) + @link(url: "http://specs.example.org/custom2/v0.1", import: ["@custom2"]) + @link(url: "http://specs.example.org/custom3/v0.1", import: ["@custom3"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) @join__directive(graphs: [ONE], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect", "@source"]}) @join__directive(graphs: [ONE], name: "source", args: {name: "json", http: {baseURL: 
"http://example/"}}) { @@ -21,7 +23,11 @@ directive @context(name: String!) repeatable on INTERFACE | OBJECT | UNION directive @context__fromContext(field: context__ContextFieldValue) on ARGUMENT_DEFINITION -directive @custom on OBJECT | FIELD_DEFINITION +directive @custom(s: custom__Scalar, e: custom__Enum, i: custom__Input) on OBJECT | FIELD_DEFINITION + +directive @custom2(s: custom__Scalar2, e: custom__Enum2, i: custom__Input2) on OBJECT | FIELD_DEFINITION + +directive @custom3(s: custom__Scalar3, e: custom__Enum3, i: custom__Input3) on OBJECT | FIELD_DEFINITION directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION @@ -49,6 +55,60 @@ directive @tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFAC scalar context__ContextFieldValue +enum custom__Enum + @join__type(graph: ONE) + @join__type(graph: TWO) +{ + ONE @join__enumValue(graph: ONE) @join__enumValue(graph: TWO) + TWO @join__enumValue(graph: ONE) @join__enumValue(graph: TWO) +} + +enum custom__Enum2 + @join__type(graph: ONE) +{ + ONE @join__enumValue(graph: ONE) + TWO @join__enumValue(graph: ONE) +} + +enum custom__Enum3 + @join__type(graph: TWO) +{ + ONE @join__enumValue(graph: TWO) + TWO @join__enumValue(graph: TWO) +} + +input custom__Input + @join__type(graph: ONE) + @join__type(graph: TWO) +{ + one: String + two: String +} + +input custom__Input2 + @join__type(graph: ONE) +{ + one: String + two: String +} + +input custom__Input3 + @join__type(graph: TWO) +{ + one: String + two: String +} + +scalar custom__Scalar + @join__type(graph: ONE) + @join__type(graph: TWO) + +scalar custom__Scalar2 + @join__type(graph: ONE) + +scalar custom__Scalar3 + @join__type(graph: TWO) + input join__ContextArgument { name: String! type: String! @@ -107,7 +167,7 @@ type T id: ID! 
tagged: String @join__field(graph: ONE) @tag(name: "tag") hidden: String @inaccessible @join__field(graph: ONE) - custom: String @join__field(graph: ONE) @custom + custom: String @join__field(graph: ONE) @custom @custom2 authenticated: String @join__field(graph: ONE) @authenticated requiresScopes: String @join__field(graph: ONE) @requiresScopes(scopes: ["scope"]) policy: String @join__field(graph: ONE) @policy(policies: [["admin"]]) @@ -127,6 +187,6 @@ type Z @context(name: "two__ctx") { id: ID! - y: String - x: X + y: String @custom(s: "x", e: ONE, i: {one: "one"}) + x: X @custom3(s: "x", e: ONE, i: {one: "one"}) } diff --git a/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.yaml b/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.yaml index c99b66b2a5..079726c0d3 100644 --- a/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.yaml +++ b/apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.yaml @@ -12,10 +12,12 @@ subgraphs: ] ) @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom"]) + @link(url: "http://specs.example.org/custom2/v0.1", import: ["@custom2"]) @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect", "@source"]) @composeDirective(name: "@custom") + @composeDirective(name: "@custom2") @source(name: "json" http: { baseURL: "http://example/" }) - directive @custom on OBJECT | FIELD_DEFINITION + type Query { ts: [T] @connect( source: "json" @@ -52,7 +54,7 @@ subgraphs: id: ID! tagged: String @tag(name: "tag") hidden: String @inaccessible - custom: String @custom + custom: String @custom @custom2 authenticated: String @authenticated requiresScopes: String @requiresScopes(scopes: ["scope"]) policy: String @policy(policies: [["admin"]]) @@ -67,12 +69,54 @@ subgraphs: type R { id: ID! 
} + + # bug fix: this won't compose until it's fixed and released in federation + # the graphql file is currently hand-edited to add these definitions + # + # @custom appears in both subgraphs, so will be merged appropriately, and it will attributed only to the non-connector subgraph + # @custom2 appears in the connector subgraph, so we have to add it and rewrite the join__* directives + # @custom3 appears in the non-connector subgraph, so it's composed appropriately + # + # this won't compose until after 2.11.0-preview.3 + + directive @custom(s: custom__Scalar, e: custom__Enum, i: custom__Input) on OBJECT | FIELD_DEFINITION + + scalar custom__Scalar + + enum custom__Enum { + ONE + TWO + } + + input custom__Input { + one: String + two: String + } + + directive @custom2(s: custom__Scalar2, e: custom__Enum2, i: custom__Input2) on OBJECT | FIELD_DEFINITION + + scalar custom__Scalar2 + + enum custom__Enum2 { + ONE + TWO + } + + input custom__Input2 { + one: String + two: String + } two: routing_url: none schema: sdl: | extend schema - @link(url: "https://specs.apollo.dev/federation/v2.8", import: ["@key", "@context", "@fromContext"]) + @link(url: "https://specs.apollo.dev/federation/v2.8", import: ["@key", "@context", "@fromContext", "@composeDirective"]) + @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom"]) + @link(url: "http://specs.example.org/custom3/v0.1", import: ["@custom3"]) + @composeDirective(name: "@custom") + @composeDirective(name: "@custom3") + type T @key(fields: "id") { id: ID! overridden: String @@ -84,11 +128,38 @@ subgraphs: type Z @key(fields: "id") @context(name: "ctx") { id: ID! - y: String - x: X + y: String @custom(s: "x", e: ONE, i: { one: "one" }) + x: X @custom3(s: "x", e: ONE, i: { one: "one" }) } type X @key(fields: "id") { id: ID! 
w(z: String @fromContext(field: "$$ctx { y }")): String - } \ No newline at end of file + } + + directive @custom(s: custom__Scalar, e: custom__Enum, i: custom__Input) on OBJECT | FIELD_DEFINITION + scalar custom__Scalar + + enum custom__Enum { + ONE + TWO + } + + input custom__Input { + one: String + two: String + } + + directive @custom3(s: custom__Scalar3, e: custom__Enum3, i: custom__Input3) on OBJECT | FIELD_DEFINITION + + scalar custom__Scalar3 + + enum custom__Enum3 { + ONE + TWO + } + + input custom__Input3 { + one: String + two: String + } diff --git a/apollo-federation/src/sources/connect/expand/tests/schemas/ignore/directives.graphql b/apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.graphql similarity index 98% rename from apollo-federation/src/sources/connect/expand/tests/schemas/ignore/directives.graphql rename to apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.graphql index 90827644cb..6f100f5798 100644 --- a/apollo-federation/src/sources/connect/expand/tests/schemas/ignore/directives.graphql +++ b/apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.graphql @@ -1,6 +1,7 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) @link(url: "https://specs.apollo.dev/tag/v0.3") @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @link(url: "https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) diff --git a/apollo-federation/src/sources/connect/expand/tests/schemas/ignore/directives.yaml b/apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.yaml similarity index 100% rename from apollo-federation/src/sources/connect/expand/tests/schemas/ignore/directives.yaml rename to apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.yaml diff --git 
a/apollo-federation/src/sources/connect/expand/tests/snapshots/api@directives.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/api@directives.graphql.snap new file mode 100644 index 0000000000..2fd9e95ed7 --- /dev/null +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/api@directives.graphql.snap @@ -0,0 +1,33 @@ +--- +source: apollo-federation/src/sources/connect/expand/tests/mod.rs +expression: api_schema +input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.graphql +--- +directive @defer(label: String, if: Boolean! = true) on FRAGMENT_SPREAD | INLINE_FRAGMENT + +type Query { + tagged: String + custom: T + authenticated: String + requiresScopes: String + policy: String + overridden: String + weird: [String] + customAgain: String + z: Z +} + +type T { + field: String +} + +type X { + id: ID! + w: String +} + +type Z { + id: ID! + y: String + x: X +} diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/connectors@directives.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/connectors@directives.graphql.snap new file mode 100644 index 0000000000..acf25e0bff --- /dev/null +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/connectors@directives.graphql.snap @@ -0,0 +1,6 @@ +--- +source: apollo-federation/src/sources/connect/expand/tests/mod.rs +expression: connectors.by_service_name +input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.graphql +--- +{} diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@batch.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@batch.graphql.snap index b9916ca9ff..01dcfa96ce 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@batch.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@batch.graphql.snap @@ -3,7 +3,7 @@ 
source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/batch.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.2"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.2"}) @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@carryover.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@carryover.graphql.snap index f17bc8fea2..9306cde07b 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@carryover.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@carryover.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/carryover.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/tag/v0.3") @link(url: "https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/requiresScopes/v0.1", for: SECURITY) @link(url: 
"https://specs.apollo.dev/policy/v0.1", for: SECURITY) @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom"]) @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) @link(url: "https://specs.apollo.dev/tag/v0.3") @link(url: "https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/requiresScopes/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/policy/v0.1", for: SECURITY) @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom"]) @link(url: "http://specs.example.org/custom2/v0.1", import: ["@custom2"]) @link(url: "http://specs.example.org/custom3/v0.1", import: ["@custom3"]) @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) { query: Query } @@ -33,7 +33,11 @@ directive @requiresScopes(scopes: [[requiresScopes__Scope!]!]!) on FIELD_DEFINIT directive @policy(policies: [[policy__Policy!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM -directive @custom on OBJECT | FIELD_DEFINITION +directive @custom(s: custom__Scalar, e: custom__Enum, i: custom__Input) on OBJECT | FIELD_DEFINITION + +directive @custom2(s: custom__Scalar2, e: custom__Enum2, i: custom__Input2) on OBJECT | FIELD_DEFINITION + +directive @custom3(s: custom__Scalar3, e: custom__Enum3, i: custom__Input3) on OBJECT | FIELD_DEFINITION directive @context(name: String!) 
repeatable on INTERFACE | OBJECT | UNION @@ -70,7 +74,7 @@ enum join__Graph { type T @join__type(graph: ONE_QUERY_T_0, key: "id") @join__type(graph: ONE_QUERY_TS_0) @join__type(graph: ONE_T_R_0, key: "id") @join__type(graph: TWO, key: "id") { authenticated: String @join__field(graph: ONE_QUERY_T_0, type: "String") @join__field(graph: ONE_QUERY_TS_0, type: "String") @authenticated - custom: String @join__field(graph: ONE_QUERY_T_0, type: "String") @join__field(graph: ONE_QUERY_TS_0, type: "String") @custom + custom: String @join__field(graph: ONE_QUERY_T_0, type: "String") @join__field(graph: ONE_QUERY_TS_0, type: "String") @custom @custom2 hidden: String @join__field(graph: ONE_QUERY_T_0, type: "String") @join__field(graph: ONE_QUERY_TS_0, type: "String") @inaccessible id: ID! @join__field(graph: ONE_QUERY_T_0, type: "ID!") @join__field(graph: ONE_QUERY_TS_0, type: "ID!") @join__field(graph: ONE_T_R_0, type: "ID!") @join__field(graph: TWO, type: "ID!") overridden: String @join__field(graph: ONE_QUERY_T_0, override: "two", overrideLabel: "label", type: "String") @join__field(graph: ONE_QUERY_TS_0, override: "two", overrideLabel: "label", type: "String") @join__field(graph: TWO, type: "String") @@ -91,6 +95,30 @@ type R @join__type(graph: ONE_T_R_0) { id: ID! 
@join__field(graph: ONE_T_R_0, type: "ID!") } +enum custom__Enum @join__type(graph: TWO) { + ONE @join__enumValue(graph: TWO) + TWO @join__enumValue(graph: TWO) +} + +enum custom__Enum3 @join__type(graph: TWO) { + ONE @join__enumValue(graph: TWO) + TWO @join__enumValue(graph: TWO) +} + +input custom__Input @join__type(graph: TWO) { + one: String @join__field(graph: TWO, type: "String") + two: String @join__field(graph: TWO, type: "String") +} + +input custom__Input3 @join__type(graph: TWO) { + one: String @join__field(graph: TWO, type: "String") + two: String @join__field(graph: TWO, type: "String") +} + +scalar custom__Scalar @join__type(graph: TWO) + +scalar custom__Scalar3 @join__type(graph: TWO) + type X @join__type(graph: TWO, key: "id") { id: ID! @join__field(graph: TWO, type: "ID!") w: String @join__field(graph: TWO, type: "String", contextArguments: [{context: "two__ctx", name: "z", type: "String", selection: " { y }"}]) @@ -98,10 +126,24 @@ type X @join__type(graph: TWO, key: "id") { type Z @join__type(graph: TWO, key: "id") @context(name: "two__ctx") { id: ID! 
@join__field(graph: TWO, type: "ID!") - y: String @join__field(graph: TWO, type: "String") - x: X @join__field(graph: TWO, type: "X") + y: String @join__field(graph: TWO, type: "String") @custom(s: "x", e: ONE, i: {one: "one"}) + x: X @join__field(graph: TWO, type: "X") @custom3(s: "x", e: ONE, i: {one: "one"}) } -scalar requiresScopes__Scope +scalar context__ContextFieldValue + +enum custom__Enum2 @join__type(graph: ONE_QUERY_TS_0) @join__type(graph: ONE_QUERY_T_0) @join__type(graph: ONE_T_R_0) { + ONE @join__enumValue(graph: ONE_QUERY_TS_0) @join__enumValue(graph: ONE_QUERY_T_0) @join__enumValue(graph: ONE_T_R_0) + TWO @join__enumValue(graph: ONE_QUERY_TS_0) @join__enumValue(graph: ONE_QUERY_T_0) @join__enumValue(graph: ONE_T_R_0) +} + +input custom__Input2 @join__type(graph: ONE_QUERY_TS_0) @join__type(graph: ONE_QUERY_T_0) @join__type(graph: ONE_T_R_0) { + one: String + two: String +} + +scalar custom__Scalar2 @join__type(graph: ONE_QUERY_TS_0) @join__type(graph: ONE_QUERY_T_0) @join__type(graph: ONE_T_R_0) scalar policy__Policy + +scalar requiresScopes__Scope diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@directives.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@directives.graphql.snap new file mode 100644 index 0000000000..57e0454f59 --- /dev/null +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@directives.graphql.snap @@ -0,0 +1,111 @@ +--- +source: apollo-federation/src/sources/connect/expand/tests/mod.rs +expression: raw_sdl +input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/directives.graphql +--- +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @link(url: "https://specs.apollo.dev/tag/v0.3") @link(url: 
"https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/requiresScopes/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/policy/v0.1", for: SECURITY) @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) @link(url: "http://specs.example.org/custom/v0.1", import: ["@custom1", "@custom2", {name: "@originalName", as: "@custom3"}]) @link(url: "http://bugfix/weird/v1.0", import: ["@weird"]) @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) { + query: Query +} + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on ENUM | INPUT_OBJECT | INTERFACE | OBJECT | SCALAR | UNION + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, overrideLabel: String, usedOverridden: Boolean, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on INTERFACE | OBJECT + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments!) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @requiresScopes(scopes: [[requiresScopes__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @policy(policies: [[policy__Policy!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +directive @custom1 on OBJECT | FIELD_DEFINITION + +directive @custom2 on OBJECT | FIELD_DEFINITION + +directive @custom3 on OBJECT | FIELD_DEFINITION + +directive @weird on FIELD | FIELD_DEFINITION + +directive @context(name: String!) repeatable on INTERFACE | OBJECT | UNION + +enum link__Purpose { + """ + SECURITY features provide metadata necessary to securely resolve fields. + """ + SECURITY + """EXECUTION features provide metadata necessary for operation execution.""" + EXECUTION +} + +scalar link__Import + +scalar join__FieldSet + +scalar join__FieldValue + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! 
+} + +scalar join__DirectiveArguments + +enum join__Graph { + ONE @join__graph(name: "one", url: "none") + TWO @join__graph(name: "two", url: "none") +} + +type Query @join__type(graph: ONE) @join__type(graph: TWO) { + tagged: String @join__field(graph: ONE, type: "String") @tag(name: "tag") + hidden: String @join__field(graph: ONE, type: "String") @inaccessible + custom: T @join__field(graph: ONE, type: "T") @custom1 + authenticated: String @join__field(graph: ONE, type: "String") @authenticated + requiresScopes: String @join__field(graph: ONE, type: "String") @requiresScopes(scopes: ["scope"]) + policy: String @join__field(graph: ONE, type: "String") @policy(policies: [["admin"]]) + overridden: String @join__field(graph: ONE, override: "two", overrideLabel: "label", type: "String") @join__field(graph: TWO, type: "String") + weird: [String] @join__field(graph: ONE, type: "[String]") @listSize(assumedSize: 99) @weird + customAgain: String @join__field(graph: TWO, type: "String") @custom1 + z: Z @join__field(graph: TWO, type: "Z") +} + +type T @join__type(graph: ONE) @custom2 { + field: String @join__field(graph: ONE, type: "String") @cost(weight: 5) @custom3 +} + +type X @join__type(graph: TWO, key: "id") { + id: ID! @join__field(graph: TWO, type: "ID!") + w: String @join__field(graph: TWO, type: "String", contextArguments: [{context: "two__ctx", name: "z", type: "String", selection: " { y }"}]) +} + +type Z @join__type(graph: TWO, key: "id") @context(name: "two__ctx") { + id: ID! 
@join__field(graph: TWO, type: "ID!") + y: String @join__field(graph: TWO, type: "String") + x: X @join__field(graph: TWO, type: "X") +} + +scalar context__ContextFieldValue + +scalar policy__Policy + +scalar requiresScopes__Scope diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@interface-object.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@interface-object.graphql.snap index b49592d57b..e9eee9dbcf 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@interface-object.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@interface-object.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/interface-object.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@keys.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@keys.graphql.snap index 1ea89952c8..7aa4e812e2 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@keys.graphql.snap +++ 
b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@keys.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/keys.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@nested_inputs.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@nested_inputs.graphql.snap index c03667e667..6ce83a84d5 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@nested_inputs.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@nested_inputs.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/nested_inputs.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @join__directive(graphs: [], 
name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@normalize_names.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@normalize_names.graphql.snap index 6f67561d09..5c6c4d6e00 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@normalize_names.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@normalize_names.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/normalize_names.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@realistic.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@realistic.graphql.snap index 2778ebc804..a74fa63cbd 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@realistic.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@realistic.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/realistic.graphql --- 
-schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query mutation: Mutation } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@sibling_fields.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@sibling_fields.graphql.snap index 11c39fbd07..8359bfc509 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@sibling_fields.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@sibling_fields.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/sibling_fields.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: 
EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@simple.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@simple.graphql.snap index 62773c2e30..86b115729a 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@simple.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@simple.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/simple.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@steelthread.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@steelthread.graphql.snap index 3cb726816e..4e974cf228 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@steelthread.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@steelthread.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/steelthread.graphql --- -schema 
@link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query } @@ -69,3 +69,5 @@ type Query @join__type(graph: CONNECTORS_QUERY_USER_0) @join__type(graph: CONNEC users: [User] @join__field(graph: CONNECTORS_QUERY_USERS_0, type: "[User]") _: ID @inaccessible @join__field(graph: CONNECTORS_USER_D_1, type: "ID") } + +scalar JSON @join__type(graph: CONNECTORS_QUERY_USERS_0) @join__type(graph: CONNECTORS_QUERY_USER_0) @join__type(graph: CONNECTORS_USER_D_1) diff --git a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@types_used_twice.graphql.snap b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@types_used_twice.graphql.snap index 761ad8c362..bfcf3ef030 100644 --- a/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@types_used_twice.graphql.snap +++ b/apollo-federation/src/sources/connect/expand/tests/snapshots/supergraph@types_used_twice.graphql.snap @@ -3,7 +3,7 @@ source: apollo-federation/src/sources/connect/expand/tests/mod.rs expression: raw_sdl input_file: apollo-federation/src/sources/connect/expand/tests/schemas/expand/types_used_twice.graphql --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) { +schema @link(url: 
"https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @join__directive(graphs: [], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1"}) @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) { query: Query } diff --git a/apollo-federation/src/sources/connect/expand/visitors/input.rs b/apollo-federation/src/sources/connect/expand/visitors/input.rs index 541307877a..abbf38e961 100644 --- a/apollo-federation/src/sources/connect/expand/visitors/input.rs +++ b/apollo-federation/src/sources/connect/expand/visitors/input.rs @@ -23,9 +23,9 @@ impl FieldVisitor type Error = FederationError; fn visit<'a>(&mut self, field: InputObjectFieldDefinitionPosition) -> Result<(), Self::Error> { - let (_, r#type) = self.type_stack.last_mut().ok_or(FederationError::internal( - "tried to visit a field in a group not yet visited", - ))?; + let (_, r#type) = self.type_stack.last_mut().ok_or_else(|| { + FederationError::internal("tried to visit a field in a group not yet visited") + })?; // Extract the node info let field_def = field.get(self.original_schema.schema())?; @@ -122,9 +122,10 @@ impl GroupVisitor Result<(), FederationError> { - let (definition, r#type) = self.type_stack.pop().ok_or(FederationError::internal( - "tried to exit a group not yet visited", - ))?; + let (definition, r#type) = self + .type_stack + .pop() + .ok_or_else(|| FederationError::internal("tried to exit a group not yet visited"))?; // Now actually consolidate the object into our schema try_insert!(self.to_schema, definition, Node::new(r#type)) diff --git a/apollo-federation/src/sources/connect/expand/visitors/mod.rs b/apollo-federation/src/sources/connect/expand/visitors/mod.rs index ac211b99ae..5ff0ef53bb 100644 --- a/apollo-federation/src/sources/connect/expand/visitors/mod.rs +++ b/apollo-federation/src/sources/connect/expand/visitors/mod.rs @@ -178,7 +178,7 @@ impl<'a, Group, GroupType> SchemaVisitor<'a, Group, GroupType> { 
original_schema: &'a ValidFederationSchema, to_schema: &'a mut FederationSchema, directive_deny_list: &'a IndexSet, - ) -> SchemaVisitor<'a, Group, GroupType> { + ) -> Self { SchemaVisitor { directive_deny_list, original_schema, diff --git a/apollo-federation/src/sources/connect/expand/visitors/selection.rs b/apollo-federation/src/sources/connect/expand/visitors/selection.rs index fd0e82a102..e5b15e7fc5 100644 --- a/apollo-federation/src/sources/connect/expand/visitors/selection.rs +++ b/apollo-federation/src/sources/connect/expand/visitors/selection.rs @@ -33,9 +33,9 @@ impl FieldVisitor for SchemaVisitor<'_, ObjectTypeDefinitionPosi type Error = FederationError; fn visit<'a>(&mut self, field: NamedSelection) -> Result<(), Self::Error> { - let (definition, r#type) = self.type_stack.last_mut().ok_or(FederationError::internal( - "tried to visit a field in a group not yet entered", - ))?; + let (definition, r#type) = self.type_stack.last_mut().ok_or_else(|| { + FederationError::internal("tried to visit a field in a group not yet entered") + })?; // Get the type of the field so we know how to visit it for field_name in field.names() { @@ -138,9 +138,9 @@ impl GroupVisitor &self, field: &NamedSelection, ) -> Result, FederationError> { - let (definition, _) = self.type_stack.last().ok_or(FederationError::internal( - "tried to get fields on a group not yet visited", - ))?; + let (definition, _) = self.type_stack.last().ok_or_else(|| { + FederationError::internal("tried to get fields on a group not yet visited") + })?; match field.names().first() { Some(field_name) => { @@ -187,9 +187,10 @@ impl GroupVisitor } fn exit_group(&mut self) -> Result<(), FederationError> { - let (definition, r#type) = self.type_stack.pop().ok_or(FederationError::internal( - "tried to exit a group not yet entered", - ))?; + let (definition, r#type) = self + .type_stack + .pop() + .ok_or_else(|| FederationError::internal("tried to exit a group not yet entered"))?; try_insert!(self.to_schema, 
definition, Node::new(r#type)) } diff --git a/apollo-federation/src/sources/connect/id.rs b/apollo-federation/src/sources/connect/id.rs index e761c95ada..572bedc4c9 100644 --- a/apollo-federation/src/sources/connect/id.rs +++ b/apollo-federation/src/sources/connect/id.rs @@ -47,9 +47,9 @@ impl ConnectorPosition { None } }) - .ok_or(FederationError::internal( - "Parent type for connector not found", - ))?, + .ok_or_else(|| { + FederationError::internal("Parent type for connector not found") + })?, field_def: pos.field.get(schema).map_err(|_| { FederationError::internal("Field definition for connector not found") })?, @@ -72,7 +72,7 @@ impl ConnectorPosition { None } }) - .ok_or(FederationError::internal("Type for connector not found"))?, + .ok_or_else(|| FederationError::internal("Type for connector not found"))?, }), } } diff --git a/apollo-federation/src/sources/connect/json_selection/apply_to.rs b/apollo-federation/src/sources/connect/json_selection/apply_to.rs index 08710a0da4..0f1d16de78 100644 --- a/apollo-federation/src/sources/connect/json_selection/apply_to.rs +++ b/apollo-federation/src/sources/connect/json_selection/apply_to.rs @@ -95,13 +95,13 @@ impl JSONSelection { match self { Self::Named(selection) => selection.compute_output_shape( input_shape.clone(), - input_shape.clone(), + input_shape, named_var_shapes, source_id, ), Self::Path(path_selection) => path_selection.compute_output_shape( input_shape.clone(), - input_shape.clone(), + input_shape, named_var_shapes, source_id, ), @@ -173,7 +173,7 @@ pub struct ApplyToError { } impl ApplyToError { - pub(crate) fn new(message: String, path: Vec, range: OffsetRange) -> Self { + pub(crate) const fn new(message: String, path: Vec, range: OffsetRange) -> Self { Self { message, path, @@ -352,7 +352,7 @@ impl ApplyToInternal for NamedSelection { } else if *inline { match value_opt { Some(JSON::Object(map)) => { - output = Some(JSON::Object(map.clone())); + output = Some(JSON::Object(map)); } Some(JSON::Null) => 
{ output = Some(JSON::Null); @@ -490,7 +490,7 @@ impl ApplyToInternal for PathSelection { // *and* dollar_shape to self.path.compute_output_shape. self.path.compute_output_shape( dollar_shape.clone(), - dollar_shape.clone(), + dollar_shape, named_var_shapes, source_id, ) @@ -575,11 +575,8 @@ impl ApplyToInternal for WithRange { )], ); } - - if let Some(child) = data.get(key.as_str()) { - tail.apply_to_path(child, vars, &input_path_with_key) - } else { - ( + let Some(child) = data.get(key.as_str()) else { + return ( None, vec![ApplyToError::new( format!( @@ -590,8 +587,9 @@ impl ApplyToInternal for WithRange { input_path_with_key.to_vec(), key.range(), )], - ) - } + ); + }; + tail.apply_to_path(child, vars, &input_path_with_key) } } PathList::Expr(expr, tail) => expr @@ -601,31 +599,39 @@ impl ApplyToInternal for WithRange { let method_path = input_path.append(JSON::String(format!("->{}", method_name.as_ref()).into())); - if let Some(method) = ArrowMethod::lookup(method_name) { - let (result_opt, errors) = - method.apply(method_name, method_args.as_ref(), data, vars, &method_path); + ArrowMethod::lookup(method_name).map_or_else( + || { + ( + None, + vec![ApplyToError::new( + format!("Method ->{} not found", method_name.as_ref()), + method_path.to_vec(), + method_name.range(), + )], + ) + }, + |method| { + let (result_opt, errors) = method.apply( + method_name, + method_args.as_ref(), + data, + vars, + &method_path, + ); - if let Some(result) = result_opt { - tail.apply_to_path(&result, vars, &method_path) - .prepend_errors(errors) - } else { - // If the method produced no output, assume the errors - // explain the None. Methods can legitimately produce - // None without errors (like ->first or ->last on an - // empty array), so we do not report any blanket error - // here when errors.is_empty(). 
- (None, errors) - } - } else { - ( - None, - vec![ApplyToError::new( - format!("Method ->{} not found", method_name.as_ref()), - method_path.to_vec(), - method_name.range(), - )], - ) - } + if let Some(result) = result_opt { + tail.apply_to_path(&result, vars, &method_path) + .prepend_errors(errors) + } else { + // If the method produced no output, assume the errors + // explain the None. Methods can legitimately produce + // None without errors (like ->first or ->last on an + // empty array), so we do not report any blanket error + // here when errors.is_empty(). + (None, errors) + } + }, + ) } PathList::Selection(selection) => selection.apply_to_path(data, vars, input_path), PathList::Empty => { @@ -696,7 +702,7 @@ impl ApplyToInternal for WithRange { } else { rest.compute_output_shape( field(tail, key, source_id), - dollar_shape.clone(), + dollar_shape, named_var_shapes, source_id, ) @@ -706,7 +712,7 @@ impl ApplyToInternal for WithRange { } else { rest.compute_output_shape( field(&input_shape, key, source_id), - dollar_shape.clone(), + dollar_shape, named_var_shapes, source_id, ) @@ -720,21 +726,21 @@ impl ApplyToInternal for WithRange { named_var_shapes, source_id, ), - dollar_shape.clone(), + dollar_shape, named_var_shapes, source_id, ), - PathList::Method(method_name, _method_args, _tail) => { - if let Some(_method) = ArrowMethod::lookup(method_name) { - // TODO: call method.shape here to re-enable method type-checking - // call for each inner type of a One - Shape::unknown(method_name.shape_location(source_id)) - } else { - let message = format!("Method ->{} not found", method_name.as_str()); - Shape::error(message.as_str(), method_name.shape_location(source_id)) - } - } + PathList::Method(method_name, _method_args, _tail) => ArrowMethod::lookup(method_name) + .map_or_else( + || { + Shape::error( + format!("Method ->{} not found", method_name.as_str()), + method_name.shape_location(source_id), + ) + }, + |_method| 
Shape::unknown(method_name.shape_location(source_id)), + ), PathList::Selection(selection) => selection.compute_output_shape( input_shape, @@ -939,6 +945,7 @@ impl ApplyToInternal for SubSelection { // The SubSelection rebinds the $ variable to the selected input object, // so we can ignore _previous_dollar_shape. + #[expect(clippy::redundant_clone)] let dollar_shape = input_shape.clone(); // Build up the merged object shape using Shape::all to merge the @@ -1630,7 +1637,7 @@ mod tests { assert_eq!( selection!("nested.path { id: $args.id name }").apply_to(&json!({ "nested": { - "path": data.clone(), + "path": data, }, })), ( @@ -1953,7 +1960,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -1969,7 +1976,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -1985,7 +1992,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); } @@ -2012,7 +2019,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -2028,7 +2035,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -2044,7 +2051,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); } @@ -2073,7 +2080,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -2092,7 +2099,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -2109,7 +2116,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -2128,7 +2135,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); assert_eq!( @@ -2145,7 +2152,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); } @@ -2197,7 +2204,7 @@ mod tests { "# ) .apply_with_vars(&data, &vars), - expected.clone(), + expected, ); assert_eq!( @@ -2209,7 +2216,7 @@ mod tests { "# ) .apply_with_vars(&data, &vars), - expected.clone(), + expected, ); assert_eq!( @@ -2221,7 
+2228,7 @@ mod tests { "# ) .apply_with_vars(&data, &vars), - expected.clone(), + expected, ); assert_eq!( @@ -2233,7 +2240,7 @@ mod tests { "# ) .apply_with_vars(&data, &vars), - expected.clone(), + expected, ); assert_eq!( @@ -2354,7 +2361,7 @@ mod tests { "# ) .apply_to(&data), - expected.clone(), + expected, ); } diff --git a/apollo-federation/src/sources/connect/json_selection/helpers.rs b/apollo-federation/src/sources/connect/json_selection/helpers.rs index 0aeb70b3e3..90e18ae8f6 100644 --- a/apollo-federation/src/sources/connect/json_selection/helpers.rs +++ b/apollo-federation/src/sources/connect/json_selection/helpers.rs @@ -67,7 +67,7 @@ pub(crate) fn span_is_all_spaces_or_comments(input: Span) -> bool { } } -pub(crate) fn json_type_name(v: &JSON) -> &str { +pub(crate) const fn json_type_name(v: &JSON) -> &str { match v { JSON::Array(_) => "array", JSON::Object(_) => "object", @@ -128,7 +128,7 @@ pub(crate) fn json_merge(a: Option<&JSON>, b: Option<&JSON>) -> (Option, V ( Some(b.clone()), if json_type_of_a == json_type_of_b { - vec![] + Vec::new() } else { vec![format!( "Lossy merge replacing {} with {}", diff --git a/apollo-federation/src/sources/connect/json_selection/immutable.rs b/apollo-federation/src/sources/connect/json_selection/immutable.rs index c1e9acbb40..62fe47007e 100644 --- a/apollo-federation/src/sources/connect/json_selection/immutable.rs +++ b/apollo-federation/src/sources/connect/json_selection/immutable.rs @@ -15,8 +15,8 @@ struct AppendPath { } impl InputPath { - pub(crate) fn empty() -> InputPath { - InputPath { path: None } + pub(crate) const fn empty() -> Self { + Self { path: None } } pub(crate) fn append(&self, last: T) -> Self { diff --git a/apollo-federation/src/sources/connect/json_selection/lit_expr.rs b/apollo-federation/src/sources/connect/json_selection/lit_expr.rs index bcfcc8e913..3cd27e26fc 100644 --- a/apollo-federation/src/sources/connect/json_selection/lit_expr.rs +++ 
b/apollo-federation/src/sources/connect/json_selection/lit_expr.rs @@ -158,17 +158,26 @@ impl LitExpr { } number.push_str(num.as_str()); - if let Ok(lit_number) = number.parse().map(Self::Number) { - let range = merge_ranges(neg.and_then(|n| n.range()), num.range()); - Ok((suffix, WithRange::new(lit_number, range))) - } else { - Err(nom_error_message( - input, - // We could include the faulty number in the error message, but - // it will also appear at the beginning of the input span. - "Failed to parse numeric literal", - )) - } + number.parse().map(Self::Number).map_or_else( + |_| { + // CONSIDER USING THIS ERROR? now that we have access to them? + Err(nom_error_message( + input, + // We could include the faulty number in the error message, but + // it will also appear at the beginning of the input span. + "Failed to parse numeric literal", + )) + }, + |lit_number| { + Ok(( + suffix, + WithRange::new( + lit_number, + merge_ranges(neg.and_then(|n| n.range()), num.range()), + ), + )) + }, + ) } // LitObject ::= "{" (LitProperty ("," LitProperty)* ","?)? "}" @@ -487,7 +496,7 @@ mod tests { }); check_parse("a.b.c", expected.clone()); - check_parse(" a . b . c ", expected.clone()); + check_parse(" a . b . c ", expected); } { @@ -503,7 +512,7 @@ mod tests { .into_with_range(), }); check_parse("$.data", expected.clone()); - check_parse(" $ . data ", expected.clone()); + check_parse(" $ . data ", expected); } { @@ -568,7 +577,7 @@ mod tests { b . c , d . e . f , ]"#, - expected.clone(), + expected, ); } @@ -629,7 +638,7 @@ mod tests { a : $args . a , b : $this . 
b ,} "#, - expected.clone(), + expected, ); } } diff --git a/apollo-federation/src/sources/connect/json_selection/location.rs b/apollo-federation/src/sources/connect/json_selection/location.rs index f9f68d2fcf..94f7579d80 100644 --- a/apollo-federation/src/sources/connect/json_selection/location.rs +++ b/apollo-federation/src/sources/connect/json_selection/location.rs @@ -34,7 +34,7 @@ pub(crate) trait Ranged { fn range(&self) -> OffsetRange; fn shape_location(&self, source_id: &SourceId) -> Option { - self.range().map(|range| source_id.location(range.clone())) + self.range().map(|range| source_id.location(range)) } } diff --git a/apollo-federation/src/sources/connect/json_selection/methods.rs b/apollo-federation/src/sources/connect/json_selection/methods.rs index b5cd00eae8..7a718a3cb8 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods.rs @@ -35,6 +35,7 @@ pub(super) enum ArrowMethod { Size, Entries, JsonStringify, + JoinNotNull, // Future methods: TypeOf, @@ -147,6 +148,7 @@ impl std::ops::Deref for ArrowMethod { Self::Size => &public::SizeMethod, Self::Entries => &public::EntriesMethod, Self::JsonStringify => &public::JsonStringifyMethod, + Self::JoinNotNull => &public::JoinNotNullMethod, // Future methods: Self::TypeOf => &future::TypeOfMethod, @@ -201,6 +203,7 @@ impl ArrowMethod { "or" => Some(Self::Or), "and" => Some(Self::And), "jsonStringify" => Some(Self::JsonStringify), + "joinNotNull" => Some(Self::JoinNotNull), _ => None, }; @@ -210,7 +213,7 @@ impl ArrowMethod { } } - pub(super) fn is_public(&self) -> bool { + pub(super) const fn is_public(&self) -> bool { // This set controls which ->methods are exposed for use in connector // schemas. Non-public methods are still implemented and tested, but // will not be returned from lookup_arrow_method outside of tests. 
@@ -225,6 +228,7 @@ impl ArrowMethod { | Self::Size | Self::Entries | Self::JsonStringify + | Self::JoinNotNull ) } } diff --git a/apollo-federation/src/sources/connect/json_selection/methods/future/and.rs b/apollo-federation/src/sources/connect/json_selection/methods/future/and.rs index 78d0fb2915..78bd596900 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/future/and.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/future/and.rs @@ -39,7 +39,7 @@ fn and_method( } let (value_opt, arg_errors) = arg.apply_to_path(data, vars, input_path); errors.extend(arg_errors); - result = value_opt.map(|value| is_truthy(&value)).unwrap_or(false); + result = value_opt.is_some_and(|value| is_truthy(&value)); } (Some(JSON::Bool(result)), errors) diff --git a/apollo-federation/src/sources/connect/json_selection/methods/future/eq.rs b/apollo-federation/src/sources/connect/json_selection/methods/future/eq.rs index 9a06fd6d38..b24cee443b 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/future/eq.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/future/eq.rs @@ -28,11 +28,8 @@ fn eq_method( if let Some(MethodArgs { args, .. 
}) = method_args { if args.len() == 1 { let (value_opt, arg_errors) = args[0].apply_to_path(data, vars, input_path); - let matches = if let Some(value) = value_opt { - data == &value - } else { - false - }; + let matches = value_opt.is_some_and(|value| &value == data); + return (Some(JSON::Bool(matches)), arg_errors); } } diff --git a/apollo-federation/src/sources/connect/json_selection/methods/future/get.rs b/apollo-federation/src/sources/connect/json_selection/methods/future/get.rs index 167329d188..3abbc21591 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/future/get.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/future/get.rs @@ -34,187 +34,177 @@ fn get_method( vars: &VarsWithPathsMap, input_path: &InputPath, ) -> (Option, Vec) { - if let Some(MethodArgs { args, .. }) = method_args { - if let Some(index_literal) = args.first() { - match index_literal.apply_to_path(data, vars, input_path) { - (Some(JSON::Number(n)), index_errors) => match (data, n.as_i64()) { - (JSON::Array(array), Some(i)) => { - // Negative indices count from the end of the array - if let Some(element) = array.get(if i < 0 { - (array.len() as i64 + i) as usize - } else { - i as usize - }) { - (Some(element.clone()), index_errors) - } else { - ( - None, - vec_push( - index_errors, - ApplyToError::new( - format!( - "Method ->{}({}) index out of bounds", - method_name.as_ref(), - i, - ), - input_path.to_vec(), - index_literal.range(), - ), - ), - ) - } - } - - (JSON::String(s), Some(i)) => { - let s_str = s.as_str(); - let ilen = s_str.len() as i64; - // Negative indices count from the end of the array - let index = if i < 0 { ilen + i } else { i }; - if index >= 0 && index < ilen { - let uindex = index as usize; - let single_char_string = s_str[uindex..uindex + 1].to_string(); - (Some(JSON::String(single_char_string.into())), index_errors) - } else { - ( - None, - vec_push( - index_errors, - ApplyToError::new( - format!( - "Method ->{}({}) index 
out of bounds", - method_name.as_ref(), - i, - ), - input_path.to_vec(), - index_literal.range(), - ), - ), - ) - } - } + let Some(index_literal) = method_args.and_then(|MethodArgs { args, .. }| args.first()) else { + return ( + None, + vec![ApplyToError::new( + format!("Method ->{} requires an argument", method_name.as_ref()), + input_path.to_vec(), + method_name.range(), + )], + ); + }; - (_, None) => ( + match index_literal.apply_to_path(data, vars, input_path) { + (Some(JSON::Number(n)), index_errors) => match (data, n.as_i64()) { + (JSON::Array(array), Some(i)) => { + // Negative indices count from the end of the array + if let Some(element) = array.get(if i < 0 { + (array.len() as i64 + i) as usize + } else { + i as usize + }) { + (Some(element.clone()), index_errors) + } else { + ( None, vec_push( index_errors, ApplyToError::new( format!( - "Method ->{} requires an integer index", - method_name.as_ref() + "Method ->{}({}) index out of bounds", + method_name.as_ref(), + i, ), input_path.to_vec(), index_literal.range(), ), ), - ), - _ => ( + ) + } + } + + (JSON::String(s), Some(i)) => { + let s_str = s.as_str(); + let ilen = s_str.len() as i64; + // Negative indices count from the end of the array + let index = if i < 0 { ilen + i } else { i }; + if index >= 0 && index < ilen { + let uindex = index as usize; + let single_char_string = s_str[uindex..uindex + 1].to_string(); + (Some(JSON::String(single_char_string.into())), index_errors) + } else { + ( None, vec_push( index_errors, ApplyToError::new( format!( - "Method ->{} requires an array or string input, not {}", + "Method ->{}({}) index out of bounds", method_name.as_ref(), - json_type_name(data), + i, ), input_path.to_vec(), - method_name.range(), + index_literal.range(), ), ), + ) + } + } + + (_, None) => ( + None, + vec_push( + index_errors, + ApplyToError::new( + format!( + "Method ->{} requires an integer index", + method_name.as_ref() + ), + input_path.to_vec(), + index_literal.range(), ), - }, - 
(Some(ref key @ JSON::String(ref s)), index_errors) => match data { - JSON::Object(map) => { - if let Some(value) = map.get(s.as_str()) { - (Some(value.clone()), index_errors) - } else { - ( - None, - vec_push( - index_errors, - ApplyToError::new( - format!( - "Method ->{}({}) object key not found", - method_name.as_ref(), - key - ), - input_path.to_vec(), - index_literal.range(), - ), - ), - ) - } - } - _ => ( + ), + ), + _ => ( + None, + vec_push( + index_errors, + ApplyToError::new( + format!( + "Method ->{} requires an array or string input, not {}", + method_name.as_ref(), + json_type_name(data), + ), + input_path.to_vec(), + method_name.range(), + ), + ), + ), + }, + (Some(ref key @ JSON::String(ref s)), index_errors) => match data { + JSON::Object(map) => { + if let Some(value) = map.get(s.as_str()) { + (Some(value.clone()), index_errors) + } else { + ( None, vec_push( index_errors, ApplyToError::new( format!( - "Method ->{}({}) requires an object input", + "Method ->{}({}) object key not found", method_name.as_ref(), key ), input_path.to_vec(), - merge_ranges( - method_name.range(), - method_args.and_then(|args| args.range()), - ), + index_literal.range(), ), ), - ), - }, - (Some(value), index_errors) => ( - None, - vec_push( - index_errors, - ApplyToError::new( - format!( - "Method ->{}({}) requires an integer or string argument", - method_name.as_ref(), - value, - ), - input_path.to_vec(), - index_literal.range(), + ) + } + } + _ => ( + None, + vec_push( + index_errors, + ApplyToError::new( + format!( + "Method ->{}({}) requires an object input", + method_name.as_ref(), + key ), - ), - ), - (None, index_errors) => ( - None, - vec_push( - index_errors, - ApplyToError::new( - format!( - "Method ->{} received undefined argument", - method_name.as_ref() - ), - input_path.to_vec(), - index_literal.range(), + input_path.to_vec(), + merge_ranges( + method_name.range(), + method_args.and_then(|args| args.range()), ), ), ), - } - } else { - ( - None, - 
vec![ApplyToError::new( - format!("Method ->{} requires an argument", method_name.as_ref()), + ), + }, + (Some(value), index_errors) => ( + None, + vec_push( + index_errors, + ApplyToError::new( + format!( + "Method ->{}({}) requires an integer or string argument", + method_name.as_ref(), + value, + ), input_path.to_vec(), - method_name.range(), - )], - ) - } - } else { - ( + index_literal.range(), + ), + ), + ), + (None, index_errors) => ( None, - vec![ApplyToError::new( - format!("Method ->{} requires an argument", method_name.as_ref()), - input_path.to_vec(), - method_name.range(), - )], - ) + vec_push( + index_errors, + ApplyToError::new( + format!( + "Method ->{} received undefined argument", + method_name.as_ref() + ), + input_path.to_vec(), + index_literal.range(), + ), + ), + ), } } + #[allow(dead_code)] // method type-checking disabled until we add name resolution fn get_shape( method_name: &WithRange, @@ -228,7 +218,7 @@ fn get_shape( if let Some(index_literal) = args.first() { let index_shape = index_literal.compute_output_shape( input_shape.clone(), - dollar_shape.clone(), + dollar_shape, named_var_shapes, source_id, ); @@ -282,15 +272,17 @@ fn get_shape( } ShapeCase::String(Some(s)) => { - if let Some(index) = value_opt { - let index = *index as usize; - if index < s.len() { - Shape::string_value(&s[index..index + 1], empty()) - } else { - Shape::none() - } + let Some(index) = value_opt else { + return Shape::one( + [Shape::string(empty()), Shape::none()], + empty(), + ); + }; + let index = *index as usize; + if index < s.len() { + Shape::string_value(&s[index..index + 1], empty()) } else { - Shape::one([Shape::string(empty()), Shape::none()], empty()) + Shape::none() } } ShapeCase::String(None) => { @@ -471,13 +463,13 @@ mod tests { ); assert_eq!( selection!("$->get($->echo(-5)->mul(2))").apply_to(&json!("oyez")), - expected.clone(), + expected, ); assert_eq!( // The extra spaces here should not affect the error.range, as long // as we don't 
accidentally capture trailing spaces in the range. selection!("$->get($->echo(-5)->mul(2) )").apply_to(&json!("oyez")), - expected.clone(), + expected, ); } diff --git a/apollo-federation/src/sources/connect/json_selection/methods/future/has.rs b/apollo-federation/src/sources/connect/json_selection/methods/future/has.rs index 42d3fad131..583f067337 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/future/has.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/future/has.rs @@ -21,58 +21,46 @@ fn has_method( vars: &VarsWithPathsMap, input_path: &InputPath, ) -> (Option, Vec) { - if let Some(MethodArgs { args, .. }) = method_args { - match args.first() { - Some(arg) => match arg.apply_to_path(data, vars, input_path) { - (Some(JSON::Number(ref n)), arg_errors) => { - match (data, n.as_i64()) { - (JSON::Array(array), Some(index)) => { - let ilen = array.len() as i64; - // Negative indices count from the end of the array - let index = if index < 0 { ilen + index } else { index }; - (Some(JSON::Bool(index >= 0 && index < ilen)), arg_errors) - } - - (JSON::String(s), Some(index)) => { - let ilen = s.as_str().len() as i64; - // Negative indices count from the end of the array - let index = if index < 0 { ilen + index } else { index }; - (Some(JSON::Bool(index >= 0 && index < ilen)), arg_errors) - } - - _ => (Some(JSON::Bool(false)), arg_errors), - } - } - - (Some(JSON::String(ref s)), arg_errors) => match data { - JSON::Object(map) => { - (Some(JSON::Bool(map.contains_key(s.as_str()))), arg_errors) - } - _ => (Some(JSON::Bool(false)), arg_errors), - }, - - (_, arg_errors) => (Some(JSON::Bool(false)), arg_errors), - }, - None => ( - None, - vec![ApplyToError::new( - format!("Method ->{} requires an argument", method_name.as_ref()), - input_path.to_vec(), - method_name.range(), - )], - ), - } - } else { - ( + let Some(arg) = method_args.and_then(|MethodArgs { args, .. 
}| args.first()) else { + return ( None, vec![ApplyToError::new( format!("Method ->{} requires an argument", method_name.as_ref()), input_path.to_vec(), method_name.range(), )], - ) + ); + }; + match arg.apply_to_path(data, vars, input_path) { + (Some(JSON::Number(ref n)), arg_errors) => { + match (data, n.as_i64()) { + (JSON::Array(array), Some(index)) => { + let ilen = array.len() as i64; + // Negative indices count from the end of the array + let index = if index < 0 { ilen + index } else { index }; + (Some(JSON::Bool(index >= 0 && index < ilen)), arg_errors) + } + + (JSON::String(s), Some(index)) => { + let ilen = s.as_str().len() as i64; + // Negative indices count from the end of the array + let index = if index < 0 { ilen + index } else { index }; + (Some(JSON::Bool(index >= 0 && index < ilen)), arg_errors) + } + + _ => (Some(JSON::Bool(false)), arg_errors), + } + } + + (Some(JSON::String(ref s)), arg_errors) => match data { + JSON::Object(map) => (Some(JSON::Bool(map.contains_key(s.as_str()))), arg_errors), + _ => (Some(JSON::Bool(false)), arg_errors), + }, + + (_, arg_errors) => (Some(JSON::Bool(false)), arg_errors), } } + #[allow(dead_code)] // method type-checking disabled until we add name resolution fn has_shape( method_name: &WithRange, diff --git a/apollo-federation/src/sources/connect/json_selection/methods/future/match_if.rs b/apollo-federation/src/sources/connect/json_selection/methods/future/match_if.rs index b05558905a..4f9368c627 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/future/match_if.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/future/match_if.rs @@ -44,7 +44,7 @@ fn match_if_method( pair[0].apply_to_path(data, vars, input_path); errors.extend(condition_errors); - if let Some(JSON::Bool(true)) = condition_opt { + if condition_opt == Some(JSON::Bool(true)) { return pair[1] .apply_to_path(data, vars, input_path) .prepend_errors(errors); diff --git 
a/apollo-federation/src/sources/connect/json_selection/methods/future/typeof.rs b/apollo-federation/src/sources/connect/json_selection/methods/future/typeof.rs index 75a99b0a22..8d3206a90b 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/future/typeof.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/future/typeof.rs @@ -41,7 +41,7 @@ fn typeof_method( ) } else { let typeof_string = JSON::String(json_type_name(data).to_string().into()); - (Some(typeof_string), vec![]) + (Some(typeof_string), Vec::new()) } } #[allow(dead_code)] // method type-checking disabled until we add name resolution diff --git a/apollo-federation/src/sources/connect/json_selection/methods/future/values.rs b/apollo-federation/src/sources/connect/json_selection/methods/future/values.rs index f6afcaf4ba..14d745a744 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/future/values.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/future/values.rs @@ -44,7 +44,7 @@ fn values_method( match data { JSON::Object(map) => { let values = map.values().cloned().collect(); - (Some(JSON::Array(values)), vec![]) + (Some(JSON::Array(values)), Vec::new()) } _ => ( None, diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/entries.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/entries.rs index ea04f7680c..778e7df0c8 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/public/entries.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/entries.rs @@ -62,7 +62,7 @@ fn entries_method( JSON::Object(key_value_pair) }) .collect(); - (Some(JSON::Array(entries)), vec![]) + (Some(JSON::Array(entries)), Vec::new()) } _ => ( None, diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/first.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/first.rs index 6f523d9e96..36351c3a7a 100644 --- 
a/apollo-federation/src/sources/connect/json_selection/methods/public/first.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/first.rs @@ -41,14 +41,11 @@ fn first_method( } match data { - JSON::Array(array) => (array.first().cloned(), vec![]), - JSON::String(s) => { - if let Some(first) = s.as_str().chars().next() { - (Some(JSON::String(first.to_string().into())), vec![]) - } else { - (None, vec![]) - } - } + JSON::Array(array) => (array.first().cloned(), Vec::new()), + JSON::String(s) => s.as_str().chars().next().map_or_else( + || (None, Vec::new()), + |first| (Some(JSON::String(first.to_string().into())), Vec::new()), + ), _ => ( Some(data.clone()), vec![ApplyToError::new( @@ -88,15 +85,11 @@ fn first_shape( match input_shape.case() { ShapeCase::String(Some(value)) => Shape::string_value(&value[0..1], locations), ShapeCase::String(None) => Shape::string(locations), - ShapeCase::Array { prefix, tail } => { - if let Some(first) = prefix.first() { - first.clone() - } else if tail.is_none() { - Shape::none() - } else { - Shape::one([tail.clone(), Shape::none()], locations) - } - } + ShapeCase::Array { prefix, tail } => match (prefix.first(), tail) { + (Some(first), _) => first.clone(), + (_, tail) if tail.is_none() => Shape::none(), + _ => Shape::one([tail.clone(), Shape::none()], locations), + }, ShapeCase::Name(_, _) => input_shape.item(0, locations), ShapeCase::Unknown => Shape::unknown(locations), // When there is no obvious first element, ->first gives us the input diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/join_not_null.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/join_not_null.rs new file mode 100644 index 0000000000..981386d575 --- /dev/null +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/join_not_null.rs @@ -0,0 +1,342 @@ +use apollo_compiler::collections::IndexMap; +use serde_json_bytes::Value as JSON; +use shape::Shape; +use 
shape::ShapeCase; +use shape::location::SourceId; + +use crate::impl_arrow_method; +use crate::sources::connect::json_selection::ApplyToError; +use crate::sources::connect::json_selection::ApplyToInternal; +use crate::sources::connect::json_selection::MethodArgs; +use crate::sources::connect::json_selection::VarsWithPathsMap; +use crate::sources::connect::json_selection::immutable::InputPath; +use crate::sources::connect::json_selection::lit_expr::LitExpr; +use crate::sources::connect::json_selection::location::Ranged; +use crate::sources::connect::json_selection::location::WithRange; + +impl_arrow_method!( + JoinNotNullMethod, + join_not_null_method, + join_not_null_method_shape +); +/// Takes an array of scalar values and joins them into a single string using a +/// separator, skipping null values. +/// +/// This method is specifically useful when dealing with lists of entity +/// references in Federation, which can contain null. It's rare that you'll want +/// to send a `null` to an upstream service when fetching a batch of entities, +/// so this is a useful and convenient method. 
+/// +/// $->echo(["hello", null, "world"])->joinNotNull(", ") would result in "hello, world" +fn join_not_null_method( + method_name: &WithRange, + method_args: Option<&MethodArgs>, + data: &JSON, + _vars: &VarsWithPathsMap, + input_path: &InputPath, +) -> (Option, Vec) { + let mut warnings = vec![]; + + let Some(separator) = method_args + .and_then(|args| args.args.first()) + .and_then(|s| match &**s { + LitExpr::String(s) => Some(s), + _ => None, + }) + else { + warnings.push(ApplyToError::new( + format!( + "Method ->{} requires a string argument", + method_name.as_ref() + ), + input_path.to_vec(), + method_name.range(), + )); + return (None, warnings); + }; + + fn to_string(value: &JSON, method_name: &str) -> Result, String> { + match value { + JSON::Bool(b) => Ok(Some(b.then_some("true").unwrap_or("false").to_string())), + JSON::Number(number) => Ok(Some(number.to_string())), + JSON::String(byte_string) => Ok(Some(byte_string.as_str().to_string())), + JSON::Null => Ok(None), + JSON::Array(_) | JSON::Object(_) => Err(format!( + "Method ->{} requires an array of scalar values as input", + method_name + )), + } + } + + let joined = match data { + JSON::Array(values) => { + let mut joined = Vec::with_capacity(values.len()); + for value in values { + match to_string(value, method_name) { + Ok(Some(value)) => joined.push(value), + Ok(None) => {} + Err(err) => { + warnings.push(ApplyToError::new( + err, + input_path.to_vec(), + method_name.range(), + )); + return (None, warnings); + } + } + } + joined.join(separator.as_str()) + } + // Single values are emitted as strings with no separator + _ => match to_string(data, method_name) { + Ok(value) => value.unwrap_or_else(|| "".to_string()), + Err(err) => { + warnings.push(ApplyToError::new( + err, + input_path.to_vec(), + method_name.range(), + )); + return (None, warnings); + } + }, + }; + + (Some(JSON::String(joined.into())), warnings) +} +#[allow(dead_code)] // method type-checking disabled until we add name 
resolution +fn join_not_null_method_shape( + method_name: &WithRange, + method_args: Option<&MethodArgs>, + input_shape: Shape, + dollar_shape: Shape, + named_var_shapes: &IndexMap<&str, Shape>, + source_id: &SourceId, +) -> Shape { + let input_shape_contract = Shape::one( + [ + Shape::string([]), + Shape::int([]), + Shape::float([]), + Shape::bool([]), + Shape::null([]), + Shape::list( + Shape::one( + [ + Shape::string([]), + Shape::int([]), + Shape::float([]), + Shape::bool([]), + Shape::null([]), + ], + [], + ), + [], + ), + ], + [], + ); + + // allow unknown input + if !(input_shape.is_unknown() || matches!(input_shape.case(), ShapeCase::Name(_, _))) { + let mismatches = input_shape_contract.validate(&input_shape); + if !mismatches.is_empty() { + return Shape::error( + format!( + "Method ->{} requires an array of scalar values as input", + method_name.as_ref() + ), + [], + ); + } + } + + let Some(selection_shape) = method_args + .and_then(|args| args.args.first()) + .map(|s| s.compute_output_shape(input_shape, dollar_shape, named_var_shapes, source_id)) + else { + return Shape::error( + format!("Method ->{} requires one argument", method_name.as_ref()), + vec![], + ); + }; + + let method_count = method_args.map(|args| args.args.len()).unwrap_or_default(); + if method_count > 1 { + return Shape::error( + format!( + "Method ->{} requires only one argument, but {} were provided", + method_name.as_ref(), + method_count + ), + vec![], + ); + } + + // allow unknown separator + if !(selection_shape.is_unknown() || matches!(selection_shape.case(), ShapeCase::Name(_, _))) { + let mismatches = Shape::string([]).validate(&selection_shape); + if !mismatches.is_empty() { + return Shape::error( + format!( + "Method ->{} requires a string argument", + method_name.as_ref() + ), + vec![], + ); + } + } + + Shape::string(method_name.shape_location(source_id)) +} + +#[cfg(test)] +mod tests { + use serde_json_bytes::json; + + use super::*; + use crate::selection; + + 
#[rstest::rstest] + #[case(json!(["a","b","c"]), ", ", json!("a, b, c"))] + #[case(json!([1, 2, 3]), "|", json!("1|2|3"))] + #[case(json!([1.00000000000001, 2.9999999999999, 0.3]), "|", json!("1.00000000000001|2.9999999999999|0.3"))] + #[case(json!([true, false]), " and ", json!("true and false"))] + #[case(json!([null, "a", null, 1, null]), ", ", json!("a, 1"))] + #[case(json!([null, null]), ", ", json!(""))] + #[case(json!(1), ", ", json!("1"))] + #[case(json!("a"), ", ", json!("a"))] + #[case(json!(true), ", ", json!("true"))] + #[case(json!(null), ", ", json!(""))] + fn join_not_null_should_combine_arrays_with_a_separator( + #[case] input: JSON, + #[case] separator: String, + #[case] expected: JSON, + ) { + assert_eq!( + selection!(&format!("$->joinNotNull('{}')", separator)).apply_to(&input), + (Some(expected), vec![]), + ); + } + + #[rstest::rstest] + #[case(json!({"a": 1}), vec!["Method ->joinNotNull requires an array of scalar values as input"])] + #[case(json!([{"a": 1}, {"a": 2}]), vec!["Method ->joinNotNull requires an array of scalar values as input"])] + #[case(json!([[1, 2]]), vec!["Method ->joinNotNull requires an array of scalar values as input"])] + fn join_not_null_warnings(#[case] input: JSON, #[case] expected_warnings: Vec<&str>) { + use itertools::Itertools; + + let (result, warnings) = selection!("$->joinNotNull(',')").apply_to(&input); + assert_eq!(result, None); + assert_eq!( + warnings.iter().map(|w| w.message()).collect_vec(), + expected_warnings + ); + } + + fn get_shape(args: Vec>, input: Shape) -> Shape { + join_not_null_method_shape( + &WithRange::new("joinNotNull".to_string(), Some(0..7)), + Some(&MethodArgs { args, range: None }), + input, + Shape::none(), + &IndexMap::default(), + &SourceId::new("test".to_string()), + ) + } + + #[test] + fn test_join_not_null_shape_no_args() { + let output_shape = get_shape(vec![], Shape::list(Shape::string([]), [])); + assert_eq!( + output_shape, + Shape::error( + "Method ->joinNotNull requires one 
argument".to_string(), + vec![] + ) + ); + } + + #[test] + fn test_join_not_null_shape_non_string_args() { + let output_shape = get_shape( + vec![WithRange::new(LitExpr::Bool(true), None)], + Shape::list(Shape::string([]), []), + ); + assert_eq!( + output_shape, + Shape::error( + "Method ->joinNotNull requires a string argument".to_string(), + vec![] + ) + ); + } + + #[test] + fn test_join_not_null_shape_two_args() { + let output_shape = get_shape( + vec![ + WithRange::new(LitExpr::String(",".to_string()), None), + WithRange::new(LitExpr::String(",".to_string()), None), + ], + Shape::list(Shape::string([]), []), + ); + assert_eq!( + output_shape, + Shape::error( + "Method ->joinNotNull requires only one argument, but 2 were provided".to_string(), + vec![] + ) + ); + } + + #[test] + fn test_join_not_null_shape_scalar_input() { + let output_shape = get_shape( + vec![WithRange::new(LitExpr::String(",".to_string()), None)], + Shape::string([]), + ); + assert_eq!( + output_shape, + Shape::string([SourceId::new("test".to_string()).location(0..7)]) + ); + } + + #[test] + fn test_join_not_null_shape_list_of_list_input() { + let output_shape = get_shape( + vec![WithRange::new(LitExpr::String(",".to_string()), None)], + Shape::list(Shape::list(Shape::string([]), []), []), + ); + assert_eq!( + output_shape, + Shape::error( + "Method ->joinNotNull requires an array of scalar values as input".to_string(), + vec![] + ) + ); + } + + #[test] + fn test_join_not_null_shape_unknown_input() { + let output_shape = get_shape( + vec![WithRange::new(LitExpr::String(",".to_string()), None)], + Shape::unknown([]), + ); + assert_eq!( + output_shape, + Shape::string([SourceId::new("test".to_string()).location(0..7)]) + ); + } + + #[test] + fn test_join_not_null_shape_named_input() { + let output_shape = get_shape( + vec![WithRange::new(LitExpr::String(",".to_string()), None)], + Shape::name("$root.bar", []), + ); + assert_eq!( + output_shape, + 
Shape::string([SourceId::new("test".to_string()).location(0..7)]) + ); + } +} diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/json_stringify.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/json_stringify.rs index 8f4be7b66d..235de877dd 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/public/json_stringify.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/json_stringify.rs @@ -43,7 +43,7 @@ fn json_stringify_method( } match serde_json::to_string(data) { - Ok(val) => (Some(JSON::String(val.into())), vec![]), + Ok(val) => (Some(JSON::String(val.into())), Vec::new()), Err(err) => ( None, vec![ApplyToError::new( diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/last.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/last.rs index 26ea199a96..bd9f88b163 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/public/last.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/last.rs @@ -41,14 +41,11 @@ fn last_method( } match data { - JSON::Array(array) => (array.last().cloned(), vec![]), - JSON::String(s) => { - if let Some(last) = s.as_str().chars().last() { - (Some(JSON::String(last.to_string().into())), vec![]) - } else { - (None, vec![]) - } - } + JSON::Array(array) => (array.last().cloned(), Vec::new()), + JSON::String(s) => s.as_str().chars().last().map_or_else( + || (None, Vec::new()), + |last| (Some(JSON::String(last.to_string().into())), Vec::new()), + ), _ => ( Some(data.clone()), vec![ApplyToError::new( @@ -83,14 +80,12 @@ fn last_shape( match input_shape.case() { ShapeCase::String(Some(value)) => { - if let Some(last_char) = value.chars().last() { + value.chars().last().map_or_else(Shape::none, |last_char| { Shape::string_value( last_char.to_string().as_str(), method_name.shape_location(source_id), ) - } else { - Shape::none() - } + }) } 
ShapeCase::String(None) => Shape::one( @@ -103,11 +98,7 @@ fn last_shape( ShapeCase::Array { prefix, tail } => { if tail.is_none() { - if let Some(last) = prefix.last() { - last.clone() - } else { - Shape::none() - } + prefix.last().cloned().unwrap_or_else(Shape::none) } else if let Some(last) = prefix.last() { Shape::one( [last.clone(), tail.clone(), Shape::none()], @@ -145,7 +136,7 @@ mod tests { fn last_should_get_last_element_from_array() { assert_eq!( selection!("$->last").apply_to(&json!([1, 2, 3])), - (Some(json!(3)), vec![]), + (Some(json!(3)), Vec::new()), ); } diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/map.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/map.rs index 719bf990fb..63da689441 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/public/map.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/map.rs @@ -34,51 +34,50 @@ fn map_method( vars: &VarsWithPathsMap, input_path: &InputPath, ) -> (Option, Vec) { - if let Some(args) = method_args { - if let Some(first_arg) = args.args.first() { - if let JSON::Array(array) = data { - let mut output = Vec::with_capacity(array.len()); - let mut errors = Vec::new(); - - for (i, element) in array.iter().enumerate() { - let input_path = input_path.append(JSON::Number(i.into())); - let (applied_opt, arg_errors) = - first_arg.apply_to_path(element, vars, &input_path); - errors.extend(arg_errors); - output.insert(i, applied_opt.unwrap_or(JSON::Null)); - } - - (Some(JSON::Array(output)), errors) - } else { - // Return a singleton array wrapping the value of applying the - // ->map method the non-array input data. 
- first_arg - .apply_to_path(data, vars, input_path) - .and_then_collecting_errors(|value| { - (Some(JSON::Array(vec![value.clone()])), vec![]) - }) - } - } else { - ( - None, - vec![ApplyToError::new( - format!("Method ->{} requires one argument", method_name.as_ref()), - input_path.to_vec(), - method_name.range(), - )], - ) - } - } else { - ( + let Some(args) = method_args else { + return ( None, vec![ApplyToError::new( format!("Method ->{} requires one argument", method_name.as_ref()), input_path.to_vec(), method_name.range(), )], - ) + ); + }; + let Some(first_arg) = args.args.first() else { + return ( + None, + vec![ApplyToError::new( + format!("Method ->{} requires one argument", method_name.as_ref()), + input_path.to_vec(), + method_name.range(), + )], + ); + }; + + if let JSON::Array(array) = data { + let mut output = Vec::with_capacity(array.len()); + let mut errors = Vec::new(); + + for (i, element) in array.iter().enumerate() { + let input_path = input_path.append(JSON::Number(i.into())); + let (applied_opt, arg_errors) = first_arg.apply_to_path(element, vars, &input_path); + errors.extend(arg_errors); + output.insert(i, applied_opt.unwrap_or(JSON::Null)); + } + + (Some(JSON::Array(output)), errors) + } else { + // Return a singleton array wrapping the value of applying the + // ->map method the non-array input data. 
+ first_arg + .apply_to_path(data, vars, input_path) + .and_then_collecting_errors(|value| { + (Some(JSON::Array(vec![value.clone()])), Vec::new()) + }) } } + #[allow(dead_code)] // method type-checking disabled until we add name resolution fn map_shape( method_name: &WithRange, @@ -109,7 +108,7 @@ fn map_shape( .collect::>(); let new_tail = first_arg.compute_output_shape( tail.clone(), - dollar_shape.clone(), + dollar_shape, named_var_shapes, source_id, ); @@ -118,7 +117,7 @@ fn map_shape( _ => Shape::list( first_arg.compute_output_shape( input_shape.any_item([]), - dollar_shape.clone(), + dollar_shape, named_var_shapes, source_id, ), diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/mod.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/mod.rs index 0bcbb9a45a..0b0f6c0f0b 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/public/mod.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/mod.rs @@ -17,3 +17,5 @@ mod size; pub(crate) use size::SizeMethod; mod slice; pub(crate) use slice::SliceMethod; +mod join_not_null; +pub(crate) use join_not_null::JoinNotNullMethod; diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/size.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/size.rs index 4ebd73d6e3..a33b9be3fd 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/public/size.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/size.rs @@ -44,17 +44,17 @@ fn size_method( match data { JSON::Array(array) => { let size = array.len() as i64; - (Some(JSON::Number(size.into())), vec![]) + (Some(JSON::Number(size.into())), Vec::new()) } JSON::String(s) => { let size = s.as_str().len() as i64; - (Some(JSON::Number(size.into())), vec![]) + (Some(JSON::Number(size.into())), Vec::new()) } // Though we can't ask for ->first or ->last or ->at(n) on an object, we // can safely return how 
many properties the object has for ->size. JSON::Object(map) => { let size = map.len() as i64; - (Some(JSON::Number(size.into())), vec![]) + (Some(JSON::Number(size.into())), Vec::new()) } _ => ( None, diff --git a/apollo-federation/src/sources/connect/json_selection/methods/public/slice.rs b/apollo-federation/src/sources/connect/json_selection/methods/public/slice.rs index e68953b12b..cfbe7ecb44 100644 --- a/apollo-federation/src/sources/connect/json_selection/methods/public/slice.rs +++ b/apollo-federation/src/sources/connect/json_selection/methods/public/slice.rs @@ -86,7 +86,7 @@ fn slice_method( .collect(), ) } else { - JSON::Array(vec![]) + JSON::Array(Vec::new()) } } @@ -106,7 +106,7 @@ fn slice_method( // TODO Should calling ->slice or ->slice() without arguments be an // error? In JavaScript, array->slice() copies the array, but that's not // so useful in an immutable value-typed language like JSONSelection. - (Some(data.clone()), vec![]) + (Some(data.clone()), Vec::new()) } } #[allow(dead_code)] // method type-checking disabled until we add name resolution diff --git a/apollo-federation/src/sources/connect/json_selection/parser.rs b/apollo-federation/src/sources/connect/json_selection/parser.rs index 623558b523..6e5155a959 100644 --- a/apollo-federation/src/sources/connect/json_selection/parser.rs +++ b/apollo-federation/src/sources/connect/json_selection/parser.rs @@ -1,7 +1,7 @@ use std::fmt::Display; use std::str::FromStr; -use apollo_compiler::collections::IndexSet; +use itertools::Itertools; use nom::IResult; use nom::Slice; use nom::branch::alt; @@ -81,7 +81,7 @@ pub(crate) trait ExternalVarPaths { // JSONSelection ::= PathSelection | NakedSubSelection // NakedSubSelection ::= NamedSelection* StarSelection? 
-#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum JSONSelection { // Although we reuse the SubSelection type for the JSONSelection::Named // case, we parse it as a sequence of NamedSelection items without the @@ -93,7 +93,8 @@ pub enum JSONSelection { // To keep JSONSelection::parse consumers from depending on details of the nom // error types, JSONSelection::parse reports this custom error type. Other // ::parse methods still internally report nom::error::Error for the most part. -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(thiserror::Error, Debug, PartialEq, Eq, Clone)] +#[error("{message}: {fragment}")] pub struct JSONSelectionParseError { // The message will be a meaningful error message in many cases, but may // fall back to a formatted nom::error::ErrorKind in some cases, e.g. when @@ -112,20 +113,9 @@ pub struct JSONSelectionParseError { pub offset: usize, } -impl std::error::Error for JSONSelectionParseError {} - -impl Display for JSONSelectionParseError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}: {}", self.message, self.fragment) - } -} - impl JSONSelection { pub fn empty() -> Self { - JSONSelection::Named(SubSelection { - selections: vec![], - ..Default::default() - }) + JSONSelection::Named(SubSelection::default()) } pub fn is_empty(&self) -> bool { @@ -156,20 +146,14 @@ impl JSONSelection { } Err(e) => match e { - nom::Err::Error(e) | nom::Err::Failure(e) => { - Err(JSONSelectionParseError { - message: if let Some(message_str) = e.input.extra { - message_str.to_string() - } else { - // These errors aren't the most user-friendly, but - // with any luck we can gradually replace them with - // custom error messages over time. 
- format!("nom::error::ErrorKind::{:?}", e.code) - }, - fragment: e.input.fragment().to_string(), - offset: e.input.location_offset(), - }) - } + nom::Err::Error(e) | nom::Err::Failure(e) => Err(JSONSelectionParseError { + message: e.input.extra.map_or_else( + || format!("nom::error::ErrorKind::{:?}", e.code), + |message_str| message_str.to_string(), + ), + fragment: e.input.fragment().to_string(), + offset: e.input.location_offset(), + }), nom::Err::Incomplete(_) => unreachable!("nom::Err::Incomplete not expected here"), }, @@ -384,27 +368,20 @@ impl NamedSelection { pub(crate) fn names(&self) -> Vec<&str> { match self { - Self::Field(alias, name, _) => { - if let Some(alias) = alias { - vec![alias.name.as_str()] - } else { - vec![name.as_str()] - } - } + Self::Field(alias, name, _) => alias + .as_ref() + .map(|alias| vec![alias.name.as_str()]) + .unwrap_or_else(|| vec![name.as_str()]), Self::Path { alias, path, .. } => { - #[allow(clippy::if_same_then_else)] if let Some(alias) = alias { vec![alias.name.as_str()] } else if let Some(sub) = path.next_subselection() { - // Flatten and deduplicate the names of the NamedSelection - // items in the SubSelection. - let mut name_set = IndexSet::default(); - for selection in sub.selections_iter() { - name_set.extend(selection.names()); - } - name_set.into_iter().collect() + sub.selections_iter() + .flat_map(|selection| selection.names()) + .unique() + .collect() } else { - vec![] + Vec::new() } } Self::Group(alias, _) => vec![alias.name.as_str()], @@ -445,7 +422,7 @@ impl ExternalVarPaths for NamedSelection { match self { Self::Field(_, _, Some(sub)) | Self::Group(_, sub) => sub.external_var_paths(), Self::Path { path, .. 
} => path.external_var_paths(), - _ => vec![], + _ => Vec::new(), } } } @@ -487,7 +464,7 @@ impl PathSelection { let location = parts .last() .map(|part| part.location.clone()) - .or(var.range()) + .or_else(|| var.range()) .map(|location| location.end) .and_then(|end| var.range().map(|location| location.start..end)) .unwrap_or_default(); @@ -535,7 +512,7 @@ impl PathSelection { impl ExternalVarPaths for PathSelection { fn external_var_paths(&self) -> Vec<&PathSelection> { - let mut paths = vec![]; + let mut paths = Vec::new(); match self.path.as_ref() { PathList::Var(var_name, tail) => { if matches!(var_name.as_ref(), KnownVariable::External(_)) { @@ -663,15 +640,14 @@ impl PathList { return if let Some(var) = opt_var { let full_name = format!("{}{}", dollar.as_ref(), var.as_str()); let known_var = KnownVariable::from_str(full_name.as_str()); - let var_range = merge_ranges(dollar_range.clone(), var.range()); + let var_range = merge_ranges(dollar_range, var.range()); let ranged_known_var = WithRange::new(known_var, var_range); Ok(( remainder, WithRange::new(Self::Var(ranged_known_var, rest), full_range), )) } else { - let ranged_dollar_var = - WithRange::new(KnownVariable::Dollar, dollar_range.clone()); + let ranged_dollar_var = WithRange::new(KnownVariable::Dollar, dollar_range); Ok(( remainder, WithRange::new(Self::Var(ranged_dollar_var, rest), full_range), @@ -817,7 +793,7 @@ impl PathList { parts.extend(rest.variable_path_parts()); parts } - _ => vec![], + _ => Vec::new(), } } @@ -864,7 +840,7 @@ impl PathList { impl ExternalVarPaths for PathList { fn external_var_paths(&self) -> Vec<&PathSelection> { - let mut paths = vec![]; + let mut paths = Vec::new(); match self { // PathSelection::external_var_paths is responsible for adding all // variable &PathSelection items to the set, since this @@ -953,7 +929,7 @@ impl SubSelection { pub fn selections_iter(&self) -> impl Iterator { // TODO Implement a NamedSelectionIterator to traverse nested selections // lazily, 
rather than using an intermediary vector. - let mut selections = vec![]; + let mut selections = Vec::new(); for selection in &self.selections { match selection { NamedSelection::Path { alias, path, .. } => { @@ -994,7 +970,7 @@ impl SubSelection { impl ExternalVarPaths for SubSelection { fn external_var_paths(&self) -> Vec<&PathSelection> { - let mut paths = vec![]; + let mut paths = Vec::new(); for selection in &self.selections { paths.extend(selection.external_var_paths()); } @@ -1158,7 +1134,7 @@ pub(crate) fn parse_string_literal(input: Span) -> ParseResult match input_char_indices.next() { Some((0, quote @ '\'')) | Some((0, quote @ '"')) => { let mut escape_next = false; - let mut chars: Vec = vec![]; + let mut chars: Vec = Vec::new(); let mut remainder_opt: Option = None; for (i, c) in input_char_indices { @@ -1181,17 +1157,17 @@ pub(crate) fn parse_string_literal(input: Span) -> ParseResult chars.push(c); } - if let Some(remainder) = remainder_opt { - Ok(( - remainder, - WithRange::new( - chars.iter().collect::(), - Some(start..remainder.location_offset()), - ), - )) - } else { - Err(nom_fail_message(input, "Unterminated string literal")) - } + remainder_opt + .ok_or_else(|| nom_fail_message(input, "Unterminated string literal")) + .map(|remainder| { + ( + remainder, + WithRange::new( + chars.iter().collect::(), + Some(start..remainder.location_offset()), + ), + ) + }) } _ => Err(nom_error_message(input, "Not a string literal")), @@ -1260,7 +1236,10 @@ mod tests { fn test_identifier() { fn check(input: &str, expected_name: &str) { let (remainder, name) = parse_identifier(new_span(input)).unwrap(); - assert!(span_is_all_spaces_or_comments(remainder)); + assert!( + span_is_all_spaces_or_comments(remainder), + "remainder is `{remainder}`" + ); assert_eq!(name.as_ref(), expected_name); } @@ -1297,7 +1276,10 @@ mod tests { fn test_string_literal() { fn check(input: &str, expected: &str) { let (remainder, lit) = parse_string_literal(new_span(input)).unwrap(); - 
assert!(span_is_all_spaces_or_comments(remainder)); + assert!( + span_is_all_spaces_or_comments(remainder), + "remainder is `{remainder}`" + ); assert_eq!(lit.as_ref(), expected); } check("'hello world'", "hello world"); @@ -1311,7 +1293,10 @@ mod tests { fn test_key() { fn check(input: &str, expected: &Key) { let (remainder, key) = Key::parse(new_span(input)).unwrap(); - assert!(span_is_all_spaces_or_comments(remainder)); + assert!( + span_is_all_spaces_or_comments(remainder), + "remainder is `{remainder}`" + ); assert_eq!(key.as_ref(), expected); } @@ -1326,7 +1311,10 @@ mod tests { fn test_alias() { fn check(input: &str, alias: &str) { let (remainder, parsed) = Alias::parse(new_span(input)).unwrap(); - assert!(span_is_all_spaces_or_comments(remainder)); + assert!( + span_is_all_spaces_or_comments(remainder), + "remainder is `{remainder}`" + ); assert_eq!(parsed.name(), alias); } @@ -1341,7 +1329,10 @@ mod tests { fn test_named_selection() { fn assert_result_and_names(input: &str, expected: NamedSelection, names: &[&str]) { let (remainder, selection) = NamedSelection::parse(new_span(input)).unwrap(); - assert!(span_is_all_spaces_or_comments(remainder)); + assert!( + span_is_all_spaces_or_comments(remainder), + "remainder is `{remainder}`" + ); let selection = selection.strip_ranges(); assert_eq!(selection, expected); assert_eq!(selection.names(), names); @@ -1667,7 +1658,10 @@ mod tests { #[track_caller] fn check_path_selection(input: &str, expected: PathSelection) { let (remainder, path_selection) = PathSelection::parse(new_span(input)).unwrap(); - assert!(span_is_all_spaces_or_comments(remainder)); + assert!( + span_is_all_spaces_or_comments(remainder), + "remainder is `{remainder}`" + ); assert_eq!(&path_selection.strip_ranges(), &expected); assert_eq!( selection!(input).strip_ranges(), @@ -1713,7 +1707,7 @@ mod tests { check_path_selection("$.hello. world", expected.clone()); check_path_selection("$.hello . world", expected.clone()); check_path_selection("$ . 
hello . world", expected.clone()); - check_path_selection(" $ . hello . world ", expected.clone()); + check_path_selection(" $ . hello . world ", expected); } { @@ -1728,7 +1722,7 @@ mod tests { check_path_selection("hello .world", expected.clone()); check_path_selection("hello. world", expected.clone()); check_path_selection("hello . world", expected.clone()); - check_path_selection(" hello . world ", expected.clone()); + check_path_selection(" hello . world ", expected); } { @@ -1751,7 +1745,7 @@ mod tests { check_path_selection("hello .world { hello }", expected.clone()); check_path_selection("hello. world { hello }", expected.clone()); check_path_selection("hello . world { hello }", expected.clone()); - check_path_selection(" hello . world { hello } ", expected.clone()); + check_path_selection(" hello . world { hello } ", expected); } { @@ -1786,7 +1780,7 @@ mod tests { ); check_path_selection( " nested . 'string literal' . \"property\" . name ", - expected.clone(), + expected, ); } @@ -1827,7 +1821,7 @@ mod tests { ); check_path_selection( " nested . \"string literal\" { leggo: 'my ego' } ", - expected.clone(), + expected, ); } @@ -1871,7 +1865,7 @@ mod tests { ); check_path_selection( " $ . results { 'quoted without alias' { id 'n a m e' } } ", - expected.clone(), + expected, ); } @@ -1915,7 +1909,7 @@ mod tests { ); check_path_selection( " $ . 
results { 'non-identifier alias' : 'quoted with alias' { id 'n a m e': name } } ", - expected.clone(), + expected, ); } } @@ -2493,7 +2487,7 @@ mod tests { check_path_selection("data->query($.a, $.b, $.c )", expected.clone()); check_path_selection("data->query($.a, $.b, $.c,)", expected.clone()); check_path_selection("data->query($.a, $.b, $.c ,)", expected.clone()); - check_path_selection("data->query($.a, $.b, $.c , )", expected.clone()); + check_path_selection("data->query($.a, $.b, $.c , )", expected); } { @@ -2535,7 +2529,7 @@ mod tests { check_path_selection("data.x->concat([data.y, data.z,])", expected.clone()); check_path_selection("data.x->concat([data.y, data.z , ])", expected.clone()); check_path_selection("data.x->concat([data.y, data.z,],)", expected.clone()); - check_path_selection("data.x->concat([data.y, data.z , ] , )", expected.clone()); + check_path_selection("data.x->concat([data.y, data.z , ] , )", expected); } check_path_selection( @@ -2719,7 +2713,10 @@ mod tests { fn test_subselection() { fn check_parsed(input: &str, expected: SubSelection) { let (remainder, parsed) = SubSelection::parse(new_span(input)).unwrap(); - assert!(span_is_all_spaces_or_comments(remainder)); + assert!( + span_is_all_spaces_or_comments(remainder), + "remainder is `{remainder}`" + ); assert_eq!(parsed.strip_ranges(), expected); } diff --git a/apollo-federation/src/sources/connect/json_selection/selection_set.rs b/apollo-federation/src/sources/connect/json_selection/selection_set.rs index 90c996d9dc..a94889c311 100644 --- a/apollo-federation/src/sources/connect/json_selection/selection_set.rs +++ b/apollo-federation/src/sources/connect/json_selection/selection_set.rs @@ -61,16 +61,18 @@ impl JSONSelection { selection_set: &SelectionSet, required_keys: Option<&FieldSet>, ) -> Self { - let selection_set = match required_keys { - Some(keys) => { - let mut new_set = selection_set.clone(); - for selection in keys.selection_set.selections.iter() { - 
new_set.push(selection.clone()); - } - new_set - } - None => selection_set.clone(), - }; + let selection_set = required_keys.map_or_else( + || selection_set.clone(), + |keys| { + keys.selection_set.selections.iter().cloned().fold( + selection_set.clone(), + |mut acc, selection| { + acc.push(selection); + acc + }, + ) + }, + ); match self { Self::Named(sub) => Self::Named(sub.apply_selection_set(document, &selection_set)), diff --git a/apollo-federation/src/sources/connect/mod.rs b/apollo-federation/src/sources/connect/mod.rs index 4e71f1bb7b..7c2eed2c8f 100644 --- a/apollo-federation/src/sources/connect/mod.rs +++ b/apollo-federation/src/sources/connect/mod.rs @@ -1,3 +1,11 @@ +#![deny(nonstandard_style)] +#![deny(clippy::redundant_clone)] +#![deny(clippy::manual_while_let_some)] +#![deny(clippy::needless_borrow)] +#![deny(clippy::manual_ok_or)] +#![deny(clippy::needless_collect)] +#![deny(clippy::or_fun_call)] + use std::fmt::Display; use std::hash::Hash; use std::hash::Hasher; diff --git a/apollo-federation/src/sources/connect/models.rs b/apollo-federation/src/sources/connect/models.rs index 51a752dbec..21a22dd0a3 100644 --- a/apollo-federation/src/sources/connect/models.rs +++ b/apollo-federation/src/sources/connect/models.rs @@ -25,7 +25,6 @@ use super::id::ConnectorPosition; use super::json_selection::ExternalVarPaths; use super::spec::schema::ConnectDirectiveArguments; use super::spec::schema::SourceDirectiveArguments; -use super::spec::versions::VersionInfo; use super::variable::Namespace; use super::variable::VariableReference; use crate::error::FederationError; @@ -81,7 +80,7 @@ pub type CustomConfiguration = Arc>; /// A connector can be used as a potential entity resolver for a type, with /// extra validation rules based on the transport args and field position within /// a schema. 
-#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum EntityResolver { /// The user defined a connector on a field that acts as an entity resolver Explicit, @@ -113,14 +112,11 @@ impl Connector { return Ok(Default::default()); }; - let version: VersionInfo = spec.into(); - let source_name = ConnectSpec::source_directive_name(&link); - let source_arguments = extract_source_directive_arguments(schema, &source_name, &version)?; + let source_arguments = extract_source_directive_arguments(schema, &source_name)?; let connect_name = ConnectSpec::connect_directive_name(&link); - let connect_arguments = - extract_connect_directive_arguments(schema, &connect_name, &version)?; + let connect_arguments = extract_connect_directive_arguments(schema, &connect_name)?; connect_arguments .into_iter() @@ -258,7 +254,7 @@ impl Connector { .id .source_name .clone() - .unwrap_or(self.id.synthetic_name()); + .unwrap_or_else(|| self.id.synthetic_name()); format!("{}.{}", self.id.subgraph_name, source_name) } } diff --git a/apollo-federation/src/sources/connect/models/http_json_transport.rs b/apollo-federation/src/sources/connect/models/http_json_transport.rs index e357ec54f2..2ef0d6c537 100644 --- a/apollo-federation/src/sources/connect/models/http_json_transport.rs +++ b/apollo-federation/src/sources/connect/models/http_json_transport.rs @@ -12,6 +12,7 @@ use apollo_compiler::parser::SourceSpan; use either::Either; use http::HeaderName; use http::Uri; +use http::header; use http::uri::InvalidUri; use http::uri::InvalidUriParts; use http::uri::Parts; @@ -34,7 +35,6 @@ use crate::sources::connect::spec::schema::HEADERS_ARGUMENT_NAME; use crate::sources::connect::spec::schema::HTTP_HEADER_MAPPING_FROM_ARGUMENT_NAME; use crate::sources::connect::spec::schema::HTTP_HEADER_MAPPING_NAME_ARGUMENT_NAME; use crate::sources::connect::spec::schema::HTTP_HEADER_MAPPING_VALUE_ARGUMENT_NAME; -use crate::sources::connect::spec::versions::AllowedHeaders; use 
crate::sources::connect::string_template; use crate::sources::connect::string_template::UriString; use crate::sources::connect::string_template::write_value; @@ -303,7 +303,7 @@ pub enum HTTPMethod { impl HTTPMethod { #[inline] - pub fn as_str(&self) -> &str { + pub const fn as_str(&self) -> &str { match self { HTTPMethod::Get => "GET", HTTPMethod::Post => "POST", @@ -362,28 +362,19 @@ impl<'a> Header<'a> { /// Get a list of headers from the `headers` argument in a `@connect` or `@source` directive. pub(crate) fn from_headers_arg( node: &'a Node, - allowed_headers: &AllowedHeaders, ) -> Vec>> { - if let Some(values) = node.as_list() { - values - .iter() - .map(|v| Self::from_single(v, allowed_headers)) - .collect() - } else if node.as_object().is_some() { - vec![Self::from_single(node, allowed_headers)] - } else { - vec![Err(HeaderParseError::Other { + match (node.as_list(), node.as_object()) { + (Some(values), _) => values.iter().map(Self::from_single).collect(), + (None, Some(_)) => vec![Self::from_single(node)], + _ => vec![Err(HeaderParseError::Other { message: format!("`{HEADERS_ARGUMENT_NAME}` must be an object or list of objects"), node, - })] + })], } } /// Build a single [`Self`] from a single entry in the `headers` arg. 
- fn from_single( - node: &'a Node, - allowed_headers: &AllowedHeaders, - ) -> Result> { + fn from_single(node: &'a Node) -> Result> { let mappings = node.as_object().ok_or_else(|| HeaderParseError::Other { message: "the HTTP header mapping is not an object".to_string(), node, @@ -409,7 +400,7 @@ impl<'a> Header<'a> { node: name_node, })?; - if allowed_headers.header_name_is_reserved(&name) { + if RESERVED_HEADERS.contains(&name) { return Err(HeaderParseError::Other { message: format!("header '{name}' is reserved and cannot be set by a connector"), node: name_node, @@ -424,7 +415,7 @@ impl<'a> Header<'a> { .find(|(name, _value)| *name == HTTP_HEADER_MAPPING_VALUE_ARGUMENT_NAME); match (from, value) { - (Some(_), None) if allowed_headers.header_name_allowed_static(&name) => { + (Some(_), None) if STATIC_HEADERS.contains(&name) => { Err(HeaderParseError::Other{ message: format!( "header '{name}' can't be set with `{HTTP_HEADER_MAPPING_FROM_ARGUMENT_NAME}`, only with `{HTTP_HEADER_MAPPING_VALUE_ARGUMENT_NAME}`" ), node: name_node}) @@ -509,6 +500,22 @@ impl Display for HeaderParseError<'_> { impl Error for HeaderParseError<'_> {} +const RESERVED_HEADERS: [HeaderName; 11] = [ + header::CONNECTION, + header::PROXY_AUTHENTICATE, + header::PROXY_AUTHORIZATION, + header::TE, + header::TRAILER, + header::TRANSFER_ENCODING, + header::UPGRADE, + header::CONTENT_LENGTH, + header::CONTENT_ENCODING, + header::ACCEPT_ENCODING, + HeaderName::from_static("keep-alive"), +]; + +const STATIC_HEADERS: [HeaderName; 3] = [header::CONTENT_TYPE, header::ACCEPT, header::HOST]; + #[cfg(test)] mod test_make_uri { use std::str::FromStr; diff --git a/apollo-federation/src/sources/connect/spec/directives.rs b/apollo-federation/src/sources/connect/spec/directives.rs index 0fe28ee908..1cb5e976ab 100644 --- a/apollo-federation/src/sources/connect/spec/directives.rs +++ b/apollo-federation/src/sources/connect/spec/directives.rs @@ -20,7 +20,6 @@ use super::schema::SOURCE_BASE_URL_ARGUMENT_NAME; use 
super::schema::SOURCE_NAME_ARGUMENT_NAME; use super::schema::SourceDirectiveArguments; use super::schema::SourceHTTPArguments; -use super::versions::VersionInfo; use crate::error::FederationError; use crate::schema::position::InterfaceFieldDefinitionPosition; use crate::schema::position::ObjectOrInterfaceFieldDefinitionPosition; @@ -41,21 +40,19 @@ macro_rules! internal { pub(crate) fn extract_source_directive_arguments( schema: &Schema, name: &Name, - version_info: &VersionInfo, ) -> Result, FederationError> { schema .schema_definition .directives .iter() .filter(|directive| directive.name == *name) - .map(|d| SourceDirectiveArguments::from_directive(d, version_info)) + .map(SourceDirectiveArguments::from_directive) .collect() } pub(crate) fn extract_connect_directive_arguments( schema: &Schema, name: &Name, - version_info: &VersionInfo, ) -> Result, FederationError> { // connect on fields schema @@ -100,11 +97,7 @@ pub(crate) fn extract_connect_directive_arguments( directive_name: directive.name.clone(), directive_index: i, }); - ConnectDirectiveArguments::from_position_and_directive( - position, - directive, - version_info, - ) + ConnectDirectiveArguments::from_position_and_directive(position, directive) }) }) }) @@ -127,9 +120,7 @@ pub(crate) fn extract_connect_directive_arguments( directive_index: i, }); ConnectDirectiveArguments::from_position_and_directive( - position, - directive, - version_info, + position, directive, ) }) }), @@ -141,10 +132,7 @@ pub(crate) fn extract_connect_directive_arguments( type ObjectNode = [(Name, Node)]; impl SourceDirectiveArguments { - fn from_directive( - value: &Component, - version_info: &VersionInfo, - ) -> Result { + fn from_directive(value: &Component) -> Result { let args = &value.arguments; // We'll have to iterate over the arg list and keep the properties by their name @@ -154,14 +142,14 @@ impl SourceDirectiveArguments { let arg_name = arg.name.as_str(); if arg_name == SOURCE_NAME_ARGUMENT_NAME.as_str() { - name = 
Some(arg.value.as_str().ok_or(internal!( - "`name` field in `@source` directive is not a string" - ))?); + name = Some(arg.value.as_str().ok_or_else(|| { + internal!("`name` field in `@source` directive is not a string") + })?); } else if arg_name == HTTP_ARGUMENT_NAME.as_str() { - let http_value = arg.value.as_object().ok_or(internal!( - "`http` field in `@source` directive is not an object" - ))?; - let http_value = SourceHTTPArguments::from_values(http_value, version_info)?; + let http_value = arg.value.as_object().ok_or_else(|| { + internal!("`http` field in `@source` directive is not an object") + })?; + let http_value = SourceHTTPArguments::from_values(http_value)?; http = Some(http_value); } else { @@ -173,18 +161,15 @@ impl SourceDirectiveArguments { Ok(Self { name: name - .ok_or(internal!("missing `name` field in `@source` directive"))? + .ok_or_else(|| internal!("missing `name` field in `@source` directive"))? .to_string(), - http: http.ok_or(internal!("missing `http` field in `@source` directive"))?, + http: http.ok_or_else(|| internal!("missing `http` field in `@source` directive"))?, }) } } impl SourceHTTPArguments { - fn from_values( - values: &ObjectNode, - version_info: &VersionInfo, - ) -> Result { + fn from_values(values: &ObjectNode) -> Result { let mut base_url = None; let mut headers = None; let mut path = None; @@ -193,7 +178,7 @@ impl SourceHTTPArguments { let name = name.as_str(); if name == SOURCE_BASE_URL_ARGUMENT_NAME.as_str() { - let base_url_value = value.as_str().ok_or(internal!( + let base_url_value = value.as_str().ok_or_else(|| internal!( "`baseURL` field in `@source` directive's `http.baseURL` field is not a string" ))?; @@ -204,20 +189,22 @@ impl SourceHTTPArguments { ); } else if name == HEADERS_ARGUMENT_NAME.as_str() { headers = Some( - Header::from_headers_arg(value, &version_info.allowed_headers) + Header::from_headers_arg(value) .into_iter() .map_ok(|Header { name, source, .. 
}| (name, source)) .try_collect() .map_err(|err| internal!(err.to_string()))?, ); } else if name == PATH_ARGUMENT_NAME.as_str() { - let value = value.as_str().ok_or(internal!(format!( - "`{}` field in `@source` directive's `http.path` field is not a string", - PATH_ARGUMENT_NAME - )))?; + let value = value.as_str().ok_or_else(|| { + internal!(format!( + "`{}` field in `@source` directive's `http.path` field is not a string", + PATH_ARGUMENT_NAME + )) + })?; path = Some(JSONSelection::parse(value).map_err(|e| internal!(e.message))?); } else if name == QUERY_PARAMS_ARGUMENT_NAME.as_str() { - let value = value.as_str().ok_or(internal!(format!( + let value = value.as_str().ok_or_else(|| internal!(format!( "`{}` field in `@source` directive's `http.queryParams` field is not a string", QUERY_PARAMS_ARGUMENT_NAME )))?; @@ -230,9 +217,9 @@ impl SourceHTTPArguments { } Ok(Self { - base_url: base_url.ok_or(internal!( - "missing `base_url` field in `@source` directive's `http` argument" - ))?, + base_url: base_url.ok_or_else(|| { + internal!("missing `base_url` field in `@source` directive's `http` argument") + })?, headers: headers.unwrap_or_default(), path, query_params: query, @@ -244,7 +231,6 @@ impl ConnectDirectiveArguments { fn from_position_and_directive( position: ConnectorPosition, value: &Node, - version_info: &VersionInfo, ) -> Result { let args = &value.arguments; @@ -258,33 +244,33 @@ impl ConnectDirectiveArguments { let arg_name = arg.name.as_str(); if arg_name == CONNECT_SOURCE_ARGUMENT_NAME.as_str() { - let source_value = arg.value.as_str().ok_or(internal!( - "`source` field in `@source` directive is not a string" - ))?; + let source_value = arg.value.as_str().ok_or_else(|| { + internal!("`source` field in `@source` directive is not a string") + })?; source = Some(source_value); } else if arg_name == HTTP_ARGUMENT_NAME.as_str() { - let http_value = arg.value.as_object().ok_or(internal!( - "`http` field in `@connect` directive is not an object" - ))?; + let 
http_value = arg.value.as_object().ok_or_else(|| { + internal!("`http` field in `@connect` directive is not an object") + })?; - http = Some(ConnectHTTPArguments::from_values(http_value, version_info)?); + http = Some(ConnectHTTPArguments::from_values(http_value)?); } else if arg_name == "batch" { - let http_value = arg.value.as_object().ok_or(internal!( - "`http` field in `@connect` directive is not an object" - ))?; + let http_value = arg.value.as_object().ok_or_else(|| { + internal!("`http` field in `@connect` directive is not an object") + })?; batch = Some(ConnectBatchArguments::from_values(http_value)?); } else if arg_name == CONNECT_SELECTION_ARGUMENT_NAME.as_str() { - let selection_value = arg.value.as_str().ok_or(internal!( - "`selection` field in `@connect` directive is not a string" - ))?; + let selection_value = arg.value.as_str().ok_or_else(|| { + internal!("`selection` field in `@connect` directive is not a string") + })?; selection = Some(JSONSelection::parse(selection_value).map_err(|e| internal!(e.message))?); } else if arg_name == CONNECT_ENTITY_ARGUMENT_NAME.as_str() { - let entity_value = arg.value.to_bool().ok_or(internal!( - "`entity` field in `@connect` directive is not a boolean" - ))?; + let entity_value = arg.value.to_bool().ok_or_else(|| { + internal!("`entity` field in `@connect` directive is not a boolean") + })?; entity = Some(entity_value); } else { @@ -298,7 +284,8 @@ impl ConnectDirectiveArguments { position, source: source.map(|s| s.to_string()), http, - selection: selection.ok_or(internal!("`@connect` directive is missing a selection"))?, + selection: selection + .ok_or_else(|| internal!("`@connect` directive is missing a selection"))?, entity: entity.unwrap_or_default(), batch, }) @@ -306,10 +293,7 @@ impl ConnectDirectiveArguments { } impl ConnectHTTPArguments { - fn from_values( - values: &ObjectNode, - version_info: &VersionInfo, - ) -> Result { + fn from_values(values: &ObjectNode) -> Result { let mut get = None; let mut post 
= None; let mut patch = None; @@ -323,49 +307,53 @@ impl ConnectHTTPArguments { let name = name.as_str(); if name == CONNECT_BODY_ARGUMENT_NAME.as_str() { - let body_value = value.as_str().ok_or(internal!( - "`body` field in `@connect` directive's `http` field is not a string" - ))?; + let body_value = value.as_str().ok_or_else(|| { + internal!("`body` field in `@connect` directive's `http` field is not a string") + })?; body = Some(JSONSelection::parse(body_value).map_err(|e| internal!(e.message))?); } else if name == HEADERS_ARGUMENT_NAME.as_str() { headers = Some( - Header::from_headers_arg(value, &version_info.allowed_headers) + Header::from_headers_arg(value) .into_iter() .map_ok(|Header { name, source, .. }| (name, source)) .try_collect() .map_err(|err| internal!(err.to_string()))?, ); } else if name == "GET" { - get = Some(value.as_str().ok_or(internal!( + get = Some(value.as_str().ok_or_else(|| internal!( "supplied HTTP template URL in `@connect` directive's `http` field is not a string" ))?.to_string()); } else if name == "POST" { - post = Some(value.as_str().ok_or(internal!( + post = Some(value.as_str().ok_or_else(|| internal!( "supplied HTTP template URL in `@connect` directive's `http` field is not a string" ))?.to_string()); } else if name == "PATCH" { - patch = Some(value.as_str().ok_or(internal!( + patch = Some(value.as_str().ok_or_else(|| internal!( "supplied HTTP template URL in `@connect` directive's `http` field is not a string" ))?.to_string()); } else if name == "PUT" { - put = Some(value.as_str().ok_or(internal!( + put = Some(value.as_str().ok_or_else(|| internal!( "supplied HTTP template URL in `@connect` directive's `http` field is not a string" ))?.to_string()); } else if name == "DELETE" { - delete = Some(value.as_str().ok_or(internal!( + delete = Some(value.as_str().ok_or_else(|| internal!( "supplied HTTP template URL in `@connect` directive's `http` field is not a string" ))?.to_string()); } else if name == PATH_ARGUMENT_NAME.as_str() { 
- let value = value.as_str().ok_or(internal!(format!( - "`{}` field in `@connect` directive's `http` field is not a string", - PATH_ARGUMENT_NAME - )))?; + let value = value.as_str().ok_or_else(|| { + internal!(format!( + "`{}` field in `@connect` directive's `http` field is not a string", + PATH_ARGUMENT_NAME + )) + })?; path = Some(JSONSelection::parse(value).map_err(|e| internal!(e.message))?); } else if name == QUERY_PARAMS_ARGUMENT_NAME.as_str() { - let value = value.as_str().ok_or(internal!(format!( - "`{}` field in `@connect` directive's `http` field is not a string", - QUERY_PARAMS_ARGUMENT_NAME - )))?; + let value = value.as_str().ok_or_else(|| { + internal!(format!( + "`{}` field in `@connect` directive's `http` field is not a string", + QUERY_PARAMS_ARGUMENT_NAME + )) + })?; query_params = Some(JSONSelection::parse(value).map_err(|e| internal!(e.message))?); } } @@ -391,7 +379,7 @@ impl ConnectBatchArguments { let name = name.as_str(); if name == "maxSize" { - let max_size_int = Some(value.to_i32().ok_or(internal!( + let max_size_int = Some(value.to_i32().ok_or_else(|| internal!( "supplied 'max_size' field in `@connect` directive's `batch` field is not a positive integer" ))?); // Convert the int to a usize since it is used for chunking an array later. 
@@ -413,7 +401,6 @@ mod tests { use crate::ValidFederationSubgraphs; use crate::schema::FederationSchema; - use crate::sources::connect::ConnectSpec; use crate::sources::connect::spec::schema::CONNECT_DIRECTIVE_NAME_IN_SPEC; use crate::sources::connect::spec::schema::SOURCE_DIRECTIVE_NAME_IN_SPEC; use crate::sources::connect::spec::schema::SourceDirectiveArguments; @@ -483,7 +470,7 @@ mod tests { insta::assert_snapshot!( actual_definition.to_string(), - @"directive @connect(source: String, http: connect__ConnectHTTP, batch: connect__ConnectBatch, selection: connect__JSONSelection!, entity: Boolean = false) repeatable on FIELD_DEFINITION" + @"directive @connect(source: String, http: connect__ConnectHTTP, batch: connect__ConnectBatch, selection: connect__JSONSelection!, entity: Boolean = false) repeatable on FIELD_DEFINITION | OBJECT" ); let fields = schema @@ -525,7 +512,7 @@ mod tests { .directives .iter() .filter(|directive| directive.name == SOURCE_DIRECTIVE_NAME_IN_SPEC) - .map(|d| SourceDirectiveArguments::from_directive(d, &ConnectSpec::V0_1.into())) + .map(SourceDirectiveArguments::from_directive) .collect(); insta::assert_debug_snapshot!( @@ -571,11 +558,7 @@ mod tests { let schema = &subgraph.schema; // Extract the connects from the schema definition and map them to their `Connect` equivalent - let connects = super::extract_connect_directive_arguments( - schema.schema(), - &name!(connect), - &ConnectSpec::V0_1.into(), - ); + let connects = super::extract_connect_directive_arguments(schema.schema(), &name!(connect)); insta::assert_debug_snapshot!( connects.unwrap(), diff --git a/apollo-federation/src/sources/connect/spec/mod.rs b/apollo-federation/src/sources/connect/spec/mod.rs index 11cbffdc38..fabc4cd497 100644 --- a/apollo-federation/src/sources/connect/spec/mod.rs +++ b/apollo-federation/src/sources/connect/spec/mod.rs @@ -17,7 +17,6 @@ mod directives; pub(crate) mod schema; mod type_and_directive_specifications; -pub(crate) mod versions; use 
std::fmt::Display; @@ -25,7 +24,6 @@ use apollo_compiler::Name; use apollo_compiler::Schema; use apollo_compiler::ast::Argument; use apollo_compiler::ast::Directive; -use apollo_compiler::ast::DirectiveLocation; use apollo_compiler::ast::Value; use apollo_compiler::name; pub(crate) use directives::extract_connect_directive_arguments; @@ -118,9 +116,7 @@ impl ConnectSpec { return Ok(()); }; - let spec = Self::try_from(&link.url.version)?; - - type_and_directive_specifications::check_or_add(&link, &spec, schema) + type_and_directive_specifications::check_or_add(&link, schema) } pub(crate) fn source_directive_name(link: &Link) -> Name { @@ -137,7 +133,7 @@ impl ConnectSpec { arguments: vec![ Argument { name: name!("graphs"), - value: Value::List(vec![]).into(), + value: Value::List(Vec::new()).into(), } .into(), Argument { @@ -157,21 +153,6 @@ impl ConnectSpec { ], } } - - pub(crate) fn connect_directive_locations(&self) -> &'static [DirectiveLocation] { - match self { - ConnectSpec::V0_1 => CONNECT_V0_1_LOCATIONS, - ConnectSpec::V0_2 => CONNECT_V0_2_LOCATIONS, - } - } - - pub(crate) const fn available() -> &'static [ConnectSpec] { - if cfg!(any(feature = "connect_v0.2", test)) { - &[ConnectSpec::V0_1, ConnectSpec::V0_2] - } else { - &[ConnectSpec::V0_1] - } - } } impl TryFrom<&Version> for ConnectSpec { @@ -179,7 +160,6 @@ impl TryFrom<&Version> for ConnectSpec { fn try_from(version: &Version) -> Result { match (version.major, version.minor) { (0, 1) => Ok(Self::V0_1), - #[cfg(any(feature = "connect_v0.2", test))] (0, 2) => Ok(Self::V0_2), _ => Err(SingleFederationError::UnknownLinkVersion { message: format!("Unknown connect version: {version}"), @@ -202,9 +182,3 @@ impl From for Version { } } } - -const CONNECT_V0_1_LOCATIONS: &[DirectiveLocation] = &[DirectiveLocation::FieldDefinition]; -const CONNECT_V0_2_LOCATIONS: &[DirectiveLocation] = &[ - DirectiveLocation::FieldDefinition, - DirectiveLocation::Object, -]; diff --git 
a/apollo-federation/src/sources/connect/spec/type_and_directive_specifications.rs b/apollo-federation/src/sources/connect/spec/type_and_directive_specifications.rs index b38e093362..24ada64c0b 100644 --- a/apollo-federation/src/sources/connect/spec/type_and_directive_specifications.rs +++ b/apollo-federation/src/sources/connect/spec/type_and_directive_specifications.rs @@ -44,7 +44,6 @@ use crate::sources::connect::spec::schema::SOURCE_BASE_URL_ARGUMENT_NAME; pub(super) fn check_or_add( link: &Link, - spec: &ConnectSpec, schema: &mut FederationSchema, ) -> Result<(), FederationError> { // the `get_type` closure expects a SingleFederationError, so we can't @@ -71,21 +70,21 @@ pub(super) fn check_or_add( let http_header_mapping_field_list = vec![ InputValueDefinition { description: None, - name: HTTP_HEADER_MAPPING_NAME_ARGUMENT_NAME.clone(), + name: HTTP_HEADER_MAPPING_NAME_ARGUMENT_NAME, ty: ty!(String!).into(), default_value: None, directives: Default::default(), }, InputValueDefinition { description: None, - name: HTTP_HEADER_MAPPING_FROM_ARGUMENT_NAME.clone(), + name: HTTP_HEADER_MAPPING_FROM_ARGUMENT_NAME, ty: ty!(String).into(), default_value: None, directives: Default::default(), }, InputValueDefinition { description: None, - name: HTTP_HEADER_MAPPING_VALUE_ARGUMENT_NAME.clone(), + name: HTTP_HEADER_MAPPING_VALUE_ARGUMENT_NAME, ty: ty!([String!]).into(), default_value: None, directives: Default::default(), @@ -153,14 +152,14 @@ pub(super) fn check_or_add( }, InputValueDefinition { description: None, - name: CONNECT_BODY_ARGUMENT_NAME.clone(), + name: CONNECT_BODY_ARGUMENT_NAME, ty: Type::Named(json_selection_spec.name.clone()).into(), default_value: None, directives: Default::default(), }, InputValueDefinition { description: None, - name: HEADERS_ARGUMENT_NAME.clone(), + name: HEADERS_ARGUMENT_NAME, ty: Type::List(Box::new(Type::NonNullNamed( http_header_mapping.name.clone(), ))) @@ -173,14 +172,14 @@ pub(super) fn check_or_add( 
connect_http_field_list.extend([ InputValueDefinition { description: None, - name: PATH_ARGUMENT_NAME.clone(), + name: PATH_ARGUMENT_NAME, ty: Type::Named(json_selection_spec.name.clone()).into(), default_value: None, directives: Default::default(), }, InputValueDefinition { description: None, - name: QUERY_PARAMS_ARGUMENT_NAME.clone(), + name: QUERY_PARAMS_ARGUMENT_NAME, ty: Type::Named(json_selection_spec.name.clone()).into(), default_value: None, directives: Default::default(), @@ -252,7 +251,7 @@ pub(super) fn check_or_add( &[ DirectiveArgumentSpecification { base_spec: ArgumentSpecification { - name: CONNECT_SOURCE_ARGUMENT_NAME.clone(), + name: CONNECT_SOURCE_ARGUMENT_NAME, get_type: |_, _| Ok(ty!(String)), default_value: None, }, @@ -260,7 +259,7 @@ pub(super) fn check_or_add( }, DirectiveArgumentSpecification { base_spec: ArgumentSpecification { - name: HTTP_ARGUMENT_NAME.clone(), + name: HTTP_ARGUMENT_NAME, get_type: |s, _| { let name = s .metadata() @@ -276,7 +275,7 @@ pub(super) fn check_or_add( }, DirectiveArgumentSpecification { base_spec: ArgumentSpecification { - name: BATCH_ARGUMENT_NAME.clone(), + name: BATCH_ARGUMENT_NAME, get_type: |s, _| { let name = s .metadata() @@ -292,7 +291,7 @@ pub(super) fn check_or_add( }, DirectiveArgumentSpecification { base_spec: ArgumentSpecification { - name: CONNECT_SELECTION_ARGUMENT_NAME.clone(), + name: CONNECT_SELECTION_ARGUMENT_NAME, get_type: |s, _| { let name = s .metadata() @@ -308,7 +307,7 @@ pub(super) fn check_or_add( }, DirectiveArgumentSpecification { base_spec: ArgumentSpecification { - name: CONNECT_ENTITY_ARGUMENT_NAME.clone(), + name: CONNECT_ENTITY_ARGUMENT_NAME, get_type: |_, _| Ok(Type::Named(name!(Boolean))), default_value: Some(Value::Boolean(false)), }, @@ -316,7 +315,10 @@ pub(super) fn check_or_add( }, ], true, - spec.connect_directive_locations(), + &[ + DirectiveLocation::FieldDefinition, + DirectiveLocation::Object, + ], false, None, None, @@ -327,14 +329,14 @@ pub(super) fn 
check_or_add( let mut source_http_field_list = vec![ InputValueDefinition { description: None, - name: SOURCE_BASE_URL_ARGUMENT_NAME.clone(), + name: SOURCE_BASE_URL_ARGUMENT_NAME, ty: ty!(String!).into(), default_value: None, directives: Default::default(), }, InputValueDefinition { description: None, - name: HEADERS_ARGUMENT_NAME.clone(), + name: HEADERS_ARGUMENT_NAME, ty: Type::List(Box::new(Type::NonNullNamed( http_header_mapping.name.clone(), ))) @@ -347,14 +349,14 @@ pub(super) fn check_or_add( source_http_field_list.extend([ InputValueDefinition { description: None, - name: PATH_ARGUMENT_NAME.clone(), + name: PATH_ARGUMENT_NAME, ty: Type::Named(json_selection_spec.name.clone()).into(), default_value: None, directives: Default::default(), }, InputValueDefinition { description: None, - name: QUERY_PARAMS_ARGUMENT_NAME.clone(), + name: QUERY_PARAMS_ARGUMENT_NAME, ty: Type::Named(json_selection_spec.name.clone()).into(), default_value: None, directives: Default::default(), @@ -392,7 +394,7 @@ pub(super) fn check_or_add( &[ DirectiveArgumentSpecification { base_spec: ArgumentSpecification { - name: SOURCE_NAME_ARGUMENT_NAME.clone(), + name: SOURCE_NAME_ARGUMENT_NAME, get_type: |_, _| Ok(ty!(String!)), default_value: None, }, @@ -400,7 +402,7 @@ pub(super) fn check_or_add( }, DirectiveArgumentSpecification { base_spec: ArgumentSpecification { - name: HTTP_ARGUMENT_NAME.clone(), + name: HTTP_ARGUMENT_NAME, get_type: |s, _| { let name = s .metadata() @@ -468,7 +470,7 @@ mod tests { .for_identity(&ConnectSpec::identity()) .unwrap(); - check_or_add(&link, &ConnectSpec::V0_1, &mut federation_schema).unwrap(); + check_or_add(&link, &mut federation_schema).unwrap(); assert_snapshot!(federation_schema.schema().serialize().to_string(), @r###" schema { @@ -479,7 +481,7 @@ mod tests { directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - directive @connect(source: String, http: connect__ConnectHTTP, batch: 
connect__ConnectBatch, selection: connect__JSONSelection!, entity: Boolean = false) repeatable on FIELD_DEFINITION + directive @connect(source: String, http: connect__ConnectHTTP, batch: connect__ConnectBatch, selection: connect__JSONSelection!, entity: Boolean = false) repeatable on FIELD_DEFINITION | OBJECT directive @source(name: String!, http: connect__SourceHTTP) repeatable on SCHEMA @@ -548,7 +550,7 @@ mod tests { .for_identity(&ConnectSpec::identity()) .unwrap(); - check_or_add(&link, &ConnectSpec::V0_2, &mut federation_schema).unwrap(); + check_or_add(&link, &mut federation_schema).unwrap(); assert_snapshot!(federation_schema.schema().serialize().to_string(), @r###" schema { diff --git a/apollo-federation/src/sources/connect/spec/versions.rs b/apollo-federation/src/sources/connect/spec/versions.rs deleted file mode 100644 index 43179d447c..0000000000 --- a/apollo-federation/src/sources/connect/spec/versions.rs +++ /dev/null @@ -1,80 +0,0 @@ -use http::HeaderName; -use http::header; - -use super::ConnectSpec; - -/// Container for version-specific information -pub(crate) struct VersionInfo { - pub(crate) allowed_headers: AllowedHeaders, -} - -impl VersionInfo { - fn new(version: &ConnectSpec) -> Self { - Self { - allowed_headers: AllowedHeaders::new(version), - } - } -} - -impl From for VersionInfo { - fn from(version: ConnectSpec) -> Self { - Self::new(&version) - } -} - -/// Information about headers that differs between versions -pub(crate) struct AllowedHeaders { - reserved_headers: Vec, - static_headers: Vec, -} - -impl AllowedHeaders { - pub(crate) fn header_name_is_reserved(&self, header_name: &HeaderName) -> bool { - self.reserved_headers.contains(header_name) - } - - pub(crate) fn header_name_allowed_static(&self, header_name: &HeaderName) -> bool { - self.static_headers.contains(header_name) - } - - fn new(version: &ConnectSpec) -> Self { - match version { - ConnectSpec::V0_1 => Self { - reserved_headers: vec![ - header::CONNECTION, - 
header::PROXY_AUTHENTICATE, - header::PROXY_AUTHORIZATION, - header::TE, - header::TRAILER, - header::TRANSFER_ENCODING, - header::UPGRADE, - header::CONTENT_LENGTH, - header::CONTENT_ENCODING, - header::HOST, - header::ACCEPT_ENCODING, - KEEP_ALIVE.clone(), - ], - static_headers: vec![header::CONTENT_TYPE, header::ACCEPT], - }, - // moves Host to allow setting it via `value:` - ConnectSpec::V0_2 => Self { - reserved_headers: vec![ - header::CONNECTION, - header::PROXY_AUTHENTICATE, - header::PROXY_AUTHORIZATION, - header::TE, - header::TRAILER, - header::TRANSFER_ENCODING, - header::UPGRADE, - header::CONTENT_LENGTH, - header::CONTENT_ENCODING, - header::ACCEPT_ENCODING, - KEEP_ALIVE.clone(), - ], - static_headers: vec![header::CONTENT_TYPE, header::ACCEPT, header::HOST], - }, - } - } -} - -static KEEP_ALIVE: HeaderName = HeaderName::from_static("keep-alive"); diff --git a/apollo-federation/src/sources/connect/string_template.rs b/apollo-federation/src/sources/connect/string_template.rs index 69f9fa20fe..550a73d447 100644 --- a/apollo-federation/src/sources/connect/string_template.rs +++ b/apollo-federation/src/sources/connect/string_template.rs @@ -165,7 +165,7 @@ impl Display for StringTemplate { /// A general-purpose error type which includes both a description of the problem and the offset span /// within the original expression where the problem occurred. Used for both parsing and interpolation. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub struct Error { /// A human-readable description of the issue. 
pub message: String, @@ -325,7 +325,7 @@ mod encoding { } impl UriString { - pub(crate) fn new() -> Self { + pub(crate) const fn new() -> Self { Self { value: String::new(), } @@ -638,7 +638,7 @@ mod test_interpolate_uri { let uri = template.interpolate(vars).expect("Failed to interpolate"); - assert_eq!(uri.to_string(), "/1/1.2/true//string") + assert_eq!(uri, "/1/1.2/true//string") } #[test] diff --git a/apollo-federation/src/sources/connect/validation/connect.rs b/apollo-federation/src/sources/connect/validation/connect.rs index 9f1c0a69d5..e6e15f73e6 100644 --- a/apollo-federation/src/sources/connect/validation/connect.rs +++ b/apollo-federation/src/sources/connect/validation/connect.rs @@ -16,7 +16,6 @@ use super::coordinates::ConnectDirectiveCoordinate; use super::coordinates::connect_directive_name_coordinate; use super::coordinates::source_name_value_coordinate; use super::source::SourceName; -use crate::sources::connect::ConnectSpec; use crate::sources::connect::Namespace; use crate::sources::connect::id::ConnectedElement; use crate::sources::connect::id::ObjectCategory; @@ -130,27 +129,8 @@ impl<'schema> Connect<'schema> { .map(|coordinate| Self::parse(coordinate, schema, source_names)) .partition_result(); - let mut messages: Vec = messages.into_iter().flatten().collect(); + let messages: Vec = messages.into_iter().flatten().collect(); - // TODO: find a better place for feature gates like this - if schema.connect_link.spec == ConnectSpec::V0_1 - && connects - .iter() - .any(|connect| matches!(connect.coordinate.element, ConnectedElement::Type { .. })) - { - messages.push(Message { - code: Code::FeatureUnavailable, - message: format!( - "Using `@{connect_directive_name}` on `type {object_name}` requires connectors v0.2. 
Learn more at https://go.apollo.dev/connectors/changelog.", - object_name = object.name, - connect_directive_name = schema.connect_directive_name(), - ), - locations: object - .line_column_range(&schema.sources) - .into_iter() - .collect(), - }); - } (connects, messages) } diff --git a/apollo-federation/src/sources/connect/validation/connect/entity.rs b/apollo-federation/src/sources/connect/validation/connect/entity.rs index 175681a27c..762ad0aba8 100644 --- a/apollo-federation/src/sources/connect/validation/connect/entity.rs +++ b/apollo-federation/src/sources/connect/validation/connect/entity.rs @@ -248,10 +248,7 @@ impl<'schema> ArgumentVisitor<'schema> { &mut self, field: &'schema Node, entity_type: &'schema Node, - ) -> Result< - Vec>, - as FieldVisitor>>::Error, - > { + ) -> Result>, >>::Error> { // At the root level, visit each argument to the entity field field.arguments.iter().filter_map(|arg| { if let Some(input_type) = self.schema.types.get(arg.ty.inner_named_type()) { @@ -291,10 +288,7 @@ impl<'schema> ArgumentVisitor<'schema> { &mut self, child_input_type: &'schema Node, entity_type: &'schema ExtendedType, - ) -> Result< - Vec>, - as FieldVisitor>>::Error, - > { + ) -> Result>, >>::Error> { // At the child level, visit each field on the input type let ExtendedType::Object(entity_object_type) = entity_type else { // Entity type was not an object type - this will be reported by field visitor diff --git a/apollo-federation/src/sources/connect/validation/connect/selection.rs b/apollo-federation/src/sources/connect/validation/connect/selection.rs index e3b70dc39b..25e55d00c9 100644 --- a/apollo-federation/src/sources/connect/validation/connect/selection.rs +++ b/apollo-federation/src/sources/connect/validation/connect/selection.rs @@ -224,7 +224,7 @@ struct SelectionValidator<'schema> { } impl<'schema> SelectionValidator<'schema> { - fn new( + const fn new( schema: &'schema SchemaInfo<'schema>, root: PathPart<'schema>, string: GraphQLString<'schema>, @@ 
-349,7 +349,7 @@ enum PathPart<'a> { } impl PathPart<'_> { - fn ty(&self) -> &Node { + const fn ty(&self) -> &Node { match self { PathPart::Root(ty) => ty, PathPart::Field { ty, .. } => ty, diff --git a/apollo-federation/src/sources/connect/validation/connect/selection/variables.rs b/apollo-federation/src/sources/connect/validation/connect/selection/variables.rs index e61d0756bb..3bbe58ac85 100644 --- a/apollo-federation/src/sources/connect/validation/connect/selection/variables.rs +++ b/apollo-federation/src/sources/connect/validation/connect/selection/variables.rs @@ -154,7 +154,7 @@ fn resolve_path( "`{variable_type}` does not have a field named `{nested_field_name}`." ), locations: expression.line_col_for_subslice( - path_component_range.start..path_component_range.end, + path_component_range, schema ).into_iter().collect(), }) @@ -178,7 +178,7 @@ pub(crate) struct ThisResolver<'a> { } impl<'a> ThisResolver<'a> { - pub(crate) fn new(object: &'a ObjectType, field: &'a Component) -> Self { + pub(crate) const fn new(object: &'a ObjectType, field: &'a Component) -> Self { Self { object, field } } } @@ -222,7 +222,7 @@ pub(crate) struct ArgsResolver<'a> { } impl<'a> ArgsResolver<'a> { - pub(crate) fn new(field: &'a Component) -> Self { + pub(crate) const fn new(field: &'a Component) -> Self { Self { field } } } diff --git a/apollo-federation/src/sources/connect/validation/graphql.rs b/apollo-federation/src/sources/connect/validation/graphql.rs index 0b2b87efd5..a30190333e 100644 --- a/apollo-federation/src/sources/connect/validation/graphql.rs +++ b/apollo-federation/src/sources/connect/validation/graphql.rs @@ -11,7 +11,6 @@ mod strings; pub(super) use strings::GraphQLString; -use crate::sources::connect::spec::versions::VersionInfo; use crate::sources::connect::validation::link::ConnectLink; pub(super) struct SchemaInfo<'schema> { @@ -19,7 +18,6 @@ pub(super) struct SchemaInfo<'schema> { len: usize, lookup: LineColLookup<'schema>, pub(crate) connect_link: 
ConnectLink<'schema>, - pub(crate) version_info: VersionInfo, /// A lookup map for the Shapes computed from GraphQL types. pub(crate) shape_lookup: IndexMap<&'schema str, Shape>, } @@ -30,13 +28,11 @@ impl<'schema> SchemaInfo<'schema> { src: &'schema str, connect_link: ConnectLink<'schema>, ) -> Self { - let version_info = connect_link.spec.into(); Self { schema, len: src.len(), lookup: LineColLookup::new(src), connect_link, - version_info, shape_lookup: shape::graphql::shapes_for_schema(schema), } } @@ -55,12 +51,12 @@ impl<'schema> SchemaInfo<'schema> { #[inline] pub(crate) fn source_directive_name(&self) -> &Name { - &self.connect_link.source_directive_name + self.connect_link.source_directive_name() } #[inline] pub(crate) fn connect_directive_name(&self) -> &Name { - &self.connect_link.connect_directive_name + self.connect_link.connect_directive_name() } } diff --git a/apollo-federation/src/sources/connect/validation/graphql/strings.rs b/apollo-federation/src/sources/connect/validation/graphql/strings.rs index 6d0ed700a1..07f80e93e0 100644 --- a/apollo-federation/src/sources/connect/validation/graphql/strings.rs +++ b/apollo-federation/src/sources/connect/validation/graphql/strings.rs @@ -18,7 +18,7 @@ use nom::AsChar; use crate::sources::connect::validation::graphql::SchemaInfo; -fn is_whitespace(c: char) -> bool { +const fn is_whitespace(c: char) -> bool { matches!(c, ' ' | '\t') } @@ -110,7 +110,7 @@ impl<'schema> GraphQLString<'schema> { }) } - pub(crate) fn as_str(&self) -> &str { + pub(crate) const fn as_str(&self) -> &str { match self { GraphQLString::Standard { data } => data.compiled_string, GraphQLString::Block { data, .. 
} => data.compiled_string, diff --git a/apollo-federation/src/sources/connect/validation/http/headers.rs b/apollo-federation/src/sources/connect/validation/http/headers.rs index 70253e5cbc..4a8ed6522f 100644 --- a/apollo-federation/src/sources/connect/validation/http/headers.rs +++ b/apollo-federation/src/sources/connect/validation/http/headers.rs @@ -39,7 +39,7 @@ impl<'schema> Headers<'schema> { #[allow(clippy::mutable_key_type)] let mut headers: IndexMap = IndexMap::new(); - for header in Header::from_headers_arg(headers_arg, &schema.version_info.allowed_headers) { + for header in Header::from_headers_arg(headers_arg) { let header = match header { Ok(header) => header, Err(err) => { diff --git a/apollo-federation/src/sources/connect/validation/link.rs b/apollo-federation/src/sources/connect/validation/link.rs index 1ecf593ae8..ef218c912c 100644 --- a/apollo-federation/src/sources/connect/validation/link.rs +++ b/apollo-federation/src/sources/connect/validation/link.rs @@ -7,6 +7,7 @@ use apollo_compiler::Schema; use apollo_compiler::schema::Component; use apollo_compiler::schema::Directive; use itertools::Itertools; +use strum::IntoEnumIterator; use crate::link::Link; use crate::sources::connect::ConnectSpec; @@ -16,10 +17,10 @@ use crate::sources::connect::validation::Message; /// The `@link` in a subgraph which enables connectors #[derive(Clone, Debug)] pub(super) struct ConnectLink<'schema> { - pub(crate) spec: ConnectSpec, - pub(crate) source_directive_name: Name, - pub(crate) connect_directive_name: Name, - pub(crate) directive: &'schema Component, + spec: ConnectSpec, + source_directive_name: Name, + connect_directive_name: Name, + directive: &'schema Component, link: Link, } @@ -35,21 +36,10 @@ impl<'schema> ConnectLink<'schema> { let spec = match ConnectSpec::try_from(&link.url.version) { Err(err) => { - let available_versions = ConnectSpec::available(); - let message = if available_versions.len() == 1 { - // TODO: No need to branch here once multiple 
spec versions are available - format!("{err}; should be {version}.", version = ConnectSpec::V0_1) - } else { - // This won't happen today, but it's prepping for 0.2 so we don't forget - format!( - "{err}; should be one of {available_versions}.", - available_versions = available_versions - .iter() - .copied() - .map(ConnectSpec::as_str) - .join(", "), - ) - }; + let message = format!( + "{err}; should be one of {available_versions}.", + available_versions = ConnectSpec::iter().map(ConnectSpec::as_str).join(", "), + ); return Some(Err(Message { code: Code::UnknownConnectorsVersion, message, @@ -71,10 +61,30 @@ impl<'schema> ConnectLink<'schema> { link, })) } + + pub(super) fn spec(&self) -> ConnectSpec { + self.spec + } + pub(super) fn set_spec(&mut self, spec: ConnectSpec) { + self.spec = spec; + self.link.url.version = spec.into(); + } + + pub(super) fn source_directive_name(&self) -> &Name { + &self.source_directive_name + } + + pub(super) fn connect_directive_name(&self) -> &Name { + &self.connect_directive_name + } + + pub(super) fn directive(&self) -> &Component { + self.directive + } } impl Display for ConnectLink<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "@link(url: \"{}\")", self.link.url) + write!(f, "{}", self.link) } } diff --git a/apollo-federation/src/sources/connect/validation/mod.rs b/apollo-federation/src/sources/connect/validation/mod.rs index ad90ccd7cb..88c1f2c105 100644 --- a/apollo-federation/src/sources/connect/validation/mod.rs +++ b/apollo-federation/src/sources/connect/validation/mod.rs @@ -38,6 +38,7 @@ use itertools::Itertools; use strum_macros::Display; use strum_macros::IntoStaticStr; +use crate::sources::connect::ConnectSpec; use crate::sources::connect::spec::schema::SOURCE_DIRECTIVE_NAME_IN_SPEC; use crate::sources::connect::validation::connect::fields_seen_by_all_connects; use crate::sources::connect::validation::graphql::GraphQLString; @@ -56,18 +57,21 @@ pub struct ValidationResult { /// 
The parsed (and potentially invalid) schema of the subgraph pub schema: Schema, + + /// The optionally transformed schema to be used in later steps. + pub transformed: String, } /// Validate the connectors-related directives `@source` and `@connect`. /// /// This function attempts to collect as many validation errors as possible, so it does not bail /// out as soon as it encounters one. -pub fn validate(source_text: &str, file_name: &str) -> ValidationResult { +pub fn validate(mut source_text: String, file_name: &str) -> ValidationResult { // TODO: Use parse_and_validate (adding in directives as needed) // TODO: Handle schema errors rather than relying on JavaScript to catch it later let schema = SchemaBuilder::new() .adopt_orphan_extensions() - .parse(source_text, file_name) + .parse(&source_text, file_name) .build() .unwrap_or_else(|schema_with_errors| schema_with_errors.partial); let link = match ConnectLink::new(&schema) { @@ -76,6 +80,7 @@ pub fn validate(source_text: &str, file_name: &str) -> ValidationResult { errors: Vec::new(), has_connectors: false, schema, + transformed: source_text, }; } Some(Err(err)) => { @@ -83,11 +88,12 @@ pub fn validate(source_text: &str, file_name: &str) -> ValidationResult { errors: vec![err], has_connectors: true, schema, + transformed: source_text, }; } Some(Ok(link)) => link, }; - let schema_info = SchemaInfo::new(&schema, source_text, link); + let schema_info = SchemaInfo::new(&schema, &source_text, link); let (source_directives, mut messages) = SourceDirective::find(&schema_info); let all_source_names = source_directives @@ -109,7 +115,7 @@ pub fn validate(source_text: &str, file_name: &str) -> ValidationResult { } } - if schema_info.connect_link.source_directive_name == DEFAULT_SOURCE_DIRECTIVE_NAME + if schema_info.connect_link.source_directive_name() == DEFAULT_SOURCE_DIRECTIVE_NAME && messages .iter() .any(|error| error.code == Code::NoSourcesDefined) @@ -117,16 +123,44 @@ pub fn validate(source_text: &str, file_name: 
&str) -> ValidationResult { messages.push(Message { code: Code::NoSourceImport, message: format!("The `@{SOURCE_DIRECTIVE_NAME_IN_SPEC}` directive is not imported. Try adding `@{SOURCE_DIRECTIVE_NAME_IN_SPEC}` to `import` for `{link}`", link=schema_info.connect_link), - locations: schema_info.connect_link.directive.line_column_range(&schema.sources) + locations: schema_info.connect_link.directive().line_column_range(&schema.sources) .into_iter() .collect(), }); } + // Auto-upgrade the schema as the _last_ step, so that error messages from earlier don't have + // incorrect line/col info. + if schema_info.connect_link.spec() == ConnectSpec::V0_1 { + if let Some(replace_range) = schema_info.connect_link.directive().location() { + let mut new_link = schema_info.connect_link.clone(); + new_link.set_spec(ConnectSpec::V0_2); + source_text.replace_range( + replace_range.offset()..replace_range.end_offset(), + &new_link.to_string(), + ); + } else { + messages.push(Message { + code: Code::UnknownConnectorsVersion, + message: "Failed to auto-upgrade 0.1 to 0.2, you must manually update the version in `@link`".to_string(), + locations: schema_info.connect_link.directive().line_column_range(&schema.sources) + .into_iter() + .collect(), + }); + return ValidationResult { + errors: messages, + has_connectors: true, + schema, + transformed: source_text, + }; + }; + } + ValidationResult { errors: messages, has_connectors: true, schema, + transformed: source_text, } } @@ -272,7 +306,7 @@ pub enum Code { } impl Code { - pub const fn severity(&self) -> Severity { + pub fn severity(&self) -> Severity { match self { Self::NoSourceImport | Self::NullabilityMismatch => Severity::Warning, _ => Severity::Error, @@ -295,6 +329,7 @@ mod test_validate_source { use insta::assert_snapshot; use insta::glob; + use pretty_assertions::assert_str_eq; use super::*; @@ -304,9 +339,19 @@ mod test_validate_source { glob!("test_data", "**/*.graphql", |path| { let schema = read_to_string(path).unwrap(); let 
start_time = std::time::Instant::now(); - let result = validate(&schema, path.to_str().unwrap()); + let result = validate(schema.clone(), path.to_str().unwrap()); let end_time = std::time::Instant::now(); assert_snapshot!(format!("{:#?}", result.errors)); + if path.parent().is_some_and(|parent| parent.ends_with("transformed")) { + assert_snapshot!(&diff::lines(&schema, &result.transformed).into_iter().filter_map(|res| match res { + diff::Result::Left(line) => Some(format!("- {line}")), + diff::Result::Right(line) => Some(format!("+ {line}")), + diff::Result::Both(_, _) => None, + }).join("\n")); + } else { + assert_str_eq!(schema, result.transformed, "Schema should not have been transformed by validations") + } + assert!(end_time - start_time < std::time::Duration::from_millis(100)); }); }); diff --git a/apollo-federation/src/sources/connect/validation/schema.rs b/apollo-federation/src/sources/connect/validation/schema.rs index d6148025c1..b351b29516 100644 --- a/apollo-federation/src/sources/connect/validation/schema.rs +++ b/apollo-federation/src/sources/connect/validation/schema.rs @@ -146,9 +146,9 @@ fn check_seen_fields( fields_seen_by_connectors: Vec<(Name, Name)>, ) -> impl Iterator { let federation = Link::for_identity(schema, &Identity::federation_identity()); - let external_directive_name = federation - .map(|(link, _)| link.directive_name_in_schema(&EXTERNAL_DIRECTIVE_NAME)) - .unwrap_or(EXTERNAL_DIRECTIVE_NAME.clone()); + let external_directive_name = federation.map_or(EXTERNAL_DIRECTIVE_NAME, |(link, _)| { + link.directive_name_in_schema(&EXTERNAL_DIRECTIVE_NAME) + }); let all_fields: IndexSet<_> = schema .types @@ -269,32 +269,30 @@ fn resolvable_key_fields<'a>( .unwrap_or(true) }) .filter_map(|directive| { - if let Some(fields_str) = directive + directive .arguments .iter() .find(|arg| arg.name == FEDERATION_FIELDS_ARGUMENT_NAME) .map(|arg| &arg.value) .and_then(|value| value.as_str()) - { - Parser::new() - .parse_field_set( - 
Valid::assume_valid_ref(schema), - object.name.clone(), - fields_str.to_string(), - "", - ) - .ok() - .map(|field_set| (field_set, directive)) - } else { - None - } + .and_then(|fields_str| { + Parser::new() + .parse_field_set( + Valid::assume_valid_ref(schema), + object.name.clone(), + fields_str.to_string(), + "", + ) + .ok() + .map(|field_set| (field_set, directive)) + }) }) } fn advanced_validations(schema: &SchemaInfo, subgraph_name: &str) -> Vec { let mut messages = Vec::new(); - let Ok(connectors) = Connector::from_schema(schema, subgraph_name, schema.connect_link.spec) + let Ok(connectors) = Connector::from_schema(schema, subgraph_name, schema.connect_link.spec()) else { return messages; }; diff --git a/apollo-federation/src/sources/connect/validation/schema/keys.rs b/apollo-federation/src/sources/connect/validation/schema/keys.rs index ea8bc7de2a..50d5c9ce80 100644 --- a/apollo-federation/src/sources/connect/validation/schema/keys.rs +++ b/apollo-federation/src/sources/connect/validation/schema/keys.rs @@ -56,13 +56,11 @@ impl<'schema> EntityKeyChecker<'schema> { for (key, directive, _) in &self.resolvable_keys { let for_type = self.entity_connectors.get(&key.selection_set.ty); - let key_exists = for_type - .map(|connectors| { - connectors - .iter() - .any(|connector| field_set_is_subset(key, connector)) - }) - .unwrap_or(false); + let key_exists = for_type.is_some_and(|connectors| { + connectors + .iter() + .any(|connector| field_set_is_subset(key, connector)) + }); if !key_exists { messages.push(Message { code: Code::MissingEntityConnector, @@ -130,7 +128,7 @@ pub(crate) fn field_set_error( variables.iter().join("`, `"), type_name ), - locations: vec![], + locations: Vec::new(), } } diff --git a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@feature-gates__v0_1_connect_on_object.graphql.snap b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@feature-gates__v0_1_connect_on_object.graphql.snap deleted 
file mode 100644 index f0d031aea6..0000000000 --- a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@feature-gates__v0_1_connect_on_object.graphql.snap +++ /dev/null @@ -1,14 +0,0 @@ ---- -source: apollo-federation/src/sources/connect/validation/mod.rs -expression: "format!(\"{:#?}\", result.errors)" -input_file: apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_1_connect_on_object.graphql ---- -[ - Message { - code: FeatureUnavailable, - message: "Using `@connect` on `type Product` requires connectors v0.2. Learn more at https://go.apollo.dev/connectors/changelog.", - locations: [ - 13:1..19:2, - ], - }, -] diff --git a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@headers__disallowed_header_names.graphql.snap b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@headers__disallowed_header_names.graphql.snap index a672444ed0..318e71c976 100644 --- a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@headers__disallowed_header_names.graphql.snap +++ b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@headers__disallowed_header_names.graphql.snap @@ -69,7 +69,7 @@ input_file: apollo-federation/src/sources/connect/validation/test_data/headers/d }, Message { code: InvalidHeader, - message: "In `@source(http.headers:)` header 'host' is reserved and cannot be set by a connector", + message: "In `@source(http.headers:)` header 'host' can't be set with `from`, only with `value`", locations: [ 20:17..20:23, ], @@ -165,13 +165,6 @@ input_file: apollo-federation/src/sources/connect/validation/test_data/headers/d 41:17..41:35, ], }, - Message { - code: InvalidHeader, - message: "In `@source(http.headers:)` header 'host' is reserved and cannot be set by a connector", - locations: [ - 42:17..42:23, - ], - }, Message { code: InvalidHeader, message: "In `@source(http.headers:)` header 'accept-encoding' is reserved and cannot 
be set by a connector", diff --git a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@missing_source_import.graphql.snap b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@missing_source_import.graphql.snap index cfaa27a744..6c886b500a 100644 --- a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@missing_source_import.graphql.snap +++ b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@missing_source_import.graphql.snap @@ -13,7 +13,7 @@ input_file: apollo-federation/src/sources/connect/validation/test_data/missing_s }, Message { code: NoSourceImport, - message: "The `@source` directive is not imported. Try adding `@source` to `import` for `@link(url: \"https://specs.apollo.dev/connect/v0.1\")`", + message: "The `@source` directive is not imported. Try adding `@source` to `import` for `@link(url: \"https://specs.apollo.dev/connect/v0.2\", import: [\"@connect\"])`", locations: [ 2:3..2:76, ], diff --git a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@transformed__upgrade_0.1.graphql-2.snap b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@transformed__upgrade_0.1.graphql-2.snap new file mode 100644 index 0000000000..4493b1be8a --- /dev/null +++ b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@transformed__upgrade_0.1.graphql-2.snap @@ -0,0 +1,7 @@ +--- +source: apollo-federation/src/sources/connect/validation/mod.rs +expression: "&diff::lines(&schema,\n&result.transformed).into_iter().filter_map(|res| match res\n{\n diff::Result::Left(line) => Some(format!(\"- {line}\")),\n diff::Result::Right(line) => Some(format!(\"+ {line}\")),\n diff::Result::Both(_, _) => None,\n}).join(\"\\n\")" +input_file: apollo-federation/src/sources/connect/validation/test_data/transformed/upgrade_0.1.graphql +--- +- @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) ++ 
@link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) diff --git a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@feature-gates__v0_2_connect_on_object.graphql.snap b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@transformed__upgrade_0.1.graphql.snap similarity index 78% rename from apollo-federation/src/sources/connect/validation/snapshots/validation_tests@feature-gates__v0_2_connect_on_object.graphql.snap rename to apollo-federation/src/sources/connect/validation/snapshots/validation_tests@transformed__upgrade_0.1.graphql.snap index 4ac0b97d9f..44db42e2ab 100644 --- a/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@feature-gates__v0_2_connect_on_object.graphql.snap +++ b/apollo-federation/src/sources/connect/validation/snapshots/validation_tests@transformed__upgrade_0.1.graphql.snap @@ -1,6 +1,6 @@ --- source: apollo-federation/src/sources/connect/validation/mod.rs expression: "format!(\"{:#?}\", result.errors)" -input_file: apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_2_connect_on_object.graphql +input_file: apollo-federation/src/sources/connect/validation/test_data/transformed/upgrade_0.1.graphql --- [] diff --git a/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected.graphql b/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected.graphql index 7cb8035eb4..9d86c42ba3 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected.graphql @@ -3,7 +3,7 @@ extend schema url: "https://specs.apollo.dev/federation/v2.10" import: ["@key", "@external", "@requires"] ) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { ts: [T] diff --git 
a/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected_repro.graphql b/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected_repro.graphql index 6f0242a615..84efd72b91 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected_repro.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/all_fields_selected_repro.graphql @@ -4,7 +4,7 @@ extend schema import: ["@key", "@requires", "@override", "@external", "@shareable"] ) @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@source", "@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/body_selection.graphql b/apollo-federation/src/sources/connect/validation/test_data/body_selection.graphql index 617cbf3455..01d0261187 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/body_selection.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/body_selection.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/circular_reference.graphql b/apollo-federation/src/sources/connect/validation/test_data/circular_reference.graphql index fb1338f4e7..b22786416d 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/circular_reference.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/circular_reference.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/circular_reference_2.graphql b/apollo-federation/src/sources/connect/validation/test_data/circular_reference_2.graphql index 7abee48bac..c59d1e0ac9 100644 --- 
a/apollo-federation/src/sources/connect/validation/test_data/circular_reference_2.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/circular_reference_2.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { track(id: ID!): Track diff --git a/apollo-federation/src/sources/connect/validation/test_data/circular_reference_3.graphql b/apollo-federation/src/sources/connect/validation/test_data/circular_reference_3.graphql index 47fe256290..82b2cac917 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/circular_reference_3.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/circular_reference_3.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { me: User @connect(http: { GET: "http://127.0.0.1/me" }, selection: "id name") diff --git a/apollo-federation/src/sources/connect/validation/test_data/connect_source_name_mismatch.graphql b/apollo-federation/src/sources/connect/validation/test_data/connect_source_name_mismatch.graphql index 797aa73101..ca9066d800 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/connect_source_name_mismatch.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/connect_source_name_mismatch.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/connect_source_undefined.graphql b/apollo-federation/src/sources/connect/validation/test_data/connect_source_undefined.graphql index 1957c26bb4..951b538752 100644 --- 
a/apollo-federation/src/sources/connect/validation/test_data/connect_source_undefined.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/connect_source_undefined.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/denest_scalars.graphql b/apollo-federation/src/sources/connect/validation/test_data/denest_scalars.graphql index 0a734eb285..cbd736c5ad 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/denest_scalars.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/denest_scalars.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { me: User diff --git a/apollo-federation/src/sources/connect/validation/test_data/denest_scalars2.graphql b/apollo-federation/src/sources/connect/validation/test_data/denest_scalars2.graphql index b6df406747..f718095f83 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/denest_scalars2.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/denest_scalars2.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { me: User diff --git a/apollo-federation/src/sources/connect/validation/test_data/disallowed_abstract_types.graphql b/apollo-federation/src/sources/connect/validation/test_data/disallowed_abstract_types.graphql index 03203d888d..08a457e086 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/disallowed_abstract_types.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/disallowed_abstract_types.graphql @@ -1,5 +1,5 
@@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { products: [Product] diff --git a/apollo-federation/src/sources/connect/validation/test_data/disallowed_federation_imports.graphql b/apollo-federation/src/sources/connect/validation/test_data/disallowed_federation_imports.graphql index a42bdb2b55..06b9ac430a 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/disallowed_federation_imports.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/disallowed_federation_imports.graphql @@ -12,7 +12,7 @@ extend schema ] ) @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/duplicate_source_name.graphql b/apollo-federation/src/sources/connect/validation/test_data/duplicate_source_name.graphql index 4d5a99f08e..73b21d170b 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/duplicate_source_name.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/duplicate_source_name.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v1", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/empty_selection.graphql b/apollo-federation/src/sources/connect/validation/test_data/empty_selection.graphql index ba956be496..85248b7850 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/empty_selection.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/empty_selection.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" 
import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/empty_source_name.graphql b/apollo-federation/src/sources/connect/validation/test_data/empty_source_name.graphql index 2c6f9cb4e4..75dca45df3 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/empty_source_name.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/empty_source_name.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_1_connect_on_object.graphql b/apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_1_connect_on_object.graphql deleted file mode 100644 index 93af39f150..0000000000 --- a/apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_1_connect_on_object.graphql +++ /dev/null @@ -1,19 +0,0 @@ -extend schema - @link(url: "https://specs.apollo.dev/federation/v2.10") - @link( - url: "https://specs.apollo.dev/connect/v0.1" - import: ["@source", "@connect"] - ) - -type Query { - products: [Product] - @connect(http: { GET: "http://localhost:4001/products" }, selection: "id") -} - -type Product - @connect( - http: { GET: "http://localhost:4001/products", body: "ids: $batch.id" } - selection: "id" - ) { - id: ID! 
-} diff --git a/apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_2_connect_on_object.graphql b/apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_2_connect_on_object.graphql deleted file mode 100644 index ea56e6c7cb..0000000000 --- a/apollo-federation/src/sources/connect/validation/test_data/feature-gates/v0_2_connect_on_object.graphql +++ /dev/null @@ -1,19 +0,0 @@ -extend schema - @link(url: "https://specs.apollo.dev/federation/v2.11") - @link( - url: "https://specs.apollo.dev/connect/v0.2" - import: ["@source", "@connect"] - ) - -type Query { - products: [Product] - @connect(http: { GET: "http://localhost:4001/products" }, selection: "id") -} - -type Product - @connect( - http: { GET: "http://localhost:4001/products", body: "ids: $batch.id" } - selection: "id" - ) { - id: ID! -} diff --git a/apollo-federation/src/sources/connect/validation/test_data/fields_with_arguments.graphql b/apollo-federation/src/sources/connect/validation/test_data/fields_with_arguments.graphql index 5fc799baa6..d7d3ea95d3 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/fields_with_arguments.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/fields_with_arguments.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { ts(first: Int): [T] diff --git a/apollo-federation/src/sources/connect/validation/test_data/group_selection_on_scalar.graphql b/apollo-federation/src/sources/connect/validation/test_data/group_selection_on_scalar.graphql index 6d644cdff2..9a5f533c15 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/group_selection_on_scalar.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/group_selection_on_scalar.graphql @@ -1,6 
+1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/headers/disallowed_header_names.graphql b/apollo-federation/src/sources/connect/validation/test_data/headers/disallowed_header_names.graphql index 3fe5a3371a..a7e2d466a5 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/headers/disallowed_header_names.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/headers/disallowed_header_names.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source( diff --git a/apollo-federation/src/sources/connect/validation/test_data/headers/expressions_that_evaluate_to_invalid_types.graphql b/apollo-federation/src/sources/connect/validation/test_data/headers/expressions_that_evaluate_to_invalid_types.graphql index f0d6a60b02..47c5001ef2 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/headers/expressions_that_evaluate_to_invalid_types.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/headers/expressions_that_evaluate_to_invalid_types.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source( diff --git a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_connect_http_headers.graphql b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_connect_http_headers.graphql index d01ec27812..60a543104e 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_connect_http_headers.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_connect_http_headers.graphql @@ -1,5 +1,5 @@ extend schema - 
@link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { resources: [String!]! diff --git a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_namespace_in_header_variables.graphql b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_namespace_in_header_variables.graphql index 00c68d3361..c684e5ce4a 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_namespace_in_header_variables.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_namespace_in_header_variables.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source( diff --git a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_nested_paths_in_header_variables.graphql b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_nested_paths_in_header_variables.graphql index 36b56d38f8..772fa0fc9a 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_nested_paths_in_header_variables.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_nested_paths_in_header_variables.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_source_http_headers.graphql b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_source_http_headers.graphql index 223dff2c7c..6185d5e738 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_source_http_headers.graphql +++ 
b/apollo-federation/src/sources/connect/validation/test_data/headers/invalid_source_http_headers.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source( diff --git a/apollo-federation/src/sources/connect/validation/test_data/invalid_chars_in_source_name.graphql b/apollo-federation/src/sources/connect/validation/test_data/invalid_chars_in_source_name.graphql index 0688c8877b..377b76f71e 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/invalid_chars_in_source_name.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/invalid_chars_in_source_name.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "u$ers", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/invalid_namespace_in_body_selection.graphql b/apollo-federation/src/sources/connect/validation/test_data/invalid_namespace_in_body_selection.graphql index 0d3ddb2437..170a160613 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/invalid_namespace_in_body_selection.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/invalid_namespace_in_body_selection.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/invalid_nested_paths_in_json_selection.graphql b/apollo-federation/src/sources/connect/validation/test_data/invalid_nested_paths_in_json_selection.graphql index 666f00de7a..1cc73c03d9 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/invalid_nested_paths_in_json_selection.graphql +++ 
b/apollo-federation/src/sources/connect/validation/test_data/invalid_nested_paths_in_json_selection.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/invalid_selection_syntax.graphql b/apollo-federation/src/sources/connect/validation/test_data/invalid_selection_syntax.graphql index 9ce31b3c16..e5ce18737c 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/invalid_selection_syntax.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/invalid_selection_syntax.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_is_object_but_field_is_not.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_is_object_but_field_is_not.graphql index 0399ec5ef0..7914549296 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_is_object_but_field_is_not.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_is_object_but_field_is_not.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { product(id: ProductInput!): Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_type_doesnt_match_field_type.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_type_doesnt_match_field_type.graphql index 
d9029ff1ab..cdd77f80a1 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_type_doesnt_match_field_type.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/arg_type_doesnt_match_field_type.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { product(id: String!): Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/composite_key_doesnt_match.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/composite_key_doesnt_match.graphql index 0f4e2f7cde..a5c4c5507c 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/composite_key_doesnt_match.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/composite_key_doesnt_match.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { "The auto-key `store` field doesn't match the explicit composite key, so that key is unresolved" diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch.graphql index 951b28331b..5513dc5889 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch.graphql +++ 
b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { product(id: ID!): Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch_composite_key.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch_composite_key.graphql index 507cd65180..aba541b6bc 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch_composite_key.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_arg_field_arg_name_mismatch_composite_key.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { product(id: ID!, store: StoreInput!): Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_list_type.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_list_type.graphql index 5e19542792..37ccdda9de 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_list_type.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_list_type.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: 
["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { users(id: ID!): [User] diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_non_root_field.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_non_root_field.graphql index 92b1fe2b11..c084b38864 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_non_root_field.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_on_non_root_field.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { user(id: ID!): User diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_non_null_type.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_non_null_type.graphql index e17256ab4c..546dcd1fbd 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_non_null_type.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_non_null_type.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { user(id: ID!): User! 
diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_scalar.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_scalar.graphql index 66ff40b7ff..e50d668836 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_scalar.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/entity_true_returning_scalar.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { name: String diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/multiple_keys_not_all_resolved.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/multiple_keys_not_all_resolved.graphql index 20725c014c..215e19580e 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/multiple_keys_not_all_resolved.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/multiple_keys_not_all_resolved.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { "The auto-key here matches the first `@key`, but the second `@key` is still unresolved" diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/no_args_for_entity_true.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/no_args_for_entity_true.graphql index b0dc483bcc..ad24dc55b3 100644 --- 
a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/no_args_for_entity_true.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/no_args_for_entity_true.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { product: Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/unrelated_keys.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/unrelated_keys.graphql index e4781dafa6..cc383e1333 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/unrelated_keys.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/invalid/unrelated_keys.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { products: [Product] diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/basic_implicit_key.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/basic_implicit_key.graphql index e529d1cc20..3898615620 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/basic_implicit_key.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/basic_implicit_key.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) 
type Query { user(id: ID!): User diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_non_resolvable_key.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_non_resolvable_key.graphql index 40d5f5b19f..4616faa44f 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_non_resolvable_key.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_non_resolvable_key.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.8", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { product(id: ID!): Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_one_of_multiple_keys.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_one_of_multiple_keys.graphql index a7f81db808..f68a299b18 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_one_of_multiple_keys.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_connector_matches_one_of_multiple_keys.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.8", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { product(id: ID!, store: StoreInput!): Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_field_counts_as_key_resolver.graphql 
b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_field_counts_as_key_resolver.graphql index d3d846ef37..9926bbc79b 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_field_counts_as_key_resolver.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/entity_field_counts_as_key_resolver.graphql @@ -1,7 +1,7 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.8", import: ["@key"]) @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://localhost" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/mix_explicit_and_implicit.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/mix_explicit_and_implicit.graphql index 960ecc55f2..33d34312d1 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/mix_explicit_and_implicit.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/mix_explicit_and_implicit.graphql @@ -1,6 +1,6 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.8", import: ["@key"]) - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { implicit(id: ID!): Product diff --git a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/multiple_entity_connectors_for_multiple_keys.graphql b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/multiple_entity_connectors_for_multiple_keys.graphql index 2945addbfc..08bc32503d 100644 --- 
a/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/multiple_entity_connectors_for_multiple_keys.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/keys_and_entities/valid/multiple_entity_connectors_for_multiple_keys.graphql @@ -1,7 +1,7 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.8", import: ["@key"]) @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v1", http: { baseURL: "http://localhost" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_mutation_field.graphql b/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_mutation_field.graphql index 6a0bd3e603..69292055b6 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_mutation_field.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_mutation_field.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_query_field.graphql b/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_query_field.graphql index edd21d5267..19ba436e28 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_query_field.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/missing_connect_on_query_field.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: {baseURL: "http://127.0.0.1"}) diff --git 
a/apollo-federation/src/sources/connect/validation/test_data/missing_http_method_on_connect.graphql b/apollo-federation/src/sources/connect/validation/test_data/missing_http_method_on_connect.graphql index c3900be6b6..53f920215d 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/missing_http_method_on_connect.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/missing_http_method_on_connect.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@source", "@connect"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/missing_source_import.graphql b/apollo-federation/src/sources/connect/validation/test_data/missing_source_import.graphql index 4895abc631..d4b3a57345 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/missing_source_import.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/missing_source_import.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) type Query { diff --git a/apollo-federation/src/sources/connect/validation/test_data/multiple_errors.graphql b/apollo-federation/src/sources/connect/validation/test_data/multiple_errors.graphql index 57e50694f1..8abd5ceefa 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/multiple_errors.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/multiple_errors.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "u$ers", http: { baseURL: "ftp://127.0.0.1" }) diff --git 
a/apollo-federation/src/sources/connect/validation/test_data/multiple_http_methods_on_connect.graphql b/apollo-federation/src/sources/connect/validation/test_data/multiple_http_methods_on_connect.graphql index 4030fb4ee1..5429f57166 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/multiple_http_methods_on_connect.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/multiple_http_methods_on_connect.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@source", "@connect"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/non_root_circular_reference.graphql b/apollo-federation/src/sources/connect/validation/test_data/non_root_circular_reference.graphql index c9904b6fe3..5776e1a5e2 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/non_root_circular_reference.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/non_root_circular_reference.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/renamed_connect_directive.graphql b/apollo-federation/src/sources/connect/validation/test_data/renamed_connect_directive.graphql index da1a44065b..056764b406 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/renamed_connect_directive.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/renamed_connect_directive.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: [{name: "@connect", as: "@data"}, "@source"] ) @source(name: "v2", http: {baseURL: 
"http://127.0.0.1"}) diff --git a/apollo-federation/src/sources/connect/validation/test_data/request_headers.graphql b/apollo-federation/src/sources/connect/validation/test_data/request_headers.graphql index 9f645294d1..38525c9c2f 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/request_headers.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/request_headers.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect", "@source"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect", "@source"]) @source( name: "invalid_api" http: { baseURL: "http://127.0.0.1", headers: [{ name: "x-my-header", value: "{$request.headers.someheader}" }] } diff --git a/apollo-federation/src/sources/connect/validation/test_data/select_nonexistant_group.graphql b/apollo-federation/src/sources/connect/validation/test_data/select_nonexistant_group.graphql index 092adae6ba..ab194b86df 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/select_nonexistant_group.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/select_nonexistant_group.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/source_directive_rename.graphql b/apollo-federation/src/sources/connect/validation/test_data/source_directive_rename.graphql index b6d1261885..af6a332418 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/source_directive_rename.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/source_directive_rename.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", { name: "@source", as: "@api" }] ) @api(name: "users", http: { baseURL: 
"blahblahblah" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/subscriptions_with_connectors.graphql b/apollo-federation/src/sources/connect/validation/test_data/subscriptions_with_connectors.graphql index 9506ad1fc9..73d481bf6a 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/subscriptions_with_connectors.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/subscriptions_with_connectors.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/transformed/upgrade_0.1.graphql b/apollo-federation/src/sources/connect/validation/test_data/transformed/upgrade_0.1.graphql new file mode 100644 index 0000000000..6502bd82f0 --- /dev/null +++ b/apollo-federation/src/sources/connect/validation/test_data/transformed/upgrade_0.1.graphql @@ -0,0 +1,14 @@ +extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.10" + import: ["@key", "@external", "@requires"] + ) + @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + +type Query { + something: String + @connect( + http: { GET: "http://localhost" } + selection: "$" + ) +} diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/absolute_connect_url_with_source.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/absolute_connect_url_with_source.graphql index e76fc5d5e1..563de77a2d 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/absolute_connect_url_with_source.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/absolute_connect_url_with_source.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: 
"https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/expressions-in-domain.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/expressions-in-domain.graphql index ced9ba328e..c62047068a 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/expressions-in-domain.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/expressions-in-domain.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-jsonselection-in-expression.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-jsonselection-in-expression.graphql index cbc47441ff..82facb2bf0 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-jsonselection-in-expression.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-jsonselection-in-expression.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-path-parameter.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-path-parameter.graphql index 8dfb31ce68..79c5eced7a 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-path-parameter.graphql +++ 
b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid-path-parameter.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v2", http: { baseURL: "http://127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url.graphql index dc3c5cddd9..75e1408f5a 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { resources: [String!]! @connect(http: { GET: "127.0.0.1" }, selection: "$") diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url_scheme.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url_scheme.graphql index f29d5d2a10..8c949d51d8 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url_scheme.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_connect_url_scheme.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { resources: [String!]! 
diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_namespace_in_url_template_variables.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_namespace_in_url_template_variables.graphql index 5af8460751..ab2d7c2132 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_namespace_in_url_template_variables.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_namespace_in_url_template_variables.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_nested_paths_in_url_template_variables.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_nested_paths_in_url_template_variables.graphql index 70f771ce35..53803942cf 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_nested_paths_in_url_template_variables.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_nested_paths_in_url_template_variables.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url.graphql index 89eb02ad98..ac6a9961f8 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: 
"https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v1", http: { baseURL: "127.0.0.1" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url_scheme.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url_scheme.graphql index 7c63d06a51..33fd6e49da 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url_scheme.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_source_url_scheme.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source(name: "v1", http: { baseURL: "file://data.json" }) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_types.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_types.graphql index fb5a014fda..2db8affdb9 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_types.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/invalid_types.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/relative_connect_url_without_source.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/relative_connect_url_without_source.graphql index c21b025006..9f161f7311 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/relative_connect_url_without_source.graphql +++ 
b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/relative_connect_url_without_source.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { resources: [String!]! @connect(http: { GET: "/resources" }, selection: "$") diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/this_on_root_types.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/this_on_root_types.graphql index 7fe9c2cb3e..59168daf20 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/this_on_root_types.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/this_on_root_types.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_arg_in_url_template.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_arg_in_url_template.graphql index a4fba63a93..11ad0d6e02 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_arg_in_url_template.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_arg_in_url_template.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_this_in_url_template.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_this_in_url_template.graphql index cd384decde..d1f771cd20 100644 --- 
a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_this_in_url_template.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/undefined_this_in_url_template.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid-expressions-after-domain.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid-expressions-after-domain.graphql index a5129d4d82..4ed0cf54a6 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid-expressions-after-domain.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid-expressions-after-domain.graphql @@ -1,6 +1,6 @@ extend schema @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_absolute_url.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_absolute_url.graphql index 5519116135..70d6066dc3 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_absolute_url.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_absolute_url.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { resources(anArg: String): [String!]! 
diff --git a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_multiline.graphql b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_multiline.graphql index 8f268d33ac..d85328e5ef 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_multiline.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/uri_templates/valid_connect_multiline.graphql @@ -1,5 +1,5 @@ extend schema - @link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { resources(anArg: String): [String!]! diff --git a/apollo-federation/src/sources/connect/validation/test_data/valid_large_body.graphql b/apollo-federation/src/sources/connect/validation/test_data/valid_large_body.graphql index 756241d772..e2874cddf3 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/valid_large_body.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/valid_large_body.graphql @@ -1,7 +1,7 @@ extend schema @link(url: "https://specs.apollo.dev/federation/v2.10") @link( - url: "https://specs.apollo.dev/connect/v0.1" + url: "https://specs.apollo.dev/connect/v0.2" import: ["@connect", "@source"] ) @source( diff --git a/apollo-federation/src/sources/connect/validation/test_data/valid_selection_with_escapes.graphql b/apollo-federation/src/sources/connect/validation/test_data/valid_selection_with_escapes.graphql index 979d10cf64..0c248ce70b 100644 --- a/apollo-federation/src/sources/connect/validation/test_data/valid_selection_with_escapes.graphql +++ b/apollo-federation/src/sources/connect/validation/test_data/valid_selection_with_escapes.graphql @@ -1,5 +1,5 @@ extend schema -@link(url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect"]) +@link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@connect"]) type Query { block: T diff 
--git a/apollo-federation/src/sources/connect/variable.rs b/apollo-federation/src/sources/connect/variable.rs index ce0cd49047..3e14fd2e26 100644 --- a/apollo-federation/src/sources/connect/variable.rs +++ b/apollo-federation/src/sources/connect/variable.rs @@ -22,7 +22,7 @@ pub(crate) struct VariableContext<'schema> { } impl<'schema> VariableContext<'schema> { - pub(crate) fn new( + pub(crate) const fn new( element: &'schema ConnectedElement<'schema>, phase: Phase, target: Target, @@ -61,7 +61,7 @@ impl<'schema> VariableContext<'schema> { } /// Get the error code for this context - pub(crate) fn error_code(&self) -> Code { + pub(crate) const fn error_code(&self) -> Code { match self.target { Target::Body => Code::InvalidSelection, } @@ -97,7 +97,7 @@ pub enum Namespace { } impl Namespace { - pub fn as_str(&self) -> &'static str { + pub const fn as_str(&self) -> &'static str { match self { Self::Args => "$args", Self::Config => "$config", @@ -182,7 +182,7 @@ pub(crate) struct VariablePathPart<'a> { } impl VariablePathPart<'_> { - pub(crate) fn as_str(&self) -> &str { + pub(crate) const fn as_str(&self) -> &str { self.part } } diff --git a/apollo-federation/src/subgraph/database.rs b/apollo-federation/src/subgraph/database.rs deleted file mode 100644 index 141e57d5c8..0000000000 --- a/apollo-federation/src/subgraph/database.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! Valid federation 2 subgraphs. -//! -//! Note: technically, federation 1 subgraphs are still accepted as input of -//! composition. However, there is some pre-composition steps that "massage" -//! the input schema to transform them in fully valid federation 2 subgraphs, -//! so the subgraphs seen by composition and query planning are always fully -//! valid federation 2 ones, and this is what this database handles. -//! Note2: This does assumes that whichever way an implementation of this -//! trait is created, some validation that the underlying schema is a valid -//! 
federation subgraph (so valid graphql, link to the federation spec, and -//! pass additional federation validations). If this is not the case, most -//! of the methods here will panic. - -use std::sync::Arc; - -use apollo_compiler::executable::SelectionSet; - -// TODO: we should define this as part as some more generic "FederationSpec" definition, but need -// to define the ground work for that in `apollo-at-link` first. -#[cfg(test)] -pub(crate) fn federation_link_identity() -> crate::link::spec::Identity { - crate::link::spec::Identity { - domain: crate::link::spec::APOLLO_SPEC_DOMAIN.to_string(), - name: apollo_compiler::name!("federation"), - } -} - -#[derive(Eq, PartialEq, Debug, Clone)] -pub(crate) struct Key { - pub(crate) type_name: apollo_compiler::Name, - // TODO: this should _not_ be an Option below; but we don't know how to build the SelectionSet, - // so until we have a solution, we use None to have code that compiles. - selections: Option>, -} - -impl Key { - // TODO: same remark as above: not meant to be `Option` - // TODO remove suppression OR use method in final version - #[allow(dead_code)] - pub(crate) fn selections(&self) -> Option> { - self.selections.clone() - } - - #[cfg(test)] - pub(crate) fn from_directive_application( - type_name: &apollo_compiler::Name, - directive: &apollo_compiler::executable::Directive, - ) -> Option { - directive - .arguments - .iter() - .find(|arg| arg.name == "fields") - .and_then(|arg| arg.value.as_str()) - .map(|_value| Key { - type_name: type_name.clone(), - // TODO: obviously not what we want. - selections: None, - }) - } -} - -#[cfg(test)] -pub(crate) fn federation_link(schema: &apollo_compiler::Schema) -> Arc { - crate::link::database::links_metadata(schema) - // TODO: error handling? 
- .unwrap_or_default() - .unwrap_or_default() - .for_identity(&federation_link_identity()) - .expect("The presence of the federation link should have been validated on construction") -} - -/// The name of the @key directive in this subgraph. -/// This will either return 'federation__key' if the `@key` directive is not imported, -/// or whatever never it is imported under otherwise. Commonly, this would just be `key`. -#[cfg(test)] -pub(crate) fn key_directive_name(schema: &apollo_compiler::Schema) -> apollo_compiler::Name { - federation_link(schema).directive_name_in_schema(&apollo_compiler::name!("key")) -} - -#[cfg(test)] -pub(crate) fn keys( - schema: &apollo_compiler::Schema, - type_name: &apollo_compiler::Name, -) -> Vec { - let key_name = key_directive_name(schema); - if let Some(type_def) = schema.types.get(type_name) { - type_def - .directives() - .get_all(&key_name) - .filter_map(|directive| Key::from_directive_application(type_name, directive)) - .collect() - } else { - vec![] - } -} diff --git a/apollo-federation/src/subgraph/mod.rs b/apollo-federation/src/subgraph/mod.rs index cad5063084..f7934fef86 100644 --- a/apollo-federation/src/subgraph/mod.rs +++ b/apollo-federation/src/subgraph/mod.rs @@ -1,7 +1,5 @@ -use std::collections::BTreeMap; use std::fmt::Display; use std::fmt::Formatter; -use std::sync::Arc; use apollo_compiler::Node; use apollo_compiler::Schema; @@ -32,7 +30,6 @@ use crate::subgraph::spec::LinkSpecDefinitions; use crate::subgraph::spec::SERVICE_SDL_QUERY; use crate::subgraph::spec::SERVICE_TYPE; -mod database; pub mod spec; pub mod typestate; // TODO: Move here to overwrite Subgraph after API is reasonable @@ -307,32 +304,6 @@ impl std::fmt::Debug for Subgraph { } } -pub struct Subgraphs { - subgraphs: BTreeMap>, -} - -#[allow(clippy::new_without_default)] -impl Subgraphs { - pub fn new() -> Self { - Subgraphs { - subgraphs: BTreeMap::new(), - } - } - - pub fn add(&mut self, subgraph: Subgraph) -> Result<(), String> { - if 
self.subgraphs.contains_key(&subgraph.name) { - return Err(format!("A subgraph named {} already exists", subgraph.name)); - } - self.subgraphs - .insert(subgraph.name.clone(), Arc::new(subgraph)); - Ok(()) - } - - pub fn get(&self, name: &str) -> Option> { - self.subgraphs.get(name).cloned() - } -} - pub struct ValidSubgraph { pub name: String, pub url: String, @@ -368,62 +339,42 @@ pub struct SubgraphError { } impl SubgraphError { - pub(crate) fn new(subgraph: impl Into, error: impl Into) -> Self { + pub fn new(subgraph: impl Into, error: impl Into) -> Self { SubgraphError { subgraph: subgraph.into(), error: error.into(), } } -} -impl Display for SubgraphError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "[{}] {}", self.subgraph, self.error) + pub fn error(&self) -> &FederationError { + &self.error } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::subgraph::database::keys; - - #[test] - fn can_inspect_a_type_key() { - // TODO: no schema expansion currently, so need to having the `@link` to `link` and the - // @link directive definition for @link-bootstrapping to work. Also, we should - // theoretically have the @key directive definition added too (but validation is not - // wired up yet, so we get away without). Point being, this is just some toy code at - // the moment. - - let schema = r#" - extend schema - @link(url: "https://specs.apollo.dev/link/v1.0", import: ["Import"]) - @link(url: "https://specs.apollo.dev/federation/v2.3", import: ["@key"]) - - type Query { - t: T - } - - type T @key(fields: "id") { - id: ID! 
- x: Int - } - - enum link__Purpose { - SECURITY - EXECUTION - } - scalar Import - - directive @link(url: String, as: String, import: [Import], for: link__Purpose) repeatable on SCHEMA - "#; + pub fn into_inner(self) -> FederationError { + self.error + } - let subgraph = Subgraph::new("S1", "http://s1", schema).unwrap(); - let keys = keys(&subgraph.schema, &name!("T")); - assert_eq!(keys.len(), 1); - assert_eq!(keys.first().unwrap().type_name, name!("T")); + // Format subgraph errors in the same way as `Rover` does. + // And return them as a vector of (error_code, error_message) tuples + // - Gather associated errors from the validation error. + // - Split each error into its code and message. + // - Add the subgraph name prefix to FederationError message. + pub fn format_errors(&self) -> Vec<(String, String)> { + self.error + .errors() + .iter() + .map(|e| { + ( + e.code_string(), + format!("[{subgraph}] {e}", subgraph = self.subgraph), + ) + }) + .collect() + } +} - // TODO: no accessible selection yet. 
+impl Display for SubgraphError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "[{}] {}", self.subgraph, self.error) } } diff --git a/apollo-federation/src/subgraph/typestate.rs b/apollo-federation/src/subgraph/typestate.rs index a944945f96..d0f5f9f0d1 100644 --- a/apollo-federation/src/subgraph/typestate.rs +++ b/apollo-federation/src/subgraph/typestate.rs @@ -1,9 +1,12 @@ use apollo_compiler::Name; +use apollo_compiler::Node; use apollo_compiler::Schema; +use apollo_compiler::ast; use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::schema::ComponentName; +use apollo_compiler::schema::Directive; use apollo_compiler::schema::Type; use crate::LinkSpecDefinition; @@ -15,7 +18,13 @@ use crate::link::federation_spec_definition::FEDERATION_EXTENDS_DIRECTIVE_NAME_I use crate::link::federation_spec_definition::FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC; use crate::link::federation_spec_definition::FEDERATION_PROVIDES_DIRECTIVE_NAME_IN_SPEC; use crate::link::federation_spec_definition::FEDERATION_REQUIRES_DIRECTIVE_NAME_IN_SPEC; +use crate::link::federation_spec_definition::FEDERATION_VERSIONS; +use crate::link::federation_spec_definition::FederationSpecDefinition; use crate::link::federation_spec_definition::add_fed1_link_to_schema; +use crate::link::link_spec_definition::LINK_DIRECTIVE_IMPORT_ARGUMENT_NAME; +use crate::link::link_spec_definition::LINK_DIRECTIVE_URL_ARGUMENT_NAME; +use crate::link::spec::Identity; +use crate::link::spec::Version; use crate::link::spec_definition::SpecDefinition; use crate::schema::FederationSchema; use crate::schema::blueprint::FederationBlueprint; @@ -117,7 +126,7 @@ impl Subgraph { } pub fn parse( - name: &'static str, + name: &str, url: &str, schema_str: &str, ) -> Result, FederationError> { @@ -129,6 +138,18 @@ impl Subgraph { Ok(Self::new(name, url, schema)) } + /// Converts the schema to a fed2 schema. 
+ /// - It is assumed to have no `@link` to the federation spec. + /// - Returns an equivalent subgraph with a `@link` to the auto expanded federation spec. + /// - This is mainly for testing and not optimized. + // PORT_NOTE: Corresponds to `asFed2SubgraphDocument` function in JS, but simplified. + pub fn into_fed2_subgraph(self) -> Result { + let mut schema = self.state.schema; + let federation_spec = FederationSpecDefinition::auto_expanded_federation_spec(); + add_federation_link_to_schema(&mut schema, federation_spec.version())?; + Ok(Self::new(&self.name, &self.url, schema)) + } + pub fn assume_expanded(self) -> Result, FederationError> { let schema = FederationSchema::new(self.state.schema)?; let metadata = compute_subgraph_metadata(&schema)?.ok_or_else(|| { @@ -167,7 +188,7 @@ impl Subgraph { // PORT_NOTE: JS doesn't actually add the 1.0 federation spec link to the schema. In // Rust, we add it, so that fed 1 and fed 2 can be processed the same way. add_fed1_link_to_schema(&mut schema)?; - } + }; // Now that we have the definition for `@link` and an application, the bootstrap directive detection should work. schema.collect_links_metadata()?; @@ -201,6 +222,48 @@ impl Subgraph { } } +/// Adds a federation (v2 or above) link directive to the schema. +/// - Similar to `add_fed1_link_to_schema`, but the link is added before bootstrapping. +/// - This is mainly for testing. +fn add_federation_link_to_schema( + schema: &mut Schema, + federation_version: &Version, +) -> Result<(), FederationError> { + let federation_spec = FEDERATION_VERSIONS + .find(federation_version) + .ok_or_else(|| internal_error!( + "Subgraph unexpectedly does not use a supported federation spec version. Requested version: {}", + federation_version, + ))?; + + // Insert `@link(url: "http://specs.apollo.dev/federation/vX.Y", import: ...)`. + // - auto import all directives. 
+ let imports: Vec<_> = federation_spec + .directive_specs() + .iter() + .map(|d| format!("@{}", d.name()).into()) + .collect(); + + schema + .schema_definition + .make_mut() + .directives + .push(Component::new(Directive { + name: Identity::link_identity().name, + arguments: vec![ + Node::new(ast::Argument { + name: LINK_DIRECTIVE_URL_ARGUMENT_NAME, + value: federation_spec.url().to_string().into(), + }), + Node::new(ast::Argument { + name: LINK_DIRECTIVE_IMPORT_ARGUMENT_NAME, + value: Node::new(ast::Value::List(imports)), + }), + ], + })); + Ok(()) +} + fn add_federation_operations(schema: &mut FederationSchema) -> Result<(), FederationError> { // Add federation operation types ANY_TYPE_SPEC.check_or_add(schema, None)?; @@ -239,7 +302,7 @@ fn add_federation_operations(schema: &mut FederationSchema) -> Result<(), Federa // Add `Query._service` (if not already present) let service_field_pos = ObjectFieldDefinitionPosition { - type_name: query_root_type_name.clone(), + type_name: query_root_type_name, field_name: FEDERATION_SERVICE_FIELD_NAME, }; if service_field_pos.try_get(schema.schema()).is_none() { @@ -254,19 +317,11 @@ impl Subgraph { todo!("Implement upgrade logic for expanded subgraphs"); } - pub fn validate( - mut self, - rename_root_types: bool, - ) -> Result, SubgraphError> { + pub fn validate(self, rename_root_types: bool) -> Result, SubgraphError> { let blueprint = FederationBlueprint::new(rename_root_types); - blueprint - .on_validation(&mut self.state.schema) + let schema = blueprint + .on_validation(self.state.schema) .map_err(|e| SubgraphError::new(self.name.clone(), e))?; - let schema = self - .state - .schema - .validate_or_return_self() - .map_err(|t| SubgraphError::new(self.name.clone(), t.1))?; Ok(Subgraph { name: self.name, @@ -303,6 +358,11 @@ impl Subgraph { self.state.schema() } + /// Returns the schema as a string. Mainly for testing purposes. 
+ pub fn schema_string(&self) -> String { + self.schema().schema().to_string() + } + pub(crate) fn extends_directive_name(&self) -> Result, FederationError> { self.metadata() .federation_spec_definition() @@ -458,6 +518,7 @@ mod tests { name!("requires"), name!("skip"), name!("specifiedBy"), + name!("tag"), ] ); } @@ -497,6 +558,7 @@ mod tests { name!("federation__provides"), name!("federation__requires"), name!("federation__shareable"), + name!("federation__tag"), name!("include"), name!("link"), name!("skip"), @@ -541,6 +603,7 @@ mod tests { name!("federation__provides"), name!("federation__requires"), name!("federation__shareable"), + name!("federation__tag"), name!("include"), name!("link"), name!("skip"), @@ -831,154 +894,3 @@ mod tests { ); } } - -// PORT_NOTE: Corresponds to '@core/@link handling' tests in JS -#[cfg(test)] -mod link_handling_tests { - use super::*; - - // TODO(FED-543): Remaining directive definitions should be added to the schema - #[allow(dead_code)] - const EXPECTED_FULL_SCHEMA: &str = r#" - schema - @link(url: "https://specs.apollo.dev/link/v1.0") - @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) - { - query: Query - } - - directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - - directive @key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE - - directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION - - directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION - - directive @federation__extends on OBJECT | INTERFACE - - directive @federation__shareable on OBJECT | FIELD_DEFINITION - - directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION - - directive @federation__override(from: String!) on FIELD_DEFINITION - - type T - @key(fields: "k") - { - k: ID! - } - - enum link__Purpose { - """ - \`SECURITY\` features provide metadata necessary to securely resolve fields. - """ - SECURITY - - """ - \`EXECUTION\` features provide metadata necessary for operation execution. - """ - EXECUTION - } - - scalar link__Import - - scalar federation__FieldSet - - scalar _Any - - type _Service { - sdl: String - } - - union _Entity = T - - type Query { - _entities(representations: [_Any!]!): [_Entity]! - _service: _Service! - } - "#; - - #[test] - fn expands_everything_if_only_the_federation_spec_is_linked() { - let subgraph = Subgraph::parse( - "S", - "", - r#" - extend schema - @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) - - type T @key(fields: "k") { - k: ID! - } - "#, - ) - .expect("valid schema") - .expand_links() - .expect("expands subgraph") - .validate(true) - .expect("expanded subgraph to be valid"); - - // TODO(FED-543): `subgraph` is supposed to be compared against `EXPECTED_FULL_SCHEMA`, but - // it's failing due to missing directive definitions. So, we use - // `insta::assert_snapshot` for now. 
- // assert_eq!(subgraph.schema().schema().to_string(), EXPECTED_FULL_SCHEMA); - insta::assert_snapshot!(subgraph.schema().schema().to_string(), @r###" - schema @link(url: "https://specs.apollo.dev/link/v1.0") { - query: Query - } - - extend schema @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) - - directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - - directive @key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE - - directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION - - directive @federation__shareable on OBJECT | FIELD_DEFINITION - - directive @federation__override(from: String!) on FIELD_DEFINITION - - type T @key(fields: "k") { - k: ID! - } - - enum link__Purpose { - """ - `SECURITY` features provide metadata necessary to securely resolve fields. - """ - SECURITY - """ - `EXECUTION` features provide metadata necessary for operation execution. - """ - EXECUTION - } - - scalar link__Import - - scalar federation__FieldSet - - scalar _Any - - type _Service { - sdl: String - } - - union _Entity = T - - type Query { - _entities(representations: [_Any!]!): [_Entity]! - _service: _Service! 
- } - "###); - } -} diff --git a/apollo-federation/src/supergraph/mod.rs b/apollo-federation/src/supergraph/mod.rs index 9b5d8faa2d..ffda1f4574 100644 --- a/apollo-federation/src/supergraph/mod.rs +++ b/apollo-federation/src/supergraph/mod.rs @@ -14,6 +14,7 @@ use apollo_compiler::ast::FieldDefinition; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; +use apollo_compiler::executable::FieldSet; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::schema::ComponentName; @@ -913,7 +914,7 @@ fn extract_interface_type_content( ), } })?; - Ok(match subgraph.schema.get_type(type_name.clone())? { + Ok(match subgraph.schema.get_type(type_name)? { TypeDefinitionPosition::Object(pos) => { if !is_interface_object { return Err( @@ -1772,7 +1773,7 @@ fn add_federation_operations( // `Query._service` ObjectFieldDefinitionPosition { - type_name: query_root_type_name.clone(), + type_name: query_root_type_name, field_name: FEDERATION_SERVICE_FIELD_NAME, } .insert( @@ -1928,12 +1929,20 @@ fn remove_inactive_applications( parent_type_pos.type_name().clone(), fields, )?; + let is_modified = remove_non_external_leaf_fields(schema, &mut fields)?; if is_modified { let replacement_directive = if fields.selections.is_empty() { None } else { - let fields = fields.serialize().no_indent().to_string(); + let fields = FieldSet { + sources: Default::default(), + selection_set: fields, + } + .serialize() + .no_indent() + .to_string(); + Some(Node::new(match directive_kind { FieldSetDirectiveKind::Provides => { federation_spec_definition.provides_directive(schema, fields)? 
diff --git a/apollo-federation/src/utils/human_readable.rs b/apollo-federation/src/utils/human_readable.rs new file mode 100644 index 0000000000..1ed3a0c91f --- /dev/null +++ b/apollo-federation/src/utils/human_readable.rs @@ -0,0 +1,150 @@ +pub(crate) struct JoinStringsOptions<'a> { + pub(crate) separator: &'a str, + pub(crate) first_separator: Option<&'a str>, + pub(crate) last_separator: Option<&'a str>, + /// When displaying a list of something in a human-readable form, after what size (in number of + /// characters) we start displaying only a subset of the list. Note this only counts characters + /// in list elements, and ignores separators. + pub(crate) output_length_limit: Option, +} + +impl Default for JoinStringsOptions<'_> { + fn default() -> Self { + Self { + separator: ", ", + first_separator: None, + last_separator: Some(" and "), + output_length_limit: None, + } + } +} + +/// Joins an iterator of strings, but with the ability to use a specific different separator for the +/// first and/or last occurrence (if both are given and the list is size two, the first separator is +/// used). Optionally, if the resulting list to print is "too long", it can display a subset of the +/// elements and uses an ellipsis (...) for the rest. +/// +/// The goal is to make the reading flow slightly better. For instance, if you have a vector of +/// subgraphs `s = ["A", "B", "C"]`, then `join_strings(s.iter(), Default::default())` will yield +/// "A, B and C". +pub(crate) fn join_strings( + mut iter: impl Iterator>, + options: JoinStringsOptions, +) -> String { + let mut output = String::new(); + let Some(first) = iter.next() else { + return output; + }; + output.push_str(first.as_ref()); + let Some(second) = iter.next() else { + return output; + }; + // PORT_NOTE: The analogous JS code in `printHumanReadableList()` was only tracking the length + // of elements getting added to the list and ignored separators, so we do the same here. 
+ let mut element_length = first.as_ref().chars().count(); + // Returns true if push would exceed limit, and instead pushes default separator and "...". + let mut push_sep_and_element = |sep: &str, element: &str| { + if let Some(output_length_limit) = options.output_length_limit { + // PORT_NOTE: The analogous JS code in `printHumanReadableList()` has a bug where it + // doesn't early exit when the length would be too long, and later small elements in the + // list may erroneously extend the printed subset. That bug is fixed here. + let new_element_length = element_length + element.chars().count(); + return if new_element_length <= output_length_limit { + element_length = new_element_length; + output.push_str(sep); + output.push_str(element); + false + } else { + output.push_str(options.separator); + output.push_str("..."); + true + }; + } + output.push_str(sep); + output.push_str(element); + false + }; + let last_sep = options.last_separator.unwrap_or(options.separator); + let Some(mut current) = iter.next() else { + push_sep_and_element(options.first_separator.unwrap_or(last_sep), second.as_ref()); + return output; + }; + if push_sep_and_element( + options.first_separator.unwrap_or(options.separator), + second.as_ref(), + ) { + return output; + } + for next in iter { + if push_sep_and_element(options.separator, current.as_ref()) { + return output; + } + current = next; + } + push_sep_and_element(last_sep, current.as_ref()); + output +} + +pub(crate) struct HumanReadableListOptions<'a> { + pub(crate) prefix: Option>, + pub(crate) last_separator: Option<&'a str>, + /// When displaying a list of something in a human-readable form, after what size (in number of + /// characters) we start displaying only a subset of the list. 
+ pub(crate) output_length_limit: usize, +} + +pub(crate) struct HumanReadableListPrefix<'a> { + pub(crate) singular: &'a str, + pub(crate) plural: &'a str, +} + +impl Default for HumanReadableListOptions<'_> { + fn default() -> Self { + Self { + prefix: None, + last_separator: Some(" and "), + output_length_limit: 100, + } + } +} + +// PORT_NOTE: Named `printHumanReadableList` in the JS codebase, but "print" in Rust has the +// implication it prints to stdout/stderr, so we remove it here. Also, the "emptyValue" option is +// never used, so it's not ported. +/// Like [join_strings], joins an iterator of strings, but with a few differences, namely: +/// - It allows prefixing the whole list, and to use a different prefix if there's only a single +/// element in the list. +/// - It forces the use of ", " as separator, but allows a different last separator. +/// - It forces an output length limit to be specified. In other words, this function assumes it's +/// more useful to avoid flooding the output than printing everything when the list is too long. +pub(crate) fn human_readable_list( + mut iter: impl Iterator>, + options: HumanReadableListOptions, +) -> String { + let Some(first) = iter.next() else { + // TODO: The JS code returns an empty string here, which we've ported accordingly. However, + // this probably isn't want the caller wants, and something like e.g. "no types" for prefix + // type/types may be better. 
+ return "".to_owned(); + }; + let Some(second) = iter.next() else { + return if let Some(prefix) = options.prefix { + format!("{} {}", prefix.singular, first.as_ref()) + } else { + first.as_ref().to_owned() + }; + }; + let joined_strings = join_strings( + [first, second].into_iter().chain(iter), + JoinStringsOptions { + last_separator: options.last_separator, + output_length_limit: Some(options.output_length_limit), + ..Default::default() + }, + ); + if let Some(prefix) = options.prefix { + format!("{} {}", prefix.plural, joined_strings) + } else { + joined_strings + } +} diff --git a/apollo-federation/src/utils/mod.rs b/apollo-federation/src/utils/mod.rs index 75ee5b0891..94fca57a23 100644 --- a/apollo-federation/src/utils/mod.rs +++ b/apollo-federation/src/utils/mod.rs @@ -1,6 +1,7 @@ //! This module contains various tools that help the ergonomics of this crate. mod fallible_iterator; +pub(crate) mod human_readable; pub(crate) mod logging; pub(crate) mod serde_bridge; diff --git a/apollo-federation/tests/dhat_profiling/connectors_validation.rs b/apollo-federation/tests/dhat_profiling/connectors_validation.rs index ec12a16159..106e86f383 100644 --- a/apollo-federation/tests/dhat_profiling/connectors_validation.rs +++ b/apollo-federation/tests/dhat_profiling/connectors_validation.rs @@ -16,7 +16,7 @@ fn valid_large_body() { let _profiler = dhat::Profiler::builder().testing().build(); - apollo_federation::sources::connect::validation::validate(&schema, SCHEMA); + apollo_federation::sources::connect::validation::validate(schema, SCHEMA); let stats = dhat::HeapStats::get(); dhat::assert!(stats.max_bytes < MAX_BYTES); diff --git a/apollo-federation/tests/dhat_profiling/query_plan.rs b/apollo-federation/tests/dhat_profiling/query_plan.rs index 590a44e9a2..d87f0cee8d 100644 --- a/apollo-federation/tests/dhat_profiling/query_plan.rs +++ b/apollo-federation/tests/dhat_profiling/query_plan.rs @@ -74,11 +74,9 @@ fn valid_query_plan() { .to_api_schema(api_options) 
.expect("api schema should be valid"); let qp_config = apollo_federation::query_plan::query_planner::QueryPlannerConfig::default(); - let planner = apollo_federation::query_plan::query_planner::QueryPlanner::new( - &supergraph, - qp_config.clone(), - ) - .expect("query planner should be created"); + let planner = + apollo_federation::query_plan::query_planner::QueryPlanner::new(&supergraph, qp_config) + .expect("query planner should be created"); let stats = dhat::HeapStats::get(); dhat::assert!(stats.max_bytes < MAX_BYTES_QUERY_PLANNER); dhat::assert!(stats.total_blocks < MAX_ALLOCATIONS_QUERY_PLANNER); diff --git a/apollo-federation/tests/query_plan/build_query_plan_support.rs b/apollo-federation/tests/query_plan/build_query_plan_support.rs index 3c6266a895..4b3ee680a8 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_support.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_support.rs @@ -93,7 +93,7 @@ pub(crate) fn test_planner( ) -> QueryPlanner { let supergraph = compose(function_path, subgraph_names_and_schemas); let supergraph = apollo_federation::Supergraph::new(&supergraph).expect("valid supergraph"); - QueryPlanner::new(&supergraph, config.clone()).expect("can create query planner") + QueryPlanner::new(&supergraph, config).expect("can create query planner") } #[track_caller] diff --git a/apollo-federation/tests/snapshots/main__composition_tests__can_compose_supergraph.snap b/apollo-federation/tests/snapshots/main__composition_tests__can_compose_supergraph.snap index c1ebe41486..79c5a91adf 100644 --- a/apollo-federation/tests/snapshots/main__composition_tests__can_compose_supergraph.snap +++ b/apollo-federation/tests/snapshots/main__composition_tests__can_compose_supergraph.snap @@ -2,7 +2,7 @@ source: apollo-federation/tests/composition_tests.rs expression: print_sdl(supergraph.schema.schema()) --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema 
@link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query } diff --git a/apollo-federation/tests/snapshots/main__composition_tests__can_compose_types_from_different_subgraphs.snap b/apollo-federation/tests/snapshots/main__composition_tests__can_compose_types_from_different_subgraphs.snap index 1df16844dc..cd534b94f0 100644 --- a/apollo-federation/tests/snapshots/main__composition_tests__can_compose_types_from_different_subgraphs.snap +++ b/apollo-federation/tests/snapshots/main__composition_tests__can_compose_types_from_different_subgraphs.snap @@ -2,7 +2,7 @@ source: apollo-federation/tests/composition_tests.rs expression: print_sdl(supergraph.schema.schema()) --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query } diff --git a/apollo-federation/tests/snapshots/main__composition_tests__can_compose_with_descriptions.snap b/apollo-federation/tests/snapshots/main__composition_tests__can_compose_with_descriptions.snap index 9eb2912465..3a4c8f9a45 100644 --- a/apollo-federation/tests/snapshots/main__composition_tests__can_compose_with_descriptions.snap +++ b/apollo-federation/tests/snapshots/main__composition_tests__can_compose_with_descriptions.snap @@ -3,7 +3,7 @@ source: apollo-federation/tests/composition_tests.rs expression: print_sdl(supergraph.schema.schema()) --- """A cool schema""" -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query } diff --git a/apollo-federation/tests/snapshots/main__composition_tests__compose_removes_federation_directives.snap 
b/apollo-federation/tests/snapshots/main__composition_tests__compose_removes_federation_directives.snap index 209831b025..e78cfc9371 100644 --- a/apollo-federation/tests/snapshots/main__composition_tests__compose_removes_federation_directives.snap +++ b/apollo-federation/tests/snapshots/main__composition_tests__compose_removes_federation_directives.snap @@ -2,7 +2,7 @@ source: apollo-federation/tests/composition_tests.rs expression: print_sdl(supergraph.schema.schema()) --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) { query: Query } diff --git a/apollo-federation/tests/subgraph/mod.rs b/apollo-federation/tests/subgraph/mod.rs index 7610f5925b..3b4f779a79 100644 --- a/apollo-federation/tests/subgraph/mod.rs +++ b/apollo-federation/tests/subgraph/mod.rs @@ -1 +1,2 @@ mod parse_expand_tests; +mod subgraph_validation_tests; diff --git a/apollo-federation/tests/subgraph/subgraph_validation_tests.rs b/apollo-federation/tests/subgraph/subgraph_validation_tests.rs new file mode 100644 index 0000000000..fd763ed42b --- /dev/null +++ b/apollo-federation/tests/subgraph/subgraph_validation_tests.rs @@ -0,0 +1,2198 @@ +use apollo_federation::subgraph::SubgraphError; +use apollo_federation::subgraph::typestate::Subgraph; +use apollo_federation::subgraph::typestate::Validated; + +enum BuildOption { + AsIs, + AsFed2, +} + +fn build_inner( + schema_str: &str, + build_option: BuildOption, +) -> Result, SubgraphError> { + let name = "S"; + let subgraph = + Subgraph::parse(name, &format!("http://{name}"), schema_str).expect("valid schema"); + let subgraph = if matches!(build_option, BuildOption::AsFed2) { + subgraph + .into_fed2_subgraph() + .map_err(|e| SubgraphError::new(name, e))? + } else { + subgraph + }; + subgraph + .expand_links() + .map_err(|e| SubgraphError::new(name, e))? 
+ .validate(true) +} + +fn build_and_validate(schema_str: &str) -> Subgraph { + build_inner(schema_str, BuildOption::AsIs).expect("expanded subgraph to be valid") +} + +fn build_for_errors_with_option(schema: &str, build_option: BuildOption) -> Vec<(String, String)> { + build_inner(schema, build_option) + .expect_err("subgraph error was expected") + .format_errors() +} + +/// Build subgraph expecting errors, assuming fed 2. +fn build_for_errors(schema: &str) -> Vec<(String, String)> { + build_for_errors_with_option(schema, BuildOption::AsFed2) +} + +fn remove_indentation(s: &str) -> String { + // count the last lines that are space-only + let first_empty_lines = s.lines().take_while(|line| line.trim().is_empty()).count(); + let last_empty_lines = s + .lines() + .rev() + .take_while(|line| line.trim().is_empty()) + .count(); + + // lines without the space-only first/last lines + let lines = s + .lines() + .skip(first_empty_lines) + .take(s.lines().count() - first_empty_lines - last_empty_lines); + + // compute the indentation + let indentation = lines + .clone() + .map(|line| line.chars().take_while(|c| *c == ' ').count()) + .min() + .unwrap_or(0); + + // remove the indentation + lines + .map(|line| { + line.trim_end() + .chars() + .skip(indentation) + .collect::() + }) + .collect::>() + .join("\n") +} + +/// True if a and b contain the same error messages +fn check_errors(a: &[(String, String)], b: &[(&str, &str)]) -> Result<(), String> { + if a.len() != b.len() { + return Err(format!( + "Mismatched error counts: {} != {}\n\nexpected:\n{}\n\nactual:\n{}", + b.len(), + a.len(), + b.iter() + .map(|(code, msg)| { format!("- {}: {}", code, msg) }) + .collect::>() + .join("\n"), + a.iter() + .map(|(code, msg)| { format!("+ {}: {}", code, msg) }) + .collect::>() + .join("\n"), + )); + } + + // remove indentations from messages to ignore indentation differences + let b_iter = b + .iter() + .map(|(code, message)| (*code, remove_indentation(message))); + let diff: Vec<_> = 
a + .iter() + .map(|(code, message)| (code.as_str(), remove_indentation(message))) + .zip(b_iter) + .filter(|(a_i, b_i)| a_i.0 != b_i.0 || a_i.1 != b_i.1) + .collect(); + if diff.is_empty() { + Ok(()) + } else { + Err(format!( + "Mismatched errors:\n{}\n", + diff.iter() + .map(|(a_i, b_i)| { format!("- {}: {}\n+ {}: {}", b_i.0, b_i.1, a_i.0, a_i.1) }) + .collect::>() + .join("\n") + )) + } +} + +macro_rules! assert_errors { + ($a:expr, $b:expr) => { + match check_errors(&$a, &$b) { + Ok(()) => { + // Success + } + Err(e) => { + panic!("{e}") + } + } + }; +} + +mod fieldset_based_directives { + use super::*; + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn rejects_field_defined_with_arguments_in_key() { + let schema_str = r#" + type Query { + t: T + } + type T @key(fields: "f") { + f(x: Int): Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_FIELDS_HAS_ARGS", + r#"[S] On type "T", for @key(fields: "f"): field T.f cannot be included because it has arguments (fields with argument are not allowed in @key)"#, + )] + ); + } + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn rejects_field_defined_with_arguments_in_provides() { + let schema_str = r#" + type Query { + t: T @provides(fields: "f") + } + + type T { + f(x: Int): Int @external + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "PROVIDES_FIELDS_HAS_ARGS", + r#"[S] On field "Query.t", for @provides(fields: "f"): field T.f cannot be included because it has arguments (fields with argument are not allowed in @provides)"#, + )] + ); + } + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn rejects_provides_on_non_external_fields() { + let schema_str = r#" + type Query { + t: T @provides(fields: "f") + } + + type T { + f: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "PROVIDES_FIELDS_MISSING_EXTERNAL", + r#"[S] On field "Query.t", for 
@provides(fields: "f"): field "T.f" should not be part of a @provides since it is already provided by this subgraph (it is not marked @external)"#, + )] + ); + } + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn rejects_requires_on_non_external_fields() { + let schema_str = r#" + type Query { + t: T + } + + type T { + f: Int + g: Int @requires(fields: "f") + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "REQUIRES_FIELDS_MISSING_EXTERNAL", + r#"[S] On field "T.g", for @requires(fields: "f"): field "T.f" should not be part of a @requires since it is already provided by this subgraph (it is not marked @external)"#, + )] + ); + } + + #[test] + fn rejects_key_on_interfaces_in_all_specs() { + for version in ["2.0", "2.1", "2.2"] { + let schema_str = format!( + r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v{}", import: ["@key"]) + + type Query {{ + t: T + }} + + interface T @key(fields: "f") {{ + f: Int + }} + "#, + version + ); + let err = build_for_errors_with_option(&schema_str, BuildOption::AsIs); + + assert_errors!( + err, + [( + "KEY_UNSUPPORTED_ON_INTERFACE", + r#"[S] Cannot use @key on interface "T": @key is not yet supported on interfaces"#, + )] + ); + } + } + + #[test] + fn rejects_provides_on_interfaces() { + let schema_str = r#" + type Query { + t: T + } + + interface T { + f: U @provides(fields: "g") + } + + type U { + g: Int @external + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "PROVIDES_UNSUPPORTED_ON_INTERFACE", + r#"[S] Cannot use @provides on field "T.f" of parent type "T": @provides is not yet supported within interfaces"#, + )] + ); + } + + #[test] + fn rejects_requires_on_interfaces() { + let schema_str = r#" + type Query { + t: T + } + + interface T { + f: Int @external + g: Int @requires(fields: "f") + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [ + ( + "REQUIRES_UNSUPPORTED_ON_INTERFACE", 
+ r#"[S] Cannot use @requires on field "T.g" of parent type "T": @requires is not yet supported within interfaces"#, + ), + ( + "EXTERNAL_ON_INTERFACE", + r#"[S] Interface type field "T.f" is marked @external but @external is not allowed on interface fields."#, + ), + ] + ); + } + + #[test] + fn rejects_unused_external() { + let schema_str = r#" + type Query { + t: T + } + + type T { + f: Int @external + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "EXTERNAL_UNUSED", + r#"[S] Field "T.f" is marked @external but is not used in any federation directive (@key, @provides, @requires) or to satisfy an interface; the field declaration has no use and should be removed (or the field should not be @external)."#, + )] + ); + } + + #[test] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_provides_on_non_object_fields() { + let schema_str = r#" + type Query { + t: T @provides(fields: "f") + } + + type T { + f: Int + } + "#; + let err = build_for_errors_with_option(schema_str, BuildOption::AsIs); + + assert_errors!( + err, + [( + "PROVIDES_ON_NON_OBJECT_FIELD", + r#"[S] Invalid @provides directive on field "Query.t": field has type "Int" which is not a Composite Type"#, + )] + ); + } + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn rejects_non_string_argument_to_key() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: ["f"]) { + f: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_INVALID_FIELDS_TYPE", + r#"[S] On type "T", for @key(fields: ["f"]): Invalid value for argument "fields": must be a string."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_non_string_argument_to_provides() { + let schema_str = r#" + type Query { + t: T @provides(fields: ["f"]) + } + + type T { + f: Int @external + } + "#; + let err = 
build_for_errors(schema_str); + + // Note: since the error here is that we cannot parse the key `fields`, this also means that @external on + // `f` will appear unused and we get an error for it. It's kind of hard to avoid cleanly and hopefully + // not a big deal (having errors dependencies is not exactly unheard of). + assert_errors!( + err, + [ + ( + "PROVIDES_INVALID_FIELDS_TYPE", + r#"[S] On field "Query.t", for @provides(fields: ["f"]): Invalid value for argument "fields": must be a string."#, + ), + ( + "EXTERNAL_UNUSED", + r#"[S] Field "T.f" is marked @external but is not used in any federation directive (@key, @provides, @requires) or to satisfy an interface; the field declaration has no use and should be removed (or the field should not be @external)."#, + ), + ] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_non_string_argument_to_requires() { + let schema_str = r#" + type Query { + t: T + } + + type T { + f: Int @external + g: Int @requires(fields: ["f"]) + } + "#; + let err = build_for_errors(schema_str); + + // Note: since the error here is that we cannot parse the key `fields`, this also means that @external on + // `f` will appear unused and we get an error for it. It's kind of hard to avoid cleanly and hopefully + // not a big deal (having errors dependencies is not exactly unheard of). 
+ assert_errors!( + err, + [ + ( + "REQUIRES_INVALID_FIELDS_TYPE", + r#"[S] On field "T.g", for @requires(fields: ["f"]): Invalid value for argument "fields": must be a string."#, + ), + ( + "EXTERNAL_UNUSED", + r#"[S] Field "T.f" is marked @external but is not used in any federation directive (@key, @provides, @requires) or to satisfy an interface; the field declaration has no use and should be removed (or the field should not be @external)."#, + ), + ] + ); + } + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + // Special case of non-string argument, specialized because it hits a different + // code-path due to enum values being parsed as string and requiring special care. + fn rejects_enum_like_argument_to_key() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: f) { + f: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_INVALID_FIELDS_TYPE", + r#"[S] On type "T", for @key(fields: f): Invalid value for argument "fields": must be a string."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + // Special case of non-string argument, specialized because it hits a different + // code-path due to enum values being parsed as string and requiring special care. + fn rejects_enum_like_argument_to_provides() { + let schema_str = r#" + type Query { + t: T @provides(fields: f) + } + + type T { + f: Int @external + } + "#; + let err = build_for_errors(schema_str); + + // Note: since the error here is that we cannot parse the key `fields`, this also mean that @external on + // `f` will appear unused and we get an error for it. It's kind of hard to avoid cleanly and hopefully + // not a big deal (having errors dependencies is not exactly unheard of). 
+ assert_errors!( + err, + [ + ( + "PROVIDES_INVALID_FIELDS_TYPE", + r#"[S] On field "Query.t", for @provides(fields: f): Invalid value for argument "fields": must be a string."#, + ), + ( + "EXTERNAL_UNUSED", + r#"[S] Field "T.f" is marked @external but is not used in any federation directive (@key, @provides, @requires) or to satisfy an interface; the field declaration has no use and should be removed (or the field should not be @external)."#, + ), + ] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + // Special case of non-string argument, specialized because it hits a different + // code-path due to enum values being parsed as string and requiring special care. + fn rejects_enum_like_argument_to_requires() { + let schema_str = r#" + type Query { + t: T + } + + type T { + f: Int @external + g: Int @requires(fields: f) + } + "#; + let err = build_for_errors(schema_str); + + // Note: since the error here is that we cannot parse the key `fields`, this also mean that @external on + // `f` will appear unused and we get an error for it. It's kind of hard to avoid cleanly and hopefully + // not a big deal (having errors dependencies is not exactly unheard of). 
+ assert_errors!( + err, + [ + ( + "REQUIRES_INVALID_FIELDS_TYPE", + r#"[S] On field "T.g", for @requires(fields: f): Invalid value for argument "fields": must be a string."#, + ), + ( + "EXTERNAL_UNUSED", + r#"[S] Field "T.f" is marked @external but is not used in any federation directive (@key, @provides, @requires) or to satisfy an interface; the field declaration has no use and should be removed (or the field should not be @external)."#, + ), + ] + ); + } + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn rejects_invalid_fields_argument_to_key() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: ":f") { + f: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_INVALID_FIELDS", + r#"[S] On type "T", for @key(fields: ":f"): Syntax Error: Expected Name, found ":"."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"Mismatched error counts: 1 != 2"#)] + fn rejects_invalid_fields_argument_to_provides() { + let schema_str = r#" + type Query { + t: T @provides(fields: "{{f}}") + } + + type T { + f: Int @external + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [ + ( + "PROVIDES_INVALID_FIELDS", + r#"[S] On field "Query.t", for @provides(fields: "{{f}}"): Syntax Error: Expected Name, found "{"."#, + ), + ( + "EXTERNAL_UNUSED", + r#"[S] Field "T.f" is marked @external but is not used in any federation directive (@key, @provides, @requires) or to satisfy an interface; the field declaration has no use and should be removed (or the field should not be @external)."#, + ), + ] + ); + } + + #[test] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn rejects_invalid_fields_argument_to_requires() { + let schema_str = r#" + type Query { + t: T + } + + type T { + f: Int @external + g: Int @requires(fields: "f b") + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + 
"REQUIRES_INVALID_FIELDS", + r#"[S] On field "T.g", for @requires(fields: "f b"): Cannot query field "b" on type "T" (if the field is defined in another subgraph, you need to add it to this subgraph with @external)."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_key_on_interface_field() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: "f") { + f: I + } + + interface I { + i: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_FIELDS_SELECT_INVALID_TYPE", + r#"[S] On type "T", for @key(fields: "f"): field "T.f" is a Interface type which is not allowed in @key"#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_key_on_union_field() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: "f") { + f: U + } + + union U = Query | T + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_FIELDS_SELECT_INVALID_TYPE", + r#"[S] On type "T", for @key(fields: "f"): field "T.f" is a Union type which is not allowed in @key"#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_directive_applications_in_key() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: "v { x ... @include(if: false) { y }}") { + v: V + } + + type V { + x: Int + y: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_DIRECTIVE_IN_FIELDS_ARG", + r#"[S] On type "T", for @key(fields: "v { x ... 
@include(if: false) { y }}"): cannot have directive applications in the @key(fields:) argument but found @include(if: false)."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_directive_applications_in_provides() { + let schema_str = r#" + type Query { + t: T @provides(fields: "v { ... on V @skip(if: true) { x y } }") + } + + type T @key(fields: "id") { + id: ID + v: V @external + } + + type V { + x: Int + y: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "PROVIDES_DIRECTIVE_IN_FIELDS_ARG", + r#"[S] On field "Query.t", for @provides(fields: "v { ... on V @skip(if: true) { x y } }"): cannot have directive applications in the @provides(fields:) argument but found @skip(if: true)."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_directive_applications_in_requires() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: "id") { + id: ID + a: Int @requires(fields: "... @skip(if: false) { b }") + b: Int @external + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "REQUIRES_DIRECTIVE_IN_FIELDS_ARG", + r#"[S] On field "T.a", for @requires(fields: "... 
@skip(if: false) { b }"): cannot have directive applications in the @requires(fields:) argument but found @skip(if: false)."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn can_collect_multiple_errors_in_a_single_fields_argument() { + let schema_str = r#" + type Query { + t: T @provides(fields: "f(x: 3)") + } + + type T @key(fields: "id") { + id: ID + f(x: Int): Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [ + ( + "PROVIDES_FIELDS_HAS_ARGS", + r#"[S] On field "Query.t", for @provides(fields: "f(x: 3)"): field T.f cannot be included because it has arguments (fields with argument are not allowed in @provides)"#, + ), + ( + "PROVIDES_FIELDS_MISSING_EXTERNAL", + r#"[S] On field "Query.t", for @provides(fields: "f(x: 3)"): field "T.f" should not be part of a @provides since it is already provided by this subgraph (it is not marked @external)"#, + ), + ] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_aliases_in_key() { + let schema_str = r#" + type Query { + t: T + } + + type T @key(fields: "foo: id") { + id: ID! + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "KEY_INVALID_FIELDS", + r#"[S] On type "T", for @key(fields: "foo: id"): Cannot use alias "foo" in "foo: id": aliases are not currently supported in @key"#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_aliases_in_provides() { + let schema_str = r#" + type Query { + t: T @provides(fields: "bar: x") + } + + type T @key(fields: "id") { + id: ID! 
+ x: Int @external + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "PROVIDES_INVALID_FIELDS", + r#"[S] On field "Query.t", for @provides(fields: "bar: x"): Cannot use alias "bar" in "bar: x": aliases are not currently supported in @provides"#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_aliases_in_requires() { + let schema_str = r#" + type Query { + t: T + } + + type T { + x: X @external + y: Int @external + g: Int @requires(fields: "foo: y") + h: Int @requires(fields: "x { m: a n: b }") + } + + type X { + a: Int + b: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [ + ( + "REQUIRES_INVALID_FIELDS", + r#"[S] On field "T.g", for @requires(fields: "foo: y"): Cannot use alias "foo" in "foo: y": aliases are not currently supported in @requires"#, + ), + ( + "REQUIRES_INVALID_FIELDS", + r#"[S] On field "T.h", for @requires(fields: "x { m: a n: b }"): Cannot use alias "m" in "m: a": aliases are not currently supported in @requires"#, + ), + ] + ); + } +} + +mod root_types { + use super::*; + + #[test] + fn rejects_using_query_as_type_name_if_not_the_query_root() { + let schema_str = r#" + schema { + query: MyQuery + } + + type MyQuery { + f: Int + } + + type Query { + g: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "ROOT_QUERY_USED", + r#"[S] The schema has a type named "Query" but it is not set as the query root type ("MyQuery" is instead): this is not supported by federation. 
If a root type does not use its default name, there should be no other type with that default name."#, + )] + ); + } + + #[test] + fn rejects_using_mutation_as_type_name_if_not_the_mutation_root() { + let schema_str = r#" + schema { + mutation: MyMutation + } + + type MyMutation { + f: Int + } + + type Mutation { + g: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "ROOT_MUTATION_USED", + r#"[S] The schema has a type named "Mutation" but it is not set as the mutation root type ("MyMutation" is instead): this is not supported by federation. If a root type does not use its default name, there should be no other type with that default name."#, + )] + ); + } + + #[test] + fn rejects_using_subscription_as_type_name_if_not_the_subscription_root() { + let schema_str = r#" + schema { + subscription: MySubscription + } + + type MySubscription { + f: Int + } + + type Subscription { + g: Int + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "ROOT_SUBSCRIPTION_USED", + r#"[S] The schema has a type named "Subscription" but it is not set as the subscription root type ("MySubscription" is instead): this is not supported by federation. If a root type does not use its default name, there should be no other type with that default name."#, + )] + ); + } +} + +mod custom_error_message_for_misnamed_directives { + use super::*; + + struct FedVersionSchemaParams { + extended_schema: &'static str, + extra_msg: &'static str, + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"Mismatched error counts: 1 != 3"#)] + fn has_suggestions_if_a_federation_directive_is_misspelled_in_all_schema_versions() { + let schema_versions = [ + FedVersionSchemaParams { + // fed1 + extended_schema: r#""#, + extra_msg: " If so, note that it is a federation 2 directive but this schema is a federation 1 one. 
To be a federation 2 schema, it needs to @link to the federation specification v2.", + }, + FedVersionSchemaParams { + // fed2 + extended_schema: r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0") + "#, + extra_msg: "", + }, + ]; + for fed_ver in schema_versions { + let schema_str = format!( + r#"{} + type T @keys(fields: "id") {{ + id: Int @foo + foo: String @sharable + }} + "#, + fed_ver.extended_schema + ); + let err = build_for_errors(&schema_str); + + assert_errors!( + err, + [ + ("INVALID_GRAPHQL", r#"[S] Unknown directive "@foo"."#,), + ( + "INVALID_GRAPHQL", + format!( + r#"[S] Unknown directive "@sharable". Did you mean "@shareable"?{}"#, + fed_ver.extra_msg + ) + .as_str(), + ), + ( + "INVALID_GRAPHQL", + r#"[S] Unknown directive "@keys". Did you mean "@key"?"#, + ), + ] + ); + } + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn has_suggestions_if_a_fed2_directive_is_used_in_fed1() { + let schema_str = r#" + type T @key(fields: "id") { + id: Int + foo: String @shareable + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [( + "INVALID_GRAPHQL", + r#"[S] Unknown directive "@shareable". If you meant the \"@shareable\" federation 2 directive, note that this schema is a federation 1 schema. 
To be a federation 2 schema, it needs to @link to the federation specification v2."#, + )] + ); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"Mismatched error counts: 1 != 2"#)] + fn has_suggestions_if_a_fed2_directive_is_used_under_wrong_name_for_the_schema() { + let schema_str = r#" + extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: [{ name: "@key", as: "@myKey" }] + ) + + type T @key(fields: "id") { + id: Int + foo: String @shareable + } + "#; + let err = build_for_errors(schema_str); + + assert_errors!( + err, + [ + ( + "INVALID_GRAPHQL", + r#"[S] Unknown directive "@shareable". If you meant the \"@shareable\" federation directive, you should use fully-qualified name "@federation__shareable" or add "@shareable" to the \`import\` argument of the @link to the federation specification."#, + ), + ( + "INVALID_GRAPHQL", + r#"[S] Unknown directive "@key". If you meant the "@key" federation directive, you should use "@myKey" as it is imported under that name in the @link to the federation specification of this schema."#, + ), + ] + ); + } +} + +// PORT_NOTE: Corresponds to '@core/@link handling' tests in JS +#[cfg(test)] +mod link_handling_tests { + use super::*; + + // TODO(FED-543): Remaining directive definitions should be added to the schema + #[allow(dead_code)] + const EXPECTED_FULL_SCHEMA: &str = r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + { + query: Query + } + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + + directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @federation__provides(fields: federation__FieldSet!) 
on FIELD_DEFINITION + + directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + + directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + directive @federation__extends on OBJECT | INTERFACE + + directive @federation__shareable on OBJECT | FIELD_DEFINITION + + directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + directive @federation__override(from: String!) on FIELD_DEFINITION + + type T + @key(fields: "k") + { + k: ID! + } + + enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + scalar link__Import + + scalar federation__FieldSet + + scalar _Any + + type _Service { + sdl: String + } + + union _Entity = T + + type Query { + _entities(representations: [_Any!]!): [_Entity]! + _service: _Service! + } + "#; + + #[test] + fn expands_everything_if_only_the_federation_spec_is_linked() { + let subgraph = build_and_validate( + r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + type T @key(fields: "k") { + k: ID! + } + "#, + ); + + // TODO(FED-543): `subgraph` is supposed to be compared against `EXPECTED_FULL_SCHEMA`, but + // it's failing due to missing directive definitions. So, we use + // `insta::assert_snapshot` for now. 
+ // assert_eq!(subgraph.schema_string(), EXPECTED_FULL_SCHEMA); + insta::assert_snapshot!(subgraph.schema_string(), @r###" + schema @link(url: "https://specs.apollo.dev/link/v1.0") { + query: Query + } + + extend schema @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + + directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + + directive @federation__shareable on OBJECT | FIELD_DEFINITION + + directive @federation__override(from: String!) on FIELD_DEFINITION + + directive @federation__tag repeatable on ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + type T @key(fields: "k") { + k: ID! + } + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + scalar link__Import + + scalar federation__FieldSet + + scalar _Any + + type _Service { + sdl: String + } + + union _Entity = T + + type Query { + _entities(representations: [_Any!]!): [_Entity]! + _service: _Service! 
+ } + "###); + } + + // TODO: FED-428 + #[test] + #[should_panic( + expected = r#"InvalidLinkDirectiveUsage { message: "Invalid use of @link in schema: the @link specification itself (\"https://specs.apollo.dev/link/v1.0\") is applied multiple times" }"# + )] + fn expands_definitions_if_both_the_federation_spec_and_link_spec_are_linked() { + let subgraph = build_and_validate( + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + type T @key(fields: "k") { + k: ID! + } + "#, + ); + + assert_eq!(subgraph.schema_string(), EXPECTED_FULL_SCHEMA); + } + + // TODO: FED-428 + #[test] + #[should_panic( + expected = r#"InvalidLinkDirectiveUsage { message: "Invalid use of @link in schema: the @link specification itself (\"https://specs.apollo.dev/link/v1.0\") is applied multiple times" }"# + )] + fn is_valid_if_a_schema_is_complete_from_the_get_go() { + let subgraph = build_and_validate(EXPECTED_FULL_SCHEMA); + assert_eq!(subgraph.schema_string(), EXPECTED_FULL_SCHEMA); + } + + // TODO: FED-428 + #[test] + #[should_panic( + expected = r#"InvalidLinkDirectiveUsage { message: "Invalid use of @link in schema: the @link specification itself (\"https://specs.apollo.dev/link/v1.0\") is applied multiple times" }"# + )] + fn expands_missing_definitions_when_some_are_partially_provided() { + let docs = [ + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k") { + k: ID! + } + + directive @key( + fields: federation__FieldSet! + resolvable: Boolean = true + ) repeatable on OBJECT | INTERFACE + + scalar federation__FieldSet + + scalar link__Import + "#, + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k") { + k: ID! 
+ } + + scalar link__Import + "#, + r#" + extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k") { + k: ID! + } + + scalar link__Import + "#, + r#" + extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k") { + k: ID! + } + + directive @federation__external( + reason: String + ) on OBJECT | FIELD_DEFINITION + "#, + r#" + extend schema @link(url: "https://specs.apollo.dev/federation/v2.0") + + type T { + k: ID! + } + + enum link__Purpose { + EXECUTION + SECURITY + } + "#, + ]; + + // Note that we cannot use `validateFullSchema` as-is for those examples because the order + // or directive is going to be different. But that's ok, we mostly care that the validation + // doesn't fail, so we can be somewhat sure that if something necessary wasn't expanded + // properly, we would have an issue. The main reason we did validate the full schema in + // prior tests is so we had at least one full example of a subgraph expansion in the tests. + docs.iter().for_each(|doc| { + _ = build_and_validate(doc); + }); + } + + // TODO: FED-428 + #[test] + #[should_panic( + expected = r#"InvalidLinkDirectiveUsage { message: "Invalid use of @link in schema: the @link specification itself (\"https://specs.apollo.dev/link/v1.0\") is applied multiple times" }"# + )] + fn allows_known_directives_with_incomplete_but_compatible_definitions() { + let docs = [ + // @key has a `resolvable` argument in its full definition, but it is optional. + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k") { + k: ID! + } + + directive @key( + fields: federation__FieldSet! 
+ ) repeatable on OBJECT | INTERFACE + + scalar federation__FieldSet + "#, + // @inaccessible can be put in a bunch of locations, but you're welcome to restrict + // yourself to just fields. + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@inaccessible"] + ) + + type T { + k: ID! @inaccessible + } + + directive @inaccessible on FIELD_DEFINITION + "#, + // @key is repeatable, but you're welcome to restrict yourself to never repeating it. + r#" + extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k") { + k: ID! + } + + directive @key( + fields: federation__FieldSet! + resolvable: Boolean = true + ) on OBJECT | INTERFACE + + scalar federation__FieldSet + "#, + // @key `resolvable` argument is optional, but you're welcome to force users to always + // provide it. + r#" + extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k", resolvable: true) { + k: ID! + } + + directive @key( + fields: federation__FieldSet! + resolvable: Boolean! + ) repeatable on OBJECT | INTERFACE + + scalar federation__FieldSet + "#, + // @link `url` argument is allowed to be `null` now, but it used not too, so making + // sure we still accept definition where it's mandatory. + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link( + url: "https://specs.apollo.dev/federation/v2.0" + import: ["@key"] + ) + + type T @key(fields: "k") { + k: ID! + } + + directive @link( + url: String! + as: String + for: link__Purpose + import: [link__Import] + ) repeatable on SCHEMA + + scalar link__Import + scalar link__Purpose + "#, + ]; + + // Like above, we really only care that the examples validate. 
+ docs.iter().for_each(|doc| { + _ = build_and_validate(doc); + }); + } + + #[test] + fn errors_on_invalid_known_directive_location() { + // @external is not allowed on 'schema' and likely never will. + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + type T @key(fields: "k") { + k: ID! + } + + directive @federation__external( + reason: String + ) on OBJECT | FIELD_DEFINITION | SCHEMA + "#; + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "DIRECTIVE_DEFINITION_INVALID", + r#"[S] Invalid definition for directive "@federation__external": "@federation__external" should have locations OBJECT, FIELD_DEFINITION, but found (non-subset) OBJECT, FIELD_DEFINITION, SCHEMA"#, + )] + ); + } + + #[test] + fn errors_on_invalid_non_repeatable_directive_marked_repeatable() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0" import: ["@key"]) + + type T @key(fields: "k") { + k: ID! + } + + directive @federation__external repeatable on OBJECT | FIELD_DEFINITION + "#; + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "DIRECTIVE_DEFINITION_INVALID", + r#"[S] Invalid definition for directive "@federation__external": "@federation__external" should not be repeatable"#, + )] + ); + } + + #[test] + fn errors_on_unknown_argument_of_known_directive() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + type T @key(fields: "k") { + k: ID! 
+ } + + directive @federation__external(foo: Int) on OBJECT | FIELD_DEFINITION + "#; + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "DIRECTIVE_DEFINITION_INVALID", + r#"[S] Invalid definition for directive "@federation__external": unknown/unsupported argument "foo""#, + )] + ); + } + + #[test] + fn errors_on_invalid_type_for_a_known_argument() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + type T @key(fields: "k") { + k: ID! + } + + directive @key( + fields: String! + resolvable: String + ) repeatable on OBJECT | INTERFACE + "#; + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "DIRECTIVE_DEFINITION_INVALID", + r#"[S] Invalid definition for directive "@key": argument "resolvable" should have type "Boolean" but found type "String""#, + )] + ); + } + + #[test] + fn errors_on_a_required_argument_defined_as_optional() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + type T @key(fields: "k") { + k: ID! + } + + directive @key( + fields: federation__FieldSet + resolvable: Boolean = true + ) repeatable on OBJECT | INTERFACE + + scalar federation__FieldSet + "#; + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "DIRECTIVE_DEFINITION_INVALID", + r#"[S] Invalid definition for directive "@key": argument "fields" should have type "federation__FieldSet!" but found type "federation__FieldSet""#, + )] + ); + } + + #[test] + fn errors_on_invalid_definition_for_link_purpose() { + let doc = r#" + extend schema @link(url: "https://specs.apollo.dev/federation/v2.0") + + type T { + k: ID! 
+ } + + enum link__Purpose { + EXECUTION + RANDOM + } + "#; + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "TYPE_DEFINITION_INVALID", + r#"[S] Invalid definition for type "Purpose": expected values [EXECUTION, SECURITY] but found [EXECUTION, RANDOM]."#, + )] + ); + } + + #[test] + fn allows_any_non_scalar_type_in_redefinition_when_expected_type_is_a_scalar() { + // Just making sure this doesn't error out. + build_and_validate( + r#" + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.0", import: ["@key"]) + + type T @key(fields: "k") { + k: ID! + } + + # 'fields' should be of type 'federation_FieldSet!', but ensure we allow 'String!' alternatively. + directive @key( + fields: String! + resolvable: Boolean = true + ) repeatable on OBJECT | INTERFACE + "#, + ); + } + + #[test] + fn allows_defining_a_repeatable_directive_as_non_repeatable_but_validates_usages() { + let doc = r#" + type T @key(fields: "k1") @key(fields: "k2") { + k1: ID! + k2: ID! + } + + directive @key(fields: String!) 
on OBJECT + "#; + + // Test for fed2 (with @key being @link-ed) + assert_errors!( + build_for_errors(doc), + [( + "INVALID_GRAPHQL", + r###" + [S] Error: non-repeatable directive key can only be used once per location + ╭─[ S:2:39 ] + │ + 2 │ type T @key(fields: "k1") @key(fields: "k2") { + │ ──┬─ ─────────┬──────── + │ ╰──────────────────────────────────── directive `@key` first called here + │ │ + │ ╰────────── directive `@key` called again here + ───╯ + "### + )] + ); + + // Test for fed1 + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "INVALID_GRAPHQL", + r###" + [S] Error: non-repeatable directive key can only be used once per location + ╭─[ S:2:39 ] + │ + 2 │ type T @key(fields: "k1") @key(fields: "k2") { + │ ──┬─ ─────────┬──────── + │ ╰──────────────────────────────────── directive `@key` first called here + │ │ + │ ╰────────── directive `@key` called again here + ───╯ + "### + )] + ); + } +} + +mod federation_1_schema_tests { + use super::*; + + #[test] + fn accepts_federation_directive_definitions_without_arguments() { + let doc = r#" + type Query { + a: Int + } + + directive @key on OBJECT | INTERFACE + directive @requires on FIELD_DEFINITION + "#; + build_and_validate(doc); + } + + #[test] + fn accepts_federation_directive_definitions_with_nullable_arguments() { + let doc = r#" + type Query { + a: Int + } + + type T @key(fields: "id") { + id: ID! 
@requires(fields: "x") + x: Int @external + } + + # Tests with the _FieldSet argument non-nullable + scalar _FieldSet + directive @key(fields: _FieldSet) on OBJECT | INTERFACE + + # Tests with the argument as String and non-nullable + directive @requires(fields: String) on FIELD_DEFINITION + "#; + build_and_validate(doc); + } + + #[test] + fn accepts_federation_directive_definitions_with_fieldset_type_instead_of_underscore_fieldset() + { + // accepts federation directive definitions with "FieldSet" type instead of "_FieldSet" + let doc = r#" + type Query { + a: Int + } + + type T @key(fields: "id") { + id: ID! + } + + scalar FieldSet + directive @key(fields: FieldSet) on OBJECT | INTERFACE + "#; + build_and_validate(doc); + } + + #[test] + fn rejects_federation_directive_definition_with_unknown_arguments() { + let doc = r#" + type Query { + a: Int + } + + type T @key(fields: "id", unknown: 42) { + id: ID! + } + + scalar _FieldSet + directive @key(fields: _FieldSet!, unknown: Int) on OBJECT | INTERFACE + "#; + assert_errors!( + build_for_errors_with_option(doc, BuildOption::AsIs), + [( + "DIRECTIVE_DEFINITION_INVALID", + r#"[S] Invalid definition for directive "@key": unknown/unsupported argument "unknown""# + )] + ); + } +} + +mod shareable_tests { + use super::*; + + #[test] + #[should_panic(expected = r#"subgraph error was expected: "#)] + fn can_only_be_applied_to_fields_of_object_types() { + let doc = r#" + interface I { + a: Int @shareable + } + "#; + assert_errors!( + build_for_errors(doc), + [( + "INVALID_SHAREABLE_USAGE", + r#"[S] Invalid use of @shareable on field "I.a": only object type fields can be marked with @shareable"# + )] + ); + } + + #[test] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn rejects_duplicate_shareable_on_the_same_definition_declaration() { + let doc = r#" + type E @shareable @key(fields: "id") @shareable { + id: ID! 
+ a: Int + } + "#; + assert_errors!( + build_for_errors(doc), + [( + "INVALID_SHAREABLE_USAGE", + r#"[S] Invalid duplicate application of @shareable on the same type declaration of "E": @shareable is only repeatable on types so it can be used simultaneously on a type definition and its extensions, but it should not be duplicated on the same definition/extension declaration"# + )] + ); + } + + #[test] + #[should_panic(expected = r#"subgraph error was expected: "#)] + fn rejects_duplicate_shareable_on_the_same_extension_declaration() { + let doc = r#" + type E @shareable { + id: ID! + a: Int + } + + extend type E @shareable @shareable { + b: Int + } + "#; + assert_errors!( + build_for_errors(doc), + [( + "INVALID_SHAREABLE_USAGE", + r#"[S] Invalid duplicate application of @shareable on the same type declaration of "E": @shareable is only repeatable on types so it can be used simultaneously on a type definition and its extensions, but it should not be duplicated on the same definition/extension declaration"# + )] + ); + } + + #[test] + #[should_panic(expected = r#"subgraph error was expected: "#)] + fn rejects_duplicate_shareable_on_a_field() { + let doc = r#" + type E { + a: Int @shareable @shareable + } + "#; + assert_errors!( + build_for_errors(doc), + [( + "INVALID_SHAREABLE_USAGE", + r#"[S] Invalid duplicate application of @shareable on field "E.a": @shareable is only repeatable on types so it can be used simultaneously on a type definition and its extensions, but it should not be duplicated on the same definition/extension declaration"# + )] + ); + } +} + +mod interface_object_and_key_on_interfaces_validation_tests { + use super::*; + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn key_on_interfaces_require_key_on_all_implementations() { + let doc = r#" + interface I @key(fields: "id1") @key(fields: "id2") { + id1: ID! + id2: ID! 
+ } + + type A implements I @key(fields: "id2") { + id1: ID! + id2: ID! + a: Int + } + + type B implements I @key(fields: "id1") @key(fields: "id2") { + id1: ID! + id2: ID! + b: Int + } + + type C implements I @key(fields: "id2") { + id1: ID! + id2: ID! + c: Int + } + "#; + assert_errors!( + build_for_errors(doc), + [( + "INTERFACE_KEY_NOT_ON_IMPLEMENTATION", + r#"[S] Key @key(fields: "id1") on interface type "I" is missing on implementation types "A" and "C"."# + )] + ); + } + + #[test] + #[should_panic(expected = r#"subgraph error was expected:"#)] + fn key_on_interfaces_with_key_on_some_implementation_non_resolvable() { + let doc = r#" + interface I @key(fields: "id1") { + id1: ID! + } + + type A implements I @key(fields: "id1") { + id1: ID! + a: Int + } + + type B implements I @key(fields: "id1") { + id1: ID! + b: Int + } + + type C implements I @key(fields: "id1", resolvable: false) { + id1: ID! + c: Int + } + "#; + + assert_errors!( + build_for_errors(doc), + [( + "INTERFACE_KEY_NOT_ON_IMPLEMENTATION", + r#"[S] Key @key(fields: "id1") on interface type "I" should be resolvable on all implementation types, but is declared with argument "@key(resolvable:)" set to false in type "C"."# + )] + ); + } + + #[test] + fn ensures_order_of_fields_in_key_does_not_matter() { + let doc = r#" + interface I @key(fields: "a b c") { + a: Int + b: Int + c: Int + } + + type A implements I @key(fields: "c b a") { + a: Int + b: Int + c: Int + } + + type B implements I @key(fields: "a c b") { + a: Int + b: Int + c: Int + } + + type C implements I @key(fields: "a b c") { + a: Int + b: Int + c: Int + } + "#; + + // Ensure no errors are returned + build_and_validate(doc); + } + + #[test] + #[ignore = "temporary ignore for build break"] + #[should_panic(expected = r#"Mismatched errors:"#)] + fn only_allow_interface_object_on_entity_types() { + // There is no meaningful way to make @interfaceObject work on a value type at the moment, + // because if you have an @interfaceObject, some 
other subgraph needs to be able to resolve + // the concrete type, and that imply that you have key to go to that other subgraph. To be + // clear, the @key on the @interfaceObject technically don't need to be "resolvable", and + // the difference between no key and a non-resolvable key is arguably more of a convention + // than a genuine mechanical difference at the moment, but still a good idea to rely on + // that convention to help catching obvious mistakes early. + let doc = r#" + # This one shouldn't raise an error + type A @key(fields: "id", resolvable: false) @interfaceObject { + id: ID! + } + + # This one should + type B @interfaceObject { + x: Int + } + "#; + assert_errors!( + build_for_errors(doc), + [( + "INTERFACE_OBJECT_USAGE_ERROR", + r#"[S] The @interfaceObject directive can only be applied to entity types but type "B" has no @key in this subgraph."# + )] + ); + } +} + +mod cost_tests { + use super::*; + + #[test] + fn rejects_cost_applications_on_interfaces() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost"]) + + type Query { + a: A + } + + interface A { + x: Int @cost(weight: 10) + } + "#; + + assert_errors!( + build_for_errors(doc), + [( + "COST_APPLIED_TO_INTERFACE_FIELD", + r#"[S] @cost cannot be applied to interface "A.x""# + )] + ); + } +} + +mod list_size_tests { + use super::*; + + #[test] + fn rejects_applications_on_non_lists_unless_it_uses_sized_fields() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) + + type Query { + a1: A @listSize(assumedSize: 5) + a2: A @listSize(assumedSize: 10, sizedFields: ["ints"]) + } + + type A { + ints: [Int] + } + "#; + + assert_errors!( + build_for_errors(doc), + [( + "LIST_SIZE_APPLIED_TO_NON_LIST", + r#"[S] "Query.a1" is not a list"# + )] + ); + } + + #[test] + fn rejects_negative_assumed_size() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: 
["@listSize"]) + + type Query { + a: [Int] @listSize(assumedSize: -5) + b: [Int] @listSize(assumedSize: 0) + } + "#; + + assert_errors!( + build_for_errors(doc), + [( + "LIST_SIZE_INVALID_ASSUMED_SIZE", + r#"[S] Assumed size of "Query.a" cannot be negative"# + )] + ); + } + + #[test] + fn rejects_slicing_arguments_not_in_field_arguments() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) + + type Query { + myField(something: Int): [String] + @listSize(slicingArguments: ["missing1", "missing2"]) + myOtherField(somethingElse: String): [Int] + @listSize(slicingArguments: ["alsoMissing"]) + } + "#; + + assert_errors!( + build_for_errors(doc), + [ + ( + "LIST_SIZE_INVALID_SLICING_ARGUMENT", + r#"[S] Slicing argument "missing1" is not an argument of "Query.myField""# + ), + ( + "LIST_SIZE_INVALID_SLICING_ARGUMENT", + r#"[S] Slicing argument "missing2" is not an argument of "Query.myField""# + ), + ( + "LIST_SIZE_INVALID_SLICING_ARGUMENT", + r#"[S] Slicing argument "alsoMissing" is not an argument of "Query.myOtherField""# + ) + ] + ); + } + + #[test] + fn rejects_slicing_arguments_not_int_or_int_non_null() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) + + type Query { + sliced( + first: String + second: Int + third: Int! + fourth: [Int] + fifth: [Int]! 
+ ): [String] + @listSize( + slicingArguments: ["first", "second", "third", "fourth", "fifth"] + ) + } + "#; + + assert_errors!( + build_for_errors(doc), + [ + ( + "LIST_SIZE_INVALID_SLICING_ARGUMENT", + r#"[S] Slicing argument "Query.sliced(first:)" must be Int or Int!"# + ), + ( + "LIST_SIZE_INVALID_SLICING_ARGUMENT", + r#"[S] Slicing argument "Query.sliced(fourth:)" must be Int or Int!"# + ), + ( + "LIST_SIZE_INVALID_SLICING_ARGUMENT", + r#"[S] Slicing argument "Query.sliced(fifth:)" must be Int or Int!"# + ) + ] + ); + } + + #[test] + fn rejects_sized_fields_when_output_type_is_not_object() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) + + type Query { + notObject: Int @listSize(assumedSize: 1, sizedFields: ["anything"]) + a: A @listSize(assumedSize: 5, sizedFields: ["ints"]) + b: B @listSize(assumedSize: 10, sizedFields: ["ints"]) + } + + type A { + ints: [Int] + } + + interface B { + ints: [Int] + } + "#; + + assert_errors!( + build_for_errors(doc), + [( + "LIST_SIZE_INVALID_SIZED_FIELD", + r#"[S] Sized fields cannot be used because "Int" is not a composite type"# + )] + ); + } + + #[test] + fn rejects_sized_fields_not_in_output_type() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) + + type Query { + a: A @listSize(assumedSize: 5, sizedFields: ["notOnA"]) + } + + type A { + ints: [Int] + } + "#; + + assert_errors!( + build_for_errors(doc), + [( + "LIST_SIZE_INVALID_SIZED_FIELD", + r#"[S] Sized field "notOnA" is not a field on type "A""# + )] + ); + } + + #[test] + fn rejects_sized_fields_not_lists() { + let doc = r#" + extend schema + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) + + type Query { + a: A + @listSize( + assumedSize: 5 + sizedFields: ["list", "nonNullList", "notList"] + ) + } + + type A { + list: [String] + nonNullList: [String]! 
+ notList: String + } + "#; + + assert_errors!( + build_for_errors(doc), + [( + "LIST_SIZE_APPLIED_TO_NON_LIST", + r#"[S] Sized field "A.notList" is not a list"# + )] + ); + } +} diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index fb819760b9..50f5a8c2dc 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "2.2.0" +version = "2.2.1" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index b8b639e968..6e61706bf6 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "2.2.0" +version = "2.2.1" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -58,7 +58,7 @@ snapshot = ["axum-server", "serde_regex"] [dependencies] anyhow = "1.0.86" apollo-compiler.workspace = true -apollo-federation = { path = "../apollo-federation", version = "=2.2.0", features = ["connect_v0.2"] } +apollo-federation = { path = "../apollo-federation", version = "=2.2.1" } async-compression = { version = "0.4.6", features = [ "tokio", "brotli", @@ -92,7 +92,7 @@ dhat = { version = "0.3.3", optional = true } diff = "0.1.13" displaydoc = "0.2" flate2 = "1.0.30" -fred = { version = "9.4.0", features = ["enable-rustls", "i-cluster"] } +fred = { version = "9.4.0", features = ["enable-rustls-ring", "i-cluster"] } futures = { version = "0.3.30", features = ["thread-pool"] } graphql_client = "0.14.0" hex.workspace = true @@ -105,7 +105,7 @@ humantime = "2.1.0" humantime-serde = "1.1.1" hyper = { version = "1.5.1", features = ["full"] } hyper-util = { version = "0.1.10", features = ["full"] } -hyper-rustls = { version = "0.27.3", features = ["http1", "http2", "rustls-native-certs"] } +hyper-rustls = { version = 
"0.27.3", default-features = false, features = ["http1", "http2", "rustls-native-certs"] } indexmap = { version = "2.2.6", features = ["serde"] } itertools = "0.14.0" jsonpath_lib = "0.3.0" @@ -178,7 +178,7 @@ proteus = "0.5.0" rand = "0.8.5" rhai = { version = "1.19.0", features = ["sync", "serde", "internals"] } regex = "1.10.5" -reqwest = { version = "0.12.9", default-features = false, features = [ +reqwest = { workspace = true, default-features = false, features = [ "rustls-tls", "rustls-tls-native-roots", "gzip", @@ -187,7 +187,7 @@ reqwest = { version = "0.12.9", default-features = false, features = [ ] } rust-embed = { version = "8.4.0", features = ["include-exclude"] } -rustls = "0.23.19" +rustls = { version = "0.23.19", default-features = false } rustls-native-certs = "0.8.1" rustls-pemfile = "2.2.0" schemars.workspace = true @@ -205,7 +205,7 @@ static_assertions = "1.1.0" strum_macros = "0.27.0" sys-info = "0.9.1" sysinfo = { version = "0.32.0", features = ["system", "windows"], default-features = false } -thiserror = "1.0.61" +thiserror = "2.0.0" tokio.workspace = true tokio-stream = { version = "0.1.15", features = ["sync", "net"] } tokio-util = { version = "0.7.11", features = ["net", "codec", "time"] } @@ -232,7 +232,7 @@ wsl = "0.1.0" tokio-tungstenite = { version = "0.26.1", features = [ "rustls-tls-native-roots", ] } -tokio-rustls = "0.26.0" +tokio-rustls = { version = "0.26.0", default-features = false } hickory-resolver = "0.24.1" http-serde = "2.1.1" hmac = "0.12.1" @@ -245,8 +245,10 @@ zstd-safe = "7.1.0" # note: hyper 1.0 update seems to mean this isn't true... 
aws-sigv4 = "1.2.6" aws-credential-types = "1.2.1" # XXX: This is the latest version -aws-config = "1.5.5" +aws-config = { version = "1.5.5", default-features = false } aws-types = "1.3.3" +aws-smithy-async = { version = "1.2.5", features = ["rt-tokio"] } +aws-smithy-http-client = { version = "1.0.1", default-features = false, features = ["default-client", "rustls-ring"] } aws-smithy-runtime-api = { version = "1.7.3", features = ["client"] } sha1.workspace = true tracing-serde = "0.1.3" @@ -277,7 +279,7 @@ tikv-jemallocator = "0.6.0" axum = { version = "0.8.1", features = ["http2", "ws"] } axum-server = "0.7.1" ecdsa = { version = "0.16.9", features = ["signing", "pem", "pkcs8"] } -fred = { version = "9.4.0", features = ["enable-rustls", "mocks", "i-cluster"] } +fred = { version = "9.4.0", features = ["enable-rustls-ring", "mocks", "i-cluster"] } futures-test = "0.3.30" insta.workspace = true maplit = "1.0.2" @@ -325,6 +327,7 @@ tracing-subscriber = { version = "0.3.18", default-features = false, features = ] } tracing-opentelemetry = "0.25.0" tracing-test = "0.2.5" +tracing-mock = "0.1.0-beta.1" walkdir = "2.5.0" wiremock = "0.5.22" libtest-mimic = "0.8.0" diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs index f42c3eaf14..de347bd961 100644 --- a/apollo-router/src/axum_factory/tests.rs +++ b/apollo-router/src/axum_factory/tests.rs @@ -70,11 +70,11 @@ use crate::http_server_factory::HttpServerFactory; use crate::http_server_factory::HttpServerHandle; use crate::json_ext::Path; use crate::metrics::FutureMetricsExt; +use crate::plugins::content_negotiation::MULTIPART_DEFER_ACCEPT_HEADER_VALUE; +use crate::plugins::content_negotiation::MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE; use crate::plugins::healthcheck::Config as HealthCheck; use crate::router_factory::Endpoint; use crate::router_factory::RouterFactory; -use crate::services::MULTIPART_DEFER_ACCEPT; -use crate::services::MULTIPART_DEFER_CONTENT_TYPE; use 
crate::services::RouterRequest; use crate::services::RouterResponse; use crate::services::SupergraphResponse; @@ -1723,7 +1723,7 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> { let mut response = client .post(&url) .body(query.to_string()) - .header(ACCEPT, HeaderValue::from_static(MULTIPART_DEFER_ACCEPT)) + .header(ACCEPT, MULTIPART_DEFER_ACCEPT_HEADER_VALUE) .send() .await .unwrap(); @@ -1731,7 +1731,7 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> { assert_eq!(response.status(), StatusCode::OK); assert_eq!( response.headers().get(CONTENT_TYPE), - Some(&HeaderValue::from_static(MULTIPART_DEFER_CONTENT_TYPE)) + Some(&MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE) ); let first = response.chunk().await.unwrap().unwrap(); @@ -1783,7 +1783,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr let mut response = client .post(&url) .body(query.to_string()) - .header(ACCEPT, HeaderValue::from_static(MULTIPART_DEFER_ACCEPT)) + .header(ACCEPT, MULTIPART_DEFER_ACCEPT_HEADER_VALUE) .send() .await .unwrap(); @@ -1791,7 +1791,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr assert_eq!(response.status(), StatusCode::OK); assert_eq!( response.headers().get(CONTENT_TYPE), - Some(&HeaderValue::from_static(MULTIPART_DEFER_CONTENT_TYPE)) + Some(&MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE) ); let first = response.chunk().await.unwrap().unwrap(); diff --git a/apollo-router/src/cache/redis.rs b/apollo-router/src/cache/redis.rs index b6d5be6496..05c8db29f1 100644 --- a/apollo-router/src/cache/redis.rs +++ b/apollo-router/src/cache/redis.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::fmt; +use std::ops::Deref; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; @@ -51,9 +52,39 @@ pub(crate) struct RedisValue(pub(crate) V) where V: ValueType; +/// `DropSafeRedisPool` is a wrapper for `fred::prelude::RedisPool` which closes the pool's Redis +/// 
connections when it is dropped. +// +// Dev notes: +// * the inner `RedisPool` must be wrapped in an `Arc` because closing the connections happens +// in a spawned async task. +// * why not just implement this within `Drop` for `RedisCacheStorage`? Because `RedisCacheStorage` +// is cloned frequently throughout the router, and we don't want to close the connections +// when each clone is dropped, only when the last instance is dropped. +struct DropSafeRedisPool(Arc); +impl Deref for DropSafeRedisPool { + type Target = RedisPool; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Drop for DropSafeRedisPool { + fn drop(&mut self) { + let inner = self.0.clone(); + tokio::spawn(async move { + let result = inner.quit().await; + if let Err(err) = result { + tracing::warn!("Caught error while closing unused Redis connections: {err:?}"); + } + }); + } +} + #[derive(Clone)] pub(crate) struct RedisCacheStorage { - inner: Arc, + inner: Arc, namespace: Option>, pub(crate) ttl: Option, is_cluster: bool, @@ -144,7 +175,7 @@ where } impl RedisCacheStorage { - pub(crate) async fn new(config: RedisCache) -> Result { + pub(crate) async fn new(config: RedisCache, caller: &'static str) -> Result { let url = Self::preprocess_urls(config.urls)?; let mut client_config = RedisConfig::from_url(url.as_str())?; let is_cluster = url.scheme() == "redis-cluster" || url.scheme() == "rediss-cluster"; @@ -180,6 +211,7 @@ impl RedisCacheStorage { config.ttl, config.reset_ttl, is_cluster, + caller, ) .await } @@ -199,10 +231,12 @@ impl RedisCacheStorage { None, false, false, + "test", ) .await } + #[allow(clippy::too_many_arguments)] async fn create_client( client_config: RedisConfig, timeout: Duration, @@ -211,6 +245,7 @@ impl RedisCacheStorage { ttl: Option, reset_ttl: bool, is_cluster: bool, + caller: &'static str, ) -> Result { let pooled_client = RedisPool::new( client_config, @@ -229,6 +264,14 @@ impl RedisCacheStorage { let mut error_rx = client.error_rx(); let mut reconnect_rx = 
client.reconnect_rx(); + i64_up_down_counter_with_unit!( + "apollo.router.cache.redis.connections", + "Number of Redis connections", + "{connection}", + 1, + kind = caller + ); + tokio::spawn(async move { while let Ok(error) = error_rx.recv().await { tracing::error!("Client disconnected with error: {:?}", error); @@ -238,6 +281,13 @@ impl RedisCacheStorage { while reconnect_rx.recv().await.is_ok() { tracing::info!("Redis client reconnected."); } + i64_up_down_counter_with_unit!( + "apollo.router.cache.redis.connections", + "Number of Redis connections", + "{connection}", + -1, + kind = caller + ); }); } @@ -250,7 +300,7 @@ impl RedisCacheStorage { tracing::trace!("redis connection established"); Ok(Self { - inner: Arc::new(pooled_client), + inner: Arc::new(DropSafeRedisPool(Arc::new(pooled_client))), namespace: namespace.map(Arc::new), ttl, is_cluster, diff --git a/apollo-router/src/cache/storage.rs b/apollo-router/src/cache/storage.rs index c75b662674..288622dc07 100644 --- a/apollo-router/src/cache/storage.rs +++ b/apollo-router/src/cache/storage.rs @@ -80,7 +80,7 @@ where inner: Arc::new(Mutex::new(LruCache::new(max_capacity))), redis: if let Some(config) = config { let required_to_start = config.required_to_start; - match RedisCacheStorage::new(config).await { + match RedisCacheStorage::new(config, caller).await { Err(e) => { tracing::error!( cache = caller, diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 5028576b10..48a3390352 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1,7 +1,6 @@ --- source: apollo-router/src/configuration/tests.rs expression: "&schema" -snapshot_kind: text --- { "$schema": 
"http://json-schema.org/draft-07/schema#", @@ -1343,8 +1342,8 @@ snapshot_kind: text "description": "Telemetry configuration", "properties": { "apollo": { - "$ref": "#/definitions/Config12", - "description": "#/definitions/Config12" + "$ref": "#/definitions/Config13", + "description": "#/definitions/Config13" }, "exporters": { "$ref": "#/definitions/Exporters", @@ -1383,6 +1382,26 @@ snapshot_kind: text "type": "object" }, "Config10": { + "additionalProperties": false, + "description": "Configuration for exposing errors that originate from subgraphs", + "properties": { + "all": { + "$ref": "#/definitions/ErrorMode", + "description": "#/definitions/ErrorMode" + }, + "subgraphs": { + "additionalProperties": { + "$ref": "#/definitions/SubgraphConfig", + "description": "#/definitions/SubgraphConfig" + }, + "default": {}, + "description": "Overrides global configuration on a per-subgraph basis", + "type": "object" + } + }, + "type": "object" + }, + "Config11": { "additionalProperties": false, "description": "Configuration for entity caching", "properties": { @@ -1415,11 +1434,11 @@ snapshot_kind: text ], "type": "object" }, - "Config11": { + "Config12": { "description": "Configuration for the progressive override plugin", "type": "object" }, - "Config12": { + "Config13": { "additionalProperties": false, "properties": { "batch_processor": { @@ -1495,7 +1514,7 @@ snapshot_kind: text }, "type": "object" }, - "Config13": { + "Config14": { "additionalProperties": false, "properties": { "batch_processor": { @@ -1532,7 +1551,7 @@ snapshot_kind: text ], "type": "object" }, - "Config14": { + "Config15": { "additionalProperties": false, "description": "Prometheus configuration", "properties": { @@ -1553,7 +1572,7 @@ snapshot_kind: text }, "type": "object" }, - "Config15": { + "Config16": { "additionalProperties": false, "properties": { "batch_processor": { @@ -1574,7 +1593,7 @@ snapshot_kind: text ], "type": "object" }, - "Config16": { + "Config17": { "additionalProperties": 
false, "properties": { "batch_processor": { @@ -1633,7 +1652,7 @@ snapshot_kind: text ], "type": "object" }, - "Config17": { + "Config18": { "additionalProperties": false, "description": "Configuration for the experimental traffic shaping plugin", "properties": { @@ -1819,6 +1838,9 @@ snapshot_kind: text "type": "object" }, "Config8": { + "type": "object" + }, + "Config9": { "additionalProperties": false, "description": "Configuration for header propagation", "properties": { @@ -1842,26 +1864,6 @@ snapshot_kind: text }, "type": "object" }, - "Config9": { - "additionalProperties": false, - "description": "Configuration for exposing errors that originate from subgraphs", - "properties": { - "all": { - "$ref": "#/definitions/ErrorMode", - "description": "#/definitions/ErrorMode" - }, - "subgraphs": { - "additionalProperties": { - "$ref": "#/definitions/SubgraphConfig", - "description": "#/definitions/SubgraphConfig" - }, - "default": {}, - "description": "Overrides global configuration on a per-subgraph basis", - "type": "object" - } - }, - "type": "object" - }, "ConnectorAttributes": { "additionalProperties": false, "properties": { @@ -4368,12 +4370,12 @@ snapshot_kind: text "description": "#/definitions/MetricsCommon" }, "otlp": { - "$ref": "#/definitions/Config13", - "description": "#/definitions/Config13" - }, - "prometheus": { "$ref": "#/definitions/Config14", "description": "#/definitions/Config14" + }, + "prometheus": { + "$ref": "#/definitions/Config15", + "description": "#/definitions/Config15" } }, "type": "object" @@ -5452,8 +5454,7 @@ snapshot_kind: text }, "condition": { "$ref": "#/definitions/Condition_for_RouterSelector", - "description": "#/definitions/Condition_for_RouterSelector", - "nullable": true + "description": "#/definitions/Condition_for_RouterSelector" }, "context": { "$ref": "#/definitions/ContextConf", @@ -6616,8 +6617,7 @@ snapshot_kind: text }, "condition": { "$ref": "#/definitions/Condition_for_SubgraphSelector", - "description": 
"#/definitions/Condition_for_SubgraphSelector", - "nullable": true + "description": "#/definitions/Condition_for_SubgraphSelector" }, "context": { "$ref": "#/definitions/ContextConf", @@ -6662,8 +6662,7 @@ snapshot_kind: text }, "condition": { "$ref": "#/definitions/Condition_for_SubgraphSelector", - "description": "#/definitions/Condition_for_SubgraphSelector", - "nullable": true + "description": "#/definitions/Condition_for_SubgraphSelector" }, "context": { "$ref": "#/definitions/ContextConf", @@ -7377,8 +7376,7 @@ snapshot_kind: text }, "condition": { "$ref": "#/definitions/Condition_for_SupergraphSelector", - "description": "#/definitions/Condition_for_SupergraphSelector", - "nullable": true + "description": "#/definitions/Condition_for_SupergraphSelector" }, "context": { "$ref": "#/definitions/ContextConf", @@ -7413,8 +7411,7 @@ snapshot_kind: text }, "condition": { "$ref": "#/definitions/Condition_for_SupergraphSelector", - "description": "#/definitions/Condition_for_SupergraphSelector", - "nullable": true + "description": "#/definitions/Condition_for_SupergraphSelector" }, "context": { "$ref": "#/definitions/ContextConf", @@ -7945,24 +7942,24 @@ snapshot_kind: text "description": "#/definitions/TracingCommon" }, "datadog": { - "$ref": "#/definitions/Config16", - "description": "#/definitions/Config16" + "$ref": "#/definitions/Config17", + "description": "#/definitions/Config17" }, "experimental_response_trace_id": { "$ref": "#/definitions/ExposeTraceId", "description": "#/definitions/ExposeTraceId" }, "otlp": { - "$ref": "#/definitions/Config13", - "description": "#/definitions/Config13" + "$ref": "#/definitions/Config14", + "description": "#/definitions/Config14" }, "propagation": { "$ref": "#/definitions/Propagation", "description": "#/definitions/Propagation" }, "zipkin": { - "$ref": "#/definitions/Config15", - "description": "#/definitions/Config15" + "$ref": "#/definitions/Config16", + "description": "#/definitions/Config16" } }, "type": "object" @@ 
-9087,6 +9084,10 @@ snapshot_kind: text "$ref": "#/definitions/ConnectorsConfig", "description": "#/definitions/ConnectorsConfig" }, + "content_negotiation": { + "$ref": "#/definitions/Config7", + "description": "#/definitions/Config7" + }, "coprocessor": { "$ref": "#/definitions/Conf4", "description": "#/definitions/Conf4" @@ -9104,8 +9105,8 @@ snapshot_kind: text "description": "#/definitions/DemandControlConfig" }, "enhanced_client_awareness": { - "$ref": "#/definitions/Config7", - "description": "#/definitions/Config7" + "$ref": "#/definitions/Config8", + "description": "#/definitions/Config8" }, "experimental_chaos": { "$ref": "#/definitions/Chaos", @@ -9125,8 +9126,8 @@ snapshot_kind: text "description": "#/definitions/ForbidMutationsConfig" }, "headers": { - "$ref": "#/definitions/Config8", - "description": "#/definitions/Config8" + "$ref": "#/definitions/Config9", + "description": "#/definitions/Config9" }, "health_check": { "$ref": "#/definitions/Config", @@ -9137,8 +9138,8 @@ snapshot_kind: text "description": "#/definitions/Homepage" }, "include_subgraph_errors": { - "$ref": "#/definitions/Config9", - "description": "#/definitions/Config9" + "$ref": "#/definitions/Config10", + "description": "#/definitions/Config10" }, "license_enforcement": { "$ref": "#/definitions/LicenseEnforcementConfig", @@ -9161,16 +9162,16 @@ snapshot_kind: text "description": "#/definitions/Plugins" }, "preview_entity_cache": { - "$ref": "#/definitions/Config10", - "description": "#/definitions/Config10" + "$ref": "#/definitions/Config11", + "description": "#/definitions/Config11" }, "preview_file_uploads": { "$ref": "#/definitions/FileUploadsConfig", "description": "#/definitions/FileUploadsConfig" }, "progressive_override": { - "$ref": "#/definitions/Config11", - "description": "#/definitions/Config11" + "$ref": "#/definitions/Config12", + "description": "#/definitions/Config12" }, "rhai": { "$ref": "#/definitions/Conf7", @@ -9201,8 +9202,8 @@ snapshot_kind: text "description": 
"#/definitions/Tls" }, "traffic_shaping": { - "$ref": "#/definitions/Config17", - "description": "#/definitions/Config17" + "$ref": "#/definitions/Config18", + "description": "#/definitions/Config18" } }, "title": "Configuration", diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs index d4e26d45f2..89b84accb4 100644 --- a/apollo-router/src/configuration/tests.rs +++ b/apollo-router/src/configuration/tests.rs @@ -786,9 +786,6 @@ fn test_configuration_validate_and_sanitize() { #[test] fn load_tls() { - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let mut cert_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); cert_path.push("src"); cert_path.push("configuration"); diff --git a/apollo-router/src/executable.rs b/apollo-router/src/executable.rs index 8490a79e2a..32d80b4ae3 100644 --- a/apollo-router/src/executable.rs +++ b/apollo-router/src/executable.rs @@ -16,8 +16,6 @@ use clap::Args; use clap::Parser; use clap::Subcommand; use clap::builder::FalseyValueParser; -#[cfg(any(feature = "dhat-heap", feature = "dhat-ad-hoc"))] -use once_cell::sync::OnceCell; use parking_lot::Mutex; use regex::Captures; use regex::Regex; @@ -57,10 +55,10 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; pub(crate) static ALLOC: dhat::Alloc = dhat::Alloc; #[cfg(feature = "dhat-heap")] -pub(crate) static mut DHAT_HEAP_PROFILER: OnceCell = OnceCell::new(); +pub(crate) static DHAT_HEAP_PROFILER: Mutex> = Mutex::new(None); #[cfg(feature = "dhat-ad-hoc")] -pub(crate) static mut DHAT_AD_HOC_PROFILER: OnceCell = OnceCell::new(); +pub(crate) static DHAT_AD_HOC_PROFILER: Mutex> = Mutex::new(None); pub(crate) static APOLLO_ROUTER_DEV_MODE: AtomicBool = AtomicBool::new(false); pub(crate) static APOLLO_ROUTER_SUPERGRAPH_PATH_IS_SET: AtomicBool = AtomicBool::new(false); @@ -76,47 +74,31 @@ const INITIAL_UPLINK_POLL_INTERVAL: Duration = Duration::from_secs(10); // main completes, so 
don't use tracing, use println!() and eprintln!().. #[cfg(feature = "dhat-heap")] fn create_heap_profiler() { - unsafe { - match DHAT_HEAP_PROFILER.set(dhat::Profiler::new_heap()) { - Ok(p) => { - println!("heap profiler installed: {:?}", p); - libc::atexit(drop_heap_profiler); - } - Err(e) => eprintln!("heap profiler install failed: {:?}", e), - } - } + *DHAT_HEAP_PROFILER.lock() = Some(dhat::Profiler::new_heap()); + println!("heap profiler installed"); + unsafe { libc::atexit(drop_heap_profiler) }; } #[cfg(feature = "dhat-heap")] -#[no_mangle] +#[unsafe(no_mangle)] extern "C" fn drop_heap_profiler() { - unsafe { - if let Some(p) = DHAT_HEAP_PROFILER.take() { - drop(p); - } + if let Some(p) = DHAT_HEAP_PROFILER.lock().take() { + drop(p); } } #[cfg(feature = "dhat-ad-hoc")] fn create_ad_hoc_profiler() { - unsafe { - match DHAT_AD_HOC_PROFILER.set(dhat::Profiler::new_ad_hoc()) { - Ok(p) => { - println!("ad-hoc profiler installed: {:?}", p); - libc::atexit(drop_ad_hoc_profiler); - } - Err(e) => eprintln!("ad-hoc profiler install failed: {:?}", e), - } - } + *DHAT_AD_HOC_PROFILER.lock() = Some(dhat::Profiler::new_heap()); + println!("ad-hoc profiler installed"); + unsafe { libc::atexit(drop_ad_hoc_profiler) }; } #[cfg(feature = "dhat-ad-hoc")] -#[no_mangle] +#[unsafe(no_mangle)] extern "C" fn drop_ad_hoc_profiler() { - unsafe { - if let Some(p) = DHAT_AD_HOC_PROFILER.take() { - drop(p); - } + if let Some(p) = DHAT_AD_HOC_PROFILER.lock().take() { + drop(p); } } @@ -407,8 +389,6 @@ impl Executable { println!("{}", std::env!("CARGO_PKG_VERSION")); return Ok(()); } - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); *crate::services::APOLLO_KEY.lock() = opt.apollo_key.clone(); *crate::services::APOLLO_GRAPH_REF.lock() = opt.apollo_graph_ref.clone(); diff --git a/apollo-router/src/graphql/request.rs b/apollo-router/src/graphql/request.rs index 627a102757..1929266c5e 100644 --- a/apollo-router/src/graphql/request.rs +++ 
b/apollo-router/src/graphql/request.rs @@ -199,7 +199,7 @@ impl Request { u64_histogram!( "apollo.router.operations.batching.size", "Number of queries contained within each query batch", - result.len() as u64, + entries.len() as u64, mode = BatchingMode::BatchHttpLink.to_string() // Only supported mode right now ); @@ -227,7 +227,7 @@ impl Request { u64_histogram!( "apollo.router.operations.batching.size", "Number of queries contained within each query batch", - result.len() as u64, + entries.len() as u64, mode = BatchingMode::BatchHttpLink.to_string() // Only supported mode right now ); diff --git a/apollo-router/src/metrics/mod.rs b/apollo-router/src/metrics/mod.rs index b84549ee89..749fdc33f3 100644 --- a/apollo-router/src/metrics/mod.rs +++ b/apollo-router/src/metrics/mod.rs @@ -1524,6 +1524,8 @@ pub(crate) type MetricFuture = Pin::Out #[cfg(test)] pub(crate) trait FutureMetricsExt { + /// See [dev-docs/metrics.md](https://github.com/apollographql/router/blob/dev/dev-docs/metrics.md#testing-async) + /// for details on this function. 
fn with_metrics( self, ) -> tokio::task::futures::TaskLocalFuture< diff --git a/apollo-router/src/plugins/authentication/mod.rs b/apollo-router/src/plugins/authentication/mod.rs index 9d4ef1b4d7..9cdefe87b3 100644 --- a/apollo-router/src/plugins/authentication/mod.rs +++ b/apollo-router/src/plugins/authentication/mod.rs @@ -453,7 +453,7 @@ fn authenticate( let failed = true; increment_jwt_counter_metric(failed); - tracing::error!(message = %error, "jwt authentication failure"); + tracing::info!(message = %error, "jwt authentication failure"); let _ = request.context.insert_json_value( JWT_CONTEXT_KEY, diff --git a/apollo-router/src/plugins/authentication/subgraph.rs b/apollo-router/src/plugins/authentication/subgraph.rs index 438e2c03e0..56dd2a5943 100644 --- a/apollo-router/src/plugins/authentication/subgraph.rs +++ b/apollo-router/src/plugins/authentication/subgraph.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use std::time::Duration; use std::time::SystemTime; +use aws_config::provider_config::ProviderConfig; use aws_credential_types::Credentials; use aws_credential_types::provider::ProvideCredentials; use aws_credential_types::provider::error::CredentialsError; @@ -11,6 +12,10 @@ use aws_sigv4::http_request::SignableBody; use aws_sigv4::http_request::SignableRequest; use aws_sigv4::http_request::SigningSettings; use aws_sigv4::http_request::sign; +use aws_smithy_async::rt::sleep::TokioSleep; +use aws_smithy_async::time::SystemTimeSource; +use aws_smithy_http_client::tls::Provider; +use aws_smithy_http_client::tls::rustls_provider::CryptoMode; use aws_smithy_runtime_api::client::identity::Identity; use aws_types::region::Region; use aws_types::sdk_config::SharedCredentialsProvider; @@ -117,9 +122,7 @@ impl AWSSigV4Config { match self { Self::DefaultChain(config) => { - let aws_config = - aws_config::default_provider::credentials::DefaultCredentialsChain::builder() - .region(region.clone()); + let aws_config = credentials_chain_builder().region(region.clone()); let 
aws_config = if let Some(profile_name) = &config.profile_name { aws_config.profile_name(profile_name.as_str()) @@ -135,10 +138,7 @@ impl AWSSigV4Config { } } Self::Hardcoded(config) => { - let chain = - aws_config::default_provider::credentials::DefaultCredentialsChain::builder() - .build() - .await; + let chain = credentials_chain_builder().build().await; if let Some(assume_role_provider) = role_provider_builder { Arc::new(assume_role_provider.build_from_provider(chain).await) } else { @@ -171,6 +171,19 @@ impl AWSSigV4Config { } } +fn credentials_chain_builder() -> aws_config::default_provider::credentials::Builder { + aws_config::default_provider::credentials::DefaultCredentialsChain::builder().configure( + ProviderConfig::default() + .with_http_client( + aws_smithy_http_client::Builder::new() + .tls_provider(Provider::Rustls(CryptoMode::Ring)) + .build_https(), + ) + .with_sleep_impl(TokioSleep::new()) + .with_time_source(SystemTimeSource::new()), + ) +} + #[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub(crate) enum AuthConfig { diff --git a/apollo-router/src/plugins/authorization/authenticated.rs b/apollo-router/src/plugins/authorization/authenticated.rs index 97e2d4c3f6..62bed55c92 100644 --- a/apollo-router/src/plugins/authorization/authenticated.rs +++ b/apollo-router/src/plugins/authorization/authenticated.rs @@ -538,7 +538,7 @@ mod tests { use crate::plugin::test::MockSubgraph; use crate::plugins::authorization::APOLLO_AUTHENTICATION_JWT_CLAIMS; use crate::plugins::authorization::authenticated::AuthenticatedVisitor; - use crate::services::router::ClientRequestAccepts; + use crate::plugins::content_negotiation::ClientRequestAccepts; use crate::services::supergraph; use crate::spec::query::transform; diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index a4b378727e..e29979db88 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ 
b/apollo-router/src/plugins/cache/entity.rs @@ -212,7 +212,7 @@ impl Plugin for EntityCache { let required_to_start = redis_config.required_to_start; // we need to explicitly disable TTL reset because it is managed directly by this plugin redis_config.reset_ttl = false; - all = match RedisCacheStorage::new(redis_config).await { + all = match RedisCacheStorage::new(redis_config, "entity").await { Ok(storage) => Some(storage), Err(e) => { tracing::error!( @@ -234,7 +234,7 @@ impl Plugin for EntityCache { // we need to explicitly disable TTL reset because it is managed directly by this plugin let mut redis_config = redis.clone(); redis_config.reset_ttl = false; - let storage = match RedisCacheStorage::new(redis_config).await { + let storage = match RedisCacheStorage::new(redis_config, "entity").await { Ok(storage) => Some(storage), Err(e) => { tracing::error!( diff --git a/apollo-router/src/plugins/connectors/testdata/batch-query.graphql b/apollo-router/src/plugins/connectors/testdata/batch-query.graphql new file mode 100644 index 0000000000..f160ab5046 --- /dev/null +++ b/apollo-router/src/plugins/connectors/testdata/batch-query.graphql @@ -0,0 +1,71 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) + @join__directive(graphs: [CONNECTORS], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.2", import: ["@source", "@connect"]}) + @join__directive(graphs: [CONNECTORS], name: "source", args: {name: "json", http: {baseURL: "http://localhost:4001/api"}}) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + CONNECTORS @join__graph(name: "connectors", url: "http://none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: CONNECTORS) +{ + users: [User!]! @join__directive(graphs: [CONNECTORS], name: "connect", args: {source: "json", http: {GET: "/users"}, selection: "id"}) +} + +type User + @join__type(graph: CONNECTORS) + @join__directive(graphs: [CONNECTORS], name: "connect", args: {source: "json", http: {GET: "/user-details?ids={$batch.id->joinNotNull(',')}"}, selection: "id name username"}) +{ + id: ID! 
+ name: String + username: String +} diff --git a/apollo-router/src/plugins/connectors/testdata/batch-query.yaml b/apollo-router/src/plugins/connectors/testdata/batch-query.yaml new file mode 100644 index 0000000000..21772a0ada --- /dev/null +++ b/apollo-router/src/plugins/connectors/testdata/batch-query.yaml @@ -0,0 +1,25 @@ +subgraphs: + connectors: + routing_url: http://none + schema: + sdl: | + extend schema + @link(url: "https://specs.apollo.dev/federation/v2.11") + @link(url: "https://specs.apollo.dev/connect/v0.2", import: ["@source", "@connect"]) + @source(name: "json", http: { baseURL: "http://localhost:4001/api" }) + + type Query { + users: [User!]! + @connect(source: "json", http: { GET: "/users" }, selection: "id") + } + + type User + @connect(source: "json" + http: { GET: "/users-details/?ids={$$batch.id->joinNotNull(',')}" } + selection: "id name username" + ) + { + id: ID! + name: String + username: String + } diff --git a/apollo-router/src/plugins/connectors/testdata/progressive-override.graphql b/apollo-router/src/plugins/connectors/testdata/progressive-override.graphql new file mode 100644 index 0000000000..3c8bbdc52d --- /dev/null +++ b/apollo-router/src/plugins/connectors/testdata/progressive-override.graphql @@ -0,0 +1,72 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/connect/v0.1", for: EXECUTION) + @join__directive(graphs: [CONNECTORS], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1", import: ["@connect", "@source"]}) + @join__directive(graphs: [CONNECTORS], name: "source", args: {name: "json", http: {baseURL: "https://jsonplaceholder.typicode.com/"}}) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + CONNECTORS @join__graph(name: "connectors", url: "none") + GRAPHQL @join__graph(name: "graphql", url: "https://localhost:4001") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: CONNECTORS) + @join__type(graph: GRAPHQL) +{ + users: [User] @join__field(graph: CONNECTORS, override: "graphql", overrideLabel: "percent(100)") @join__field(graph: GRAPHQL, overrideLabel: "percent(100)") @join__directive(graphs: [CONNECTORS], name: "connect", args: {source: "json", http: {GET: "/users"}, selection: "id name"}) +} + +type User + @join__type(graph: CONNECTORS) + @join__type(graph: GRAPHQL) +{ + id: ID! 
+ name: String +} diff --git a/apollo-router/src/plugins/connectors/testdata/progressive-override.yaml b/apollo-router/src/plugins/connectors/testdata/progressive-override.yaml new file mode 100644 index 0000000000..a4cc02737b --- /dev/null +++ b/apollo-router/src/plugins/connectors/testdata/progressive-override.yaml @@ -0,0 +1,56 @@ +subgraphs: + connectors: + routing_url: none + schema: + sdl: | + extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.10" + import: ["@shareable", "@override"] + ) + @link( + url: "https://specs.apollo.dev/connect/v0.1" + import: ["@connect", "@source"] + ) + @source( + name: "json" + http: { + baseURL: "https://jsonplaceholder.typicode.com/" + } + ) + + type Query { + users: [User] + @override(from: "graphql", label: "percent(100)") + @connect( + source: "json" + http: { + GET: "/users" + } + selection: "id name" + ) + } + + type User @shareable { + id: ID! + name: String + } + + graphql: + routing_url: https://localhost:4001 + schema: + sdl: | + extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.7" + import: ["@shareable"] + ) + + type Query { + users: [User] + } + + type User @shareable { + id: ID! 
+ name: String + } diff --git a/apollo-router/src/plugins/connectors/tests/connect_on_type.rs b/apollo-router/src/plugins/connectors/tests/connect_on_type.rs index 86f3528824..d585865ea9 100644 --- a/apollo-router/src/plugins/connectors/tests/connect_on_type.rs +++ b/apollo-router/src/plugins/connectors/tests/connect_on_type.rs @@ -87,6 +87,165 @@ async fn basic_batch() { ); } +#[tokio::test] +async fn basic_batch_query_params() { + let mock_server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/users")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([ + { "id": 3 }, + { "id": 1 }, + { "id": 2 }]))) + .mount(&mock_server) + .await; + Mock::given(method("GET")) + .and(path("/user-details")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([ + { + "id": 1, + "name": "Leanne Graham", + "username": "Bret" + }, + { + "id": 2, + "name": "Ervin Howell", + "username": "Antonette" + }, + { + "id": 3, + "name": "Clementine Bauch", + "username": "Samantha" + }]))) + .mount(&mock_server) + .await; + + let response = super::execute( + include_str!("../testdata/batch-query.graphql"), + &mock_server.uri(), + "query { users { id name username } }", + Default::default(), + None, + |_| {}, + ) + .await; + + insta::assert_json_snapshot!(response, @r#" + { + "data": { + "users": [ + { + "id": 3, + "name": "Clementine Bauch", + "username": "Samantha" + }, + { + "id": 1, + "name": "Leanne Graham", + "username": "Bret" + }, + { + "id": 2, + "name": "Ervin Howell", + "username": "Antonette" + } + ] + } + } + "#); + + super::req_asserts::matches( + &mock_server.received_requests().await.unwrap(), + vec![ + Matcher::new().method("GET").path("/users"), + Matcher::new() + .method("GET") + .path("/user-details") + .query("ids=3%2C1%2C2"), + ], + ); +} + +#[tokio::test] +async fn batch_missing_items() { + let mock_server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/users")) + 
.respond_with(ResponseTemplate::new(200).set_body_json(json!([ + { "id": 3 }, + { "id": 1 }, + { "id": 2 }, + { "id": 4 }, + ]))) + .mount(&mock_server) + .await; + Mock::given(method("POST")) + .and(path("/users-batch")) + .respond_with(ResponseTemplate::new(200).set_body_json(json!([ + // 1 & 4 are not returned, so the extra fields should just null out (not be an error) + { + "id": 2, + "name": "Ervin Howell", + "username": "Antonette" + }, + { + "id": 3, + "name": "Clementine Bauch", + "username": "Samantha" + }]))) + .mount(&mock_server) + .await; + + let response = super::execute( + include_str!("../testdata/batch.graphql"), + &mock_server.uri(), + "query { users { id name username } }", + Default::default(), + None, + |_| {}, + ) + .await; + + insta::assert_json_snapshot!(response, @r#" + { + "data": { + "users": [ + { + "id": 3, + "name": "Clementine Bauch", + "username": "Samantha" + }, + { + "id": 1, + "name": null, + "username": null + }, + { + "id": 2, + "name": "Ervin Howell", + "username": "Antonette" + }, + { + "id": 4, + "name": null, + "username": null + } + ] + } + } + "#); + + super::req_asserts::matches( + &mock_server.received_requests().await.unwrap(), + vec![ + Matcher::new().method("GET").path("/users"), + Matcher::new() + .method("POST") + .path("/users-batch") + .body(json!({ "ids": [3,1,2,4] })), + ], + ); +} + #[tokio::test] async fn connect_on_type() { let mock_server = MockServer::start().await; diff --git a/apollo-router/src/plugins/connectors/tests/mod.rs b/apollo-router/src/plugins/connectors/tests/mod.rs index e1a54daf37..995135f535 100644 --- a/apollo-router/src/plugins/connectors/tests/mod.rs +++ b/apollo-router/src/plugins/connectors/tests/mod.rs @@ -41,6 +41,7 @@ use crate::uplink::license_enforcement::LicenseState; mod connect_on_type; mod mock_api; +mod progressive_override; mod quickstart; mod req_asserts; mod url_properties; diff --git a/apollo-router/src/plugins/connectors/tests/progressive_override.rs 
b/apollo-router/src/plugins/connectors/tests/progressive_override.rs new file mode 100644 index 0000000000..7860b7b82f --- /dev/null +++ b/apollo-router/src/plugins/connectors/tests/progressive_override.rs @@ -0,0 +1,56 @@ +use wiremock::Mock; +use wiremock::MockServer; +use wiremock::ResponseTemplate; +use wiremock::matchers::method; +use wiremock::matchers::path; + +use super::req_asserts::Matcher; + +#[tokio::test] +async fn progressive_override() { + let mock_server = MockServer::start().await; + Mock::given(method("GET")) + .and(path("/users")) + .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([ + { "id": 3, "name": "Clementine Bauch" }, + { "id": 1, "name": "Leanne Graham" }, + { "id": 2, "name": "Ervin Howell" }]))) + .mount(&mock_server) + .await; + + let response = super::execute( + include_str!("../testdata/progressive-override.graphql"), + &mock_server.uri(), + "query { users { id name } }", + Default::default(), + None, + |_| {}, + ) + .await; + + insta::assert_json_snapshot!(response, @r#" + { + "data": { + "users": [ + { + "id": 3, + "name": "Clementine Bauch" + }, + { + "id": 1, + "name": "Leanne Graham" + }, + { + "id": 2, + "name": "Ervin Howell" + } + ] + } + } + "#); + + super::req_asserts::matches( + &mock_server.received_requests().await.unwrap(), + vec![Matcher::new().method("GET").path("/users")], + ); +} diff --git a/apollo-router/src/plugins/content_negotiation.rs b/apollo-router/src/plugins/content_negotiation.rs new file mode 100644 index 0000000000..d982ca45ba --- /dev/null +++ b/apollo-router/src/plugins/content_negotiation.rs @@ -0,0 +1,453 @@ +//! The content negotiation plugin performs HTTP content negotiation using the `accept` and +//! `content-type` headers, working at the router stage. 
+use std::ops::ControlFlow; + +use http::HeaderMap; +use http::HeaderValue; +use http::Method; +use http::StatusCode; +use http::header::ACCEPT; +use http::header::CONTENT_TYPE; +use http::header::VARY; +use mediatype::MediaType; +use mediatype::MediaTypeList; +use mediatype::ReadParams; +use schemars::JsonSchema; +use serde::Deserialize; +use tower::BoxError; +use tower::ServiceBuilder; +use tower::ServiceExt; + +use crate::graphql; +use crate::layers::ServiceBuilderExt; +use crate::plugin::Plugin; +use crate::plugin::PluginInit; +use crate::protocols::multipart::ProtocolMode; +use crate::services::router; +use crate::services::router::body::RouterBody; + +register_plugin!("apollo", "content_negotiation", ContentNegotiation); + +const APPLICATION_JSON: &str = "application/json"; +pub(crate) const APPLICATION_GRAPHQL_JSON: &str = "application/graphql-response+json"; + +const ORIGIN_HEADER_VALUE: HeaderValue = HeaderValue::from_static("origin"); + +// set the supported `@defer` specification version to https://github.com/graphql/graphql-spec/pull/742/commits/01d7b98f04810c9a9db4c0e53d3c4d54dbf10b82 +const MULTIPART_DEFER_SPEC_PARAMETER: &str = "deferSpec"; +const MULTIPART_DEFER_SPEC_VALUE: &str = "20220824"; +pub(crate) const MULTIPART_DEFER_ACCEPT_HEADER_VALUE: HeaderValue = + HeaderValue::from_static("multipart/mixed;deferSpec=20220824"); +pub(crate) const MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE: HeaderValue = + HeaderValue::from_static("multipart/mixed;boundary=\"graphql\";deferSpec=20220824"); + +const MULTIPART_SUBSCRIPTION_ACCEPT: &str = "multipart/mixed;subscriptionSpec=1.0"; +const MULTIPART_SUBSCRIPTION_SPEC_PARAMETER: &str = "subscriptionSpec"; +const MULTIPART_SUBSCRIPTION_SPEC_VALUE: &str = "1.0"; +pub(crate) const MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE: HeaderValue = + HeaderValue::from_static("multipart/mixed;boundary=\"graphql\";subscriptionSpec=1.0"); + +/// The `ClientRequestAccepts` struct is effectively a parsed version of a request's 
`accept` header. +/// +/// Note that multiple values here can be set to true. For example, if the request provides +/// header value `application/json,*/*`, both `json` and `wildcard` in the struct will be set to true. +#[derive(Clone, Default, Debug)] +pub(crate) struct ClientRequestAccepts { + pub(crate) multipart_defer: bool, + pub(crate) multipart_subscription: bool, + pub(crate) json: bool, + pub(crate) wildcard: bool, +} + +impl ClientRequestAccepts { + /// Returns true if any of the struct's members are true, ie the request includes an `accept` + /// value that the router supports. + fn is_valid(&self) -> bool { + self.json || self.wildcard || self.multipart_defer || self.multipart_subscription + } +} + +/// The `ContentNegotiation` plugin provides request and response layers at the router service. +/// +/// # Request +/// The request layer rejects requests that do not have an expected `Content-Type`, or that have an +/// `Accept` header that is not supported by the router. +/// +/// In particular: +/// * the request must be a `GET` or have `CONTENT_TYPE = JSON`, and +/// * the accept header must include `*/*`, one of the JSON/GraphQL MIME types, or one of the +/// multipart types. +/// +/// It will also add a `ClientRequestAccepts` value to the context if the request is valid. +/// +/// # Response +/// The response layer sets the `CONTENT_TYPE` header, using the `ClientRequestAccepts` value from +/// the context (set on the request side of this plugin). It will also set the `VARY` header if it +/// is not present. +/// +/// # Context +/// If the request is valid, this layer adds a [`ClientRequestAccepts`] value to the context. 
+struct ContentNegotiation {} +#[derive(Debug, Default, Deserialize, JsonSchema)] +struct Config {} + +impl ContentNegotiation { + fn handle_request(request: router::Request) -> ControlFlow { + let valid_content_type_header = request.router_request.method() == Method::GET + || content_type_includes_json(request.router_request.headers()); + if !valid_content_type_header { + return ControlFlow::Break(invalid_content_type_header_response().into()); + } + + let accepts = parse_accept_header(request.router_request.headers()); + if !accepts.is_valid() { + return ControlFlow::Break(invalid_accept_header_response().into()); + } + + request + .context + .extensions() + .with_lock(|lock| lock.insert(accepts)); + ControlFlow::Continue(request) + } + + fn handle_response(mut response: router::Response) -> router::Response { + let ClientRequestAccepts { + multipart_defer: accepts_multipart_defer, + multipart_subscription: accepts_multipart_subscription, + json: accepts_json, + wildcard: accepts_wildcard, + } = response.context.extensions().with_lock(|lock| { + lock.get::() + .cloned() + .unwrap_or_default() + }); + + let headers = response.response.headers_mut(); + process_vary_header(headers); + + let protocol_mode = response + .context + .extensions() + .with_lock(|lock| lock.get::().cloned()); + + let content_type = match protocol_mode { + Some(ProtocolMode::Defer) if accepts_multipart_defer => { + MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE + } + Some(ProtocolMode::Subscription) if accepts_multipart_subscription => { + MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE + } + None if accepts_json || accepts_wildcard => HeaderValue::from_static(APPLICATION_JSON), + _ => { + // XX(@carodewig) this should be unreachable, but provide fallback of APPLICATION_JSON + HeaderValue::from_static(APPLICATION_JSON) + } + }; + headers.insert(CONTENT_TYPE, content_type); + + response + } +} + +#[async_trait::async_trait] +impl Plugin for ContentNegotiation { + type Config = Config; + + async 
fn new(_init: PluginInit) -> Result + where + Self: Sized, + { + Ok(ContentNegotiation {}) + } + + fn router_service(&self, service: router::BoxService) -> router::BoxService { + ServiceBuilder::new() + .checkpoint(|request: router::Request| Ok(Self::handle_request(request))) + .service(service) + .map_response(Self::handle_response) + .boxed() + } +} + +/// Returns `true` if the media type is either `application/json` or `application/graphql-response+json`. +fn is_json_type(mime: &MediaType) -> bool { + use mediatype::names::APPLICATION; + use mediatype::names::JSON; + let is_json = |mime: &MediaType| mime.subty == JSON; + let is_gql_json = + |mime: &MediaType| mime.subty.as_str() == "graphql-response" && mime.suffix == Some(JSON); + + mime.ty == APPLICATION && (is_json(mime) || is_gql_json(mime)) +} + +/// Returns `true` if the media type is `*/*`. +fn is_wildcard(mime: &MediaType) -> bool { + use mediatype::names::_STAR; + mime.ty == _STAR && mime.subty == _STAR +} + +/// Returns `true` if media type is a multipart defer, ie `multipart/mixed;deferSpec=20220824`. +fn is_multipart_defer(mime: &MediaType) -> bool { + use mediatype::names::MIXED; + use mediatype::names::MULTIPART; + + let Some(parameter) = mediatype::Name::new(MULTIPART_DEFER_SPEC_PARAMETER) else { + return false; + }; + let Some(value) = mediatype::Value::new(MULTIPART_DEFER_SPEC_VALUE) else { + return false; + }; + + mime.ty == MULTIPART && mime.subty == MIXED && mime.get_param(parameter) == Some(value) +} + +/// Returns `true` if media type is a multipart subscription, ie `multipart/mixed;subscriptionSpec=1.0`. 
+fn is_multipart_subscription(mime: &MediaType) -> bool { + use mediatype::names::MIXED; + use mediatype::names::MULTIPART; + + let Some(parameter) = mediatype::Name::new(MULTIPART_SUBSCRIPTION_SPEC_PARAMETER) else { + return false; + }; + let Some(value) = mediatype::Value::new(MULTIPART_SUBSCRIPTION_SPEC_VALUE) else { + return false; + }; + + mime.ty == MULTIPART && mime.subty == MIXED && mime.get_param(parameter) == Some(value) +} + +/// Returns `true` if the `CONTENT_TYPE` header contains `application/json` or +/// `application/graphql-response+json`. +fn content_type_includes_json(headers: &HeaderMap) -> bool { + headers + .get_all(CONTENT_TYPE) + .iter() + .filter_map(|header| header.to_str().ok()) + .flat_map(MediaTypeList::new) + .any(|mime_result| mime_result.as_ref().is_ok_and(is_json_type)) +} + +/// Builds and returns `ClientRequestAccepts` from the `ACCEPT` content header. +fn parse_accept_header(headers: &HeaderMap) -> ClientRequestAccepts { + let mut accept_header_present = false; + let mut accepts = ClientRequestAccepts::default(); + + headers + .get_all(ACCEPT) + .iter() + .filter_map(|header| { + accept_header_present = true; + header.to_str().ok() + }) + .flat_map(MediaTypeList::new) + .flatten() + .for_each(|mime| { + accepts.json = accepts.json || is_json_type(&mime); + accepts.wildcard = accepts.wildcard || is_wildcard(&mime); + accepts.multipart_defer = accepts.multipart_defer || is_multipart_defer(&mime); + accepts.multipart_subscription = + accepts.multipart_subscription || is_multipart_subscription(&mime); + }); + + if !accept_header_present { + accepts.json = true; + } + + accepts +} + +/// Process the headers to make sure that `VARY` is set correctly. 
+fn process_vary_header(headers: &mut HeaderMap) { + if headers.get(VARY).is_none() { + // We don't have a VARY header, add one with value "origin" + headers.insert(VARY, ORIGIN_HEADER_VALUE); + } +} + +/// Helper to build a `RouterBody` containing a `graphql::Error` with the provided extension +/// code and message. +fn error_response_body(extension_code: &str, message: String) -> RouterBody { + router::body::from_bytes( + serde_json::json!({ + "errors": [ + graphql::Error::builder() + .message(message) + .extension_code(extension_code) + .build() + ] + }) + .to_string(), + ) +} + +/// Helper to build an HTTP response with a standardized invalid `CONTENT_TYPE` header message. +fn invalid_content_type_header_response() -> http::Response { + let message = format!( + r#"'content-type' header must be one of: {:?} or {:?}"#, + APPLICATION_JSON, APPLICATION_GRAPHQL_JSON, + ); + http::Response::builder() + .status(StatusCode::UNSUPPORTED_MEDIA_TYPE) + .header(CONTENT_TYPE, HeaderValue::from_static(APPLICATION_JSON)) + .body(error_response_body("INVALID_CONTENT_TYPE_HEADER", message)) + .expect("cannot fail") +} + +/// Helper to build an HTTP response with a standardized invalid `ACCEPT` header message. 
+pub(crate) fn invalid_accept_header_response() -> http::Response { + let message = format!( + r#"'accept' header must be one of: \"*/*\", {:?}, {:?}, {:?} or {:?}"#, + APPLICATION_JSON, + APPLICATION_GRAPHQL_JSON, + MULTIPART_SUBSCRIPTION_ACCEPT, + MULTIPART_DEFER_ACCEPT_HEADER_VALUE + ); + http::Response::builder() + .status(StatusCode::NOT_ACCEPTABLE) + .header(CONTENT_TYPE, HeaderValue::from_static(APPLICATION_JSON)) + .body(error_response_body("INVALID_ACCEPT_HEADER", message)) + .expect("cannot fail") +} + +#[cfg(test)] +mod tests { + use http::HeaderMap; + use http::header::ACCEPT; + use http::header::CONTENT_TYPE; + use http::header::HeaderValue; + use http::header::VARY; + + use super::APPLICATION_GRAPHQL_JSON; + use super::APPLICATION_JSON; + use super::MULTIPART_DEFER_ACCEPT_HEADER_VALUE; + use super::content_type_includes_json; + use super::parse_accept_header; + use super::process_vary_header; + + const VALID_CONTENT_TYPES: [&str; 2] = [APPLICATION_JSON, APPLICATION_GRAPHQL_JSON]; + const INVALID_CONTENT_TYPES: [&str; 3] = ["invalid", "application/invalid", "application/yaml"]; + + #[test] + fn test_content_type_includes_json_handles_valid_content_types() { + for content_type in VALID_CONTENT_TYPES { + let mut headers = HeaderMap::new(); + headers.insert(CONTENT_TYPE, HeaderValue::from_static(content_type)); + assert!(content_type_includes_json(&headers)); + } + } + + #[test] + fn test_content_type_includes_json_handles_invalid_content_types() { + for content_type in INVALID_CONTENT_TYPES { + let mut headers = HeaderMap::new(); + headers.insert(CONTENT_TYPE, HeaderValue::from_static(content_type)); + assert!(!content_type_includes_json(&headers)); + } + } + + #[test] + fn test_content_type_includes_json_can_process_multiple_content_types() { + let mut headers = HeaderMap::new(); + for content_type in INVALID_CONTENT_TYPES { + headers.insert(CONTENT_TYPE, HeaderValue::from_static(content_type)); + } + for content_type in VALID_CONTENT_TYPES { + 
headers.insert(CONTENT_TYPE, HeaderValue::from_static(content_type)); + } + + assert!(content_type_includes_json(&headers)); + + let mut headers = HeaderMap::new(); + headers.insert( + CONTENT_TYPE, + INVALID_CONTENT_TYPES.join(", ").parse().unwrap(), + ); + headers.insert( + CONTENT_TYPE, + VALID_CONTENT_TYPES.join(", ").parse().unwrap(), + ); + assert!(content_type_includes_json(&headers)); + } + + #[test] + fn test_parse_accept_header_behaves_as_expected() { + let mut default_headers = HeaderMap::new(); + default_headers.insert(ACCEPT, HeaderValue::from_static(VALID_CONTENT_TYPES[0])); + default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); + let accepts = parse_accept_header(&default_headers); + assert!(accepts.json); + + let mut default_headers = HeaderMap::new(); + default_headers.insert(ACCEPT, HeaderValue::from_static("*/*")); + default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); + let accepts = parse_accept_header(&default_headers); + assert!(accepts.wildcard); + + let mut default_headers = HeaderMap::new(); + // real life browser example + default_headers.insert(ACCEPT, HeaderValue::from_static("text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8")); + let accepts = parse_accept_header(&default_headers); + assert!(accepts.wildcard); + + let mut default_headers = HeaderMap::new(); + default_headers.insert(ACCEPT, HeaderValue::from_static(APPLICATION_GRAPHQL_JSON)); + default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); + let accepts = parse_accept_header(&default_headers); + assert!(accepts.json); + + let mut default_headers = HeaderMap::new(); + default_headers.insert(ACCEPT, HeaderValue::from_static(APPLICATION_GRAPHQL_JSON)); + default_headers.append(ACCEPT, MULTIPART_DEFER_ACCEPT_HEADER_VALUE); + let accepts = parse_accept_header(&default_headers); + assert!(accepts.multipart_defer); + + // Multiple accepted types, including one with a parameter we are interested in + let 
mut default_headers = HeaderMap::new(); + default_headers.insert( + ACCEPT, + HeaderValue::from_static("multipart/mixed;subscriptionSpec=1.0, application/json"), + ); + let accepts = parse_accept_header(&default_headers); + assert!(accepts.multipart_subscription); + + // No accept header present + let default_headers = HeaderMap::new(); + let accepts = parse_accept_header(&default_headers); + assert!(accepts.json); + } + + #[test] + fn it_adds_default_with_value_origin_if_no_vary_header() { + let mut default_headers = HeaderMap::new(); + process_vary_header(&mut default_headers); + let vary_opt = default_headers.get(VARY); + assert!(vary_opt.is_some()); + let vary = vary_opt.expect("has a value"); + assert_eq!(vary, "origin"); + } + + #[test] + fn it_leaves_vary_alone_if_set() { + let mut default_headers = HeaderMap::new(); + default_headers.insert(VARY, HeaderValue::from_static("*")); + process_vary_header(&mut default_headers); + let vary_opt = default_headers.get(VARY); + assert!(vary_opt.is_some()); + let vary = vary_opt.expect("has a value"); + assert_eq!(vary, "*"); + } + + #[test] + fn it_leaves_varys_alone_if_there_are_more_than_one() { + let mut default_headers = HeaderMap::new(); + default_headers.insert(VARY, HeaderValue::from_static("one")); + default_headers.append(VARY, HeaderValue::from_static("two")); + process_vary_header(&mut default_headers); + let vary = default_headers.get_all(VARY); + assert_eq!(vary.iter().count(), 2); + for value in vary { + assert!(value == "one" || value == "two"); + } + } +} diff --git a/apollo-router/src/plugins/coprocessor/execution.rs b/apollo-router/src/plugins/coprocessor/execution.rs index 96ba713f9d..c28fd19ce4 100644 --- a/apollo-router/src/plugins/coprocessor/execution.rs +++ b/apollo-router/src/plugins/coprocessor/execution.rs @@ -5,7 +5,6 @@ use futures::future; use futures::stream; use schemars::JsonSchema; use serde::Deserialize; -use serde::Serialize; use tower::BoxError; use tower::ServiceBuilder; use 
tower_service::Service; @@ -19,7 +18,7 @@ use crate::plugins::coprocessor::EXTERNAL_SPAN_NAME; use crate::services::execution; /// What information is passed to a router request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct ExecutionRequestConf { /// Send the headers @@ -37,7 +36,7 @@ pub(super) struct ExecutionRequestConf { } /// What information is passed to a router request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct ExecutionResponseConf { /// Send the headers @@ -52,7 +51,7 @@ pub(super) struct ExecutionResponseConf { pub(super) status_code: bool, } -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default)] pub(super) struct ExecutionStage { /// The request configuration diff --git a/apollo-router/src/plugins/coprocessor/mod.rs b/apollo-router/src/plugins/coprocessor/mod.rs index 340f0af422..6d0b881324 100644 --- a/apollo-router/src/plugins/coprocessor/mod.rs +++ b/apollo-router/src/plugins/coprocessor/mod.rs @@ -24,7 +24,6 @@ use hyper_util::client::legacy::connect::HttpConnector; use hyper_util::rt::TokioExecutor; use schemars::JsonSchema; use serde::Deserialize; -use serde::Serialize; use tower::BoxError; use tower::Service; use tower::ServiceBuilder; @@ -96,9 +95,6 @@ impl Plugin for CoprocessorPlugin { http_connector.set_keepalive(Some(std::time::Duration::from_secs(60))); http_connector.enforce_http(false); - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let tls_config = rustls::ClientConfig::builder() .with_native_roots()? 
.with_no_client_auth(); @@ -115,6 +111,71 @@ impl Plugin for CoprocessorPlugin { builder.wrap_connector(http_connector) }; + if matches!( + init.config.router.request.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.router.request.context: true` is deprecated. See https://go.apollo.dev/o/coprocessor-context" + ); + } + if matches!( + init.config.router.response.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.router.response.context: true` is deprecated. See https://go.apollo.dev/o/coprocessor-context" + ); + } + if matches!( + init.config.supergraph.request.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.supergraph.request.context: true` is deprecated. See https://go.apollo.dev/o/coprocessor-context" + ); + } + if matches!( + init.config.supergraph.response.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.supergraph.response.context: true` is deprecated. See https://go.apollo.dev/o/coprocessor-context" + ); + } + if matches!( + init.config.execution.request.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.execution.request.context: true` is deprecated. See https://go.apollo.dev/o/coprocessor-context" + ); + } + if matches!( + init.config.execution.response.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.execution.response.context: true` is deprecated. See https://go.apollo.dev/o/coprocessor-context" + ); + } + if matches!( + init.config.subgraph.all.request.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.subgraph.all.request.context: true` is deprecated. 
See https://go.apollo.dev/o/coprocessor-context" + ); + } + if matches!( + init.config.subgraph.all.response.context, + ContextConf::Deprecated(true) + ) { + tracing::warn!( + "Configuration `coprocessor.subgraph.all.response.context: true` is deprecated. See https://go.apollo.dev/o/coprocessor-context" + ); + } + let http_client = ServiceBuilder::new() .map_response( |http_response: http::Response| -> http::Response { @@ -247,11 +308,10 @@ where } } /// What information is passed to a router request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct RouterRequestConf { /// Condition to trigger this stage - #[serde(skip_serializing)] pub(super) condition: Option>, /// Send the headers pub(super) headers: bool, @@ -268,12 +328,11 @@ pub(super) struct RouterRequestConf { } /// What information is passed to a router request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct RouterResponseConf { /// Condition to trigger this stage - #[serde(skip_serializing)] - pub(super) condition: Option>, + pub(super) condition: Condition, /// Send the headers pub(super) headers: bool, /// Send the context @@ -286,12 +345,11 @@ pub(super) struct RouterResponseConf { pub(super) status_code: bool, } /// What information is passed to a subgraph request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct SubgraphRequestConf { /// Condition to trigger this stage - #[serde(skip_serializing)] - pub(super) condition: Option>, + pub(super) condition: Condition, /// Send the headers 
pub(super) headers: bool, /// Send the context @@ -309,12 +367,11 @@ pub(super) struct SubgraphRequestConf { } /// What information is passed to a subgraph request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct SubgraphResponseConf { /// Condition to trigger this stage - #[serde(skip_serializing)] - pub(super) condition: Option>, + pub(super) condition: Condition, /// Send the headers pub(super) headers: bool, /// Send the context @@ -356,7 +413,7 @@ struct Conf { } /// Configures the context -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq)] #[serde(deny_unknown_fields, untagged)] pub(super) enum ContextConf { /// Deprecated configuration using a boolean @@ -371,7 +428,7 @@ impl Default for ContextConf { } /// Configures the context -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq)] +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq)] #[serde(deny_unknown_fields, rename_all = "snake_case")] pub(super) enum NewContextConf { /// Send all context keys to coprocessor @@ -427,7 +484,7 @@ fn record_coprocessor_duration(stage: PipelineStep, duration: Duration) { ); } -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default)] pub(super) struct RouterStage { /// The request configuration @@ -554,7 +611,7 @@ impl RouterStage { // ----------------------------------------------------------------------------------------- /// What information is passed to a subgraph request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) 
struct SubgraphStages { #[serde(default)] @@ -562,7 +619,7 @@ pub(super) struct SubgraphStages { } /// What information is passed to a subgraph request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct SubgraphStage { #[serde(default)] @@ -871,12 +928,7 @@ where + 'static, >>::Future: Send + 'static, { - let should_be_executed = response_config - .condition - .as_ref() - .map(|c| c.evaluate_response(&response)) - .unwrap_or(true); - if !should_be_executed { + if !response_config.condition.evaluate_response(&response) { return Ok(response); } // split the response into parts + body @@ -1075,12 +1127,7 @@ where + 'static, >>::Future: Send + 'static, { - let should_be_executed = request_config - .condition - .as_mut() - .map(|c| c.evaluate_request(&request) == Some(true)) - .unwrap_or(true); - if !should_be_executed { + if request_config.condition.evaluate_request(&request) != Some(true) { return Ok(ControlFlow::Continue(request)); } // Call into our out of process processor with a body of our body @@ -1241,12 +1288,7 @@ where + 'static, >>::Future: Send + 'static, { - let should_be_executed = response_config - .condition - .as_ref() - .map(|c| c.evaluate_response(&response)) - .unwrap_or(true); - if !should_be_executed { + if !response_config.condition.evaluate_response(&response) { return Ok(response); } // Call into our out of process processor with a body of our body diff --git a/apollo-router/src/plugins/coprocessor/supergraph.rs b/apollo-router/src/plugins/coprocessor/supergraph.rs index 565675c548..567116dbc3 100644 --- a/apollo-router/src/plugins/coprocessor/supergraph.rs +++ b/apollo-router/src/plugins/coprocessor/supergraph.rs @@ -5,7 +5,6 @@ use futures::future; use futures::stream; use schemars::JsonSchema; use serde::Deserialize; -use serde::Serialize; use tower::BoxError; use 
tower::ServiceBuilder; use tower_service::Service; @@ -21,12 +20,11 @@ use crate::plugins::telemetry::config_new::supergraph::selectors::SupergraphSele use crate::services::supergraph; /// What information is passed to a router request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct SupergraphRequestConf { /// Condition to trigger this stage - #[serde(skip_serializing)] - pub(super) condition: Option>, + pub(super) condition: Condition, /// Send the headers pub(super) headers: bool, /// Send the context @@ -40,12 +38,11 @@ pub(super) struct SupergraphRequestConf { } /// What information is passed to a router request/response stage -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default, deny_unknown_fields)] pub(super) struct SupergraphResponseConf { /// Condition to trigger this stage - #[serde(skip_serializing)] - pub(super) condition: Option>, + pub(super) condition: Condition, /// Send the headers pub(super) headers: bool, /// Send the context @@ -58,7 +55,7 @@ pub(super) struct SupergraphResponseConf { pub(super) status_code: bool, } -#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)] +#[derive(Clone, Debug, Default, Deserialize, PartialEq, JsonSchema)] #[serde(default)] pub(super) struct SupergraphStage { /// The request configuration @@ -198,12 +195,7 @@ where + 'static, >>::Future: Send + 'static, { - let should_be_executed = request_config - .condition - .as_mut() - .map(|c| c.evaluate_request(&request) == Some(true)) - .unwrap_or(true); - if !should_be_executed { + if request_config.condition.evaluate_request(&request) != Some(true) { return Ok(ControlFlow::Continue(request)); } // Call into our out of process processor with a body of our body @@ 
-349,12 +341,7 @@ where + 'static, >>::Future: Send + 'static, { - let should_be_executed = response_config - .condition - .as_ref() - .map(|c| c.evaluate_response(&response)) - .unwrap_or(true); - if !should_be_executed { + if !response_config.condition.evaluate_response(&response) { return Ok(response); } // split the response into parts + body @@ -447,9 +434,7 @@ where let generator_id = map_context.id.clone(); let should_be_executed = response_config .condition - .as_ref() - .map(|c| c.evaluate_event_response(&deferred_response, &map_context)) - .unwrap_or(true); + .evaluate_event_response(&deferred_response, &map_context); let response_config_context = response_config.context.clone(); async move { if !should_be_executed { @@ -743,8 +728,7 @@ mod tests { default: None, }), SelectorOrValue::Value("value".to_string().into()), - ]) - .into(), + ]), headers: false, context: ContextConf::Deprecated(false), body: true, @@ -1130,8 +1114,7 @@ mod tests { is_primary_response: true, }), SelectorOrValue::Value(true.into()), - ]) - .into(), + ]), headers: true, context: ContextConf::NewContextConf(NewContextConf::All), body: true, diff --git a/apollo-router/src/plugins/coprocessor/test.rs b/apollo-router/src/plugins/coprocessor/test.rs index 1c6c4c2951..a488e6ec06 100644 --- a/apollo-router/src/plugins/coprocessor/test.rs +++ b/apollo-router/src/plugins/coprocessor/test.rs @@ -820,8 +820,7 @@ mod tests { default: None, }), SelectorOrValue::Value("value".to_string().into()), - ]) - .into(), + ]), body: true, ..Default::default() }, @@ -1451,8 +1450,7 @@ mod tests { response_context: String::from("context_value"), redact: None, default: None, - }) - .into(), + }), body: true, ..Default::default() }, diff --git a/apollo-router/src/plugins/mod.rs b/apollo-router/src/plugins/mod.rs index dbb6672b37..61510ba818 100644 --- a/apollo-router/src/plugins/mod.rs +++ b/apollo-router/src/plugins/mod.rs @@ -24,6 +24,7 @@ pub(crate) mod authentication; pub(crate) mod authorization; 
pub(crate) mod cache; pub(crate) mod connectors; +pub(crate) mod content_negotiation; mod coprocessor; pub(crate) mod csrf; pub(crate) mod demand_control; diff --git a/apollo-router/src/plugins/telemetry/config_new/router/events.rs b/apollo-router/src/plugins/telemetry/config_new/router/events.rs index 8f67e343ea..b0daed3fa2 100644 --- a/apollo-router/src/plugins/telemetry/config_new/router/events.rs +++ b/apollo-router/src/plugins/telemetry/config_new/router/events.rs @@ -22,7 +22,7 @@ use crate::services::router; #[derive(Clone)] pub(crate) struct DisplayRouterRequest(pub(crate) EventLevel); #[derive(Default, Clone)] -pub(crate) struct DisplayRouterResponse(pub(crate) bool); +pub(crate) struct DisplayRouterResponse; #[derive(Default, Clone)] pub(crate) struct RouterResponseBodyExtensionType(pub(crate) String); @@ -44,7 +44,7 @@ impl CustomEvents Result, BoxError> { let _ = tracing_subscriber::fmt::try_init(); - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + let mut plugin = create_plugin().await?; // Replace the apollo metrics sender so we can test metrics collection. 
let (tx, rx) = tokio::sync::mpsc::channel(100); @@ -434,7 +434,6 @@ mod test { } fn create_plugin() -> impl Future> { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); create_plugin_with_apollo_config(apollo::Config { endpoint: Url::parse(ENDPOINT_DEFAULT).expect("default endpoint must be parseable"), apollo_key: Some("key".to_string()), diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 10295418c9..f92040e1ad 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -437,17 +437,13 @@ impl PluginPrivate for Telemetry { .attributes .on_request(request); - custom_attributes.extend([ - KeyValue::new(CLIENT_NAME_KEY, client_name.unwrap_or("").to_string()), - KeyValue::new(CLIENT_VERSION_KEY, client_version.unwrap_or("").to_string()), - KeyValue::new( - Key::from_static_str("apollo_private.http.request_headers"), - filter_headers( - request.router_request.headers(), - &config_request.apollo.send_headers, - ), + custom_attributes.push(KeyValue::new( + Key::from_static_str("apollo_private.http.request_headers"), + filter_headers( + request.router_request.headers(), + &config_request.apollo.send_headers, ), - ]); + )); let custom_instruments: RouterInstruments = config_request .instrumentation @@ -466,7 +462,7 @@ impl PluginPrivate for Telemetry { request.context.clone(), ) }, - move |(custom_attributes, custom_instruments, mut custom_events, ctx): ( + move |(mut custom_attributes, custom_instruments, mut custom_events, ctx): ( Vec, RouterInstruments, RouterEvents, @@ -480,6 +476,20 @@ impl PluginPrivate for Telemetry { Self::plugin_metrics(&config); async move { + // NB: client name and version must be picked up here, rather than in the + // `req_fn` of this `map_future_with_request_data` call, to allow plugins + // at the router service to modify the name and version. 
+ let get_from_context = + |ctx: &Context, key| ctx.get::<&str, String>(key).ok().flatten(); + let client_name = get_from_context(&ctx, CLIENT_NAME) + .or_else(|| get_from_context(&ctx, DEPRECATED_CLIENT_NAME)); + let client_version = get_from_context(&ctx, CLIENT_VERSION) + .or_else(|| get_from_context(&ctx, DEPRECATED_CLIENT_VERSION)); + custom_attributes.extend([ + KeyValue::new(CLIENT_NAME_KEY, client_name.unwrap_or_default()), + KeyValue::new(CLIENT_VERSION_KEY, client_version.unwrap_or_default()), + ]); + let span = Span::current(); span.set_span_dyn_attributes(custom_attributes); let response: Result = fut.await; diff --git a/apollo-router/src/plugins/telemetry/span_factory.rs b/apollo-router/src/plugins/telemetry/span_factory.rs index cf9cccfecc..1eb81c83ac 100644 --- a/apollo-router/src/plugins/telemetry/span_factory.rs +++ b/apollo-router/src/plugins/telemetry/span_factory.rs @@ -44,7 +44,7 @@ impl SpanMode { REQUEST_SPAN_NAME, "http.method" = %request.method(), "http.request.method" = %request.method(), - "http.route" = %request.uri(), + "http.route" = %request.uri().path(), "http.flavor" = ?request.version(), "http.status" = 500, // This prevents setting later "otel.name" = ::tracing::field::Empty, @@ -59,7 +59,7 @@ impl SpanMode { REQUEST_SPAN_NAME, "http.method" = %request.method(), "http.request.method" = %request.method(), - "http.route" = %request.uri(), + "http.route" = %request.uri().path(), "http.flavor" = ?request.version(), "otel.name" = ::tracing::field::Empty, "otel.kind" = "SERVER", @@ -84,7 +84,7 @@ impl SpanMode { let span = info_span!(ROUTER_SPAN_NAME, "http.method" = %request.method(), "http.request.method" = %request.method(), - "http.route" = %request.uri(), + "http.route" = %request.uri().path(), "http.flavor" = ?request.version(), "trace_id" = %trace_id, "client.name" = ::tracing::field::Empty, @@ -100,7 +100,7 @@ impl SpanMode { SpanMode::SpecCompliant => { info_span!(ROUTER_SPAN_NAME, // Needed for apollo_telemetry and datadog 
span mapping - "http.route" = %request.uri(), + "http.route" = %request.uri().path(), "http.request.method" = %request.method(), "otel.name" = ::tracing::field::Empty, "otel.kind" = "SERVER", @@ -230,3 +230,104 @@ impl SpanMode { } } } + +#[cfg(test)] +mod tests { + use tracing_mock::expect; + use tracing_mock::subscriber; + + use crate::plugins::telemetry::SpanMode; + use crate::plugins::telemetry::consts::REQUEST_SPAN_NAME; + use crate::plugins::telemetry::consts::ROUTER_SPAN_NAME; + use crate::uplink::license_enforcement::LicenseState; + + #[test] + fn test_specific_span() { + // NB: this test checks the behavior of tracing_mock for a specific span. + // Most tests should probably follow the pattern of `test_http_route_on_array_of_router_spans` + // where they check a behavior across a variety of parameters. + let request = http::Request::builder() + .method("GET") + .uri("http://example.com/path/to/location?with=query&another=UN1QU3_query") + .header("apollographql-client-name", "client") + .body("useful info") + .unwrap(); + + let expected_fields = expect::field("http.route") + .with_value(&tracing::field::display("/path/to/location")) + .and(expect::field("http.request.method").with_value(&tracing::field::display("GET"))) + .and(expect::field("otel.kind").with_value(&"SERVER")) + .and(expect::field("apollo_private.request").with_value(&true)); + + let expected_span = expect::span() + .named(ROUTER_SPAN_NAME) + .with_fields(expected_fields); + + let (subscriber, handle) = subscriber::mock() + .new_span(expected_span) + .enter(ROUTER_SPAN_NAME) + .event(expect::event()) + .exit(ROUTER_SPAN_NAME) + .run_with_handle(); + tracing::subscriber::with_default(subscriber, || { + let span = SpanMode::SpecCompliant.create_router(&request); + let _guard = span.enter(); + tracing::info!("an event happened!"); + }); + handle.assert_finished(); + } + + #[test] + fn test_http_route_on_array_of_router_spans() { + let expected_routes = [ + ("https://www.example.com/", "/"), + 
("https://www.example.com/path", "/path"), + ("http://example.com/path/to/location", "/path/to/location"), + ("http://www.example.com/path?with=query", "/path"), + ("/foo/bar?baz", "/foo/bar"), + ]; + + let span_modes = [SpanMode::SpecCompliant, SpanMode::Deprecated]; + let license_states = [ + LicenseState::LicensedHalt { limits: None }, + LicenseState::Unlicensed, + ]; + + for (uri, expected_route) in expected_routes { + let request = http::Request::builder().uri(uri).body("").unwrap(); + + // test `request` spans + for license_state in license_states { + let expected_span = expect::span().named(REQUEST_SPAN_NAME).with_fields( + expect::field("http.route") + .with_value(&tracing::field::display(expected_route)), + ); + + let span_mode = SpanMode::Deprecated; + let (subscriber, handle) = + subscriber::mock().new_span(expected_span).run_with_handle(); + tracing::subscriber::with_default(subscriber, || { + let span = span_mode.create_request(&request, license_state); + let _guard = span.enter(); + }); + handle.assert_finished(); + } + + // test `router` spans + for span_mode in span_modes { + let expected_span = expect::span().named(ROUTER_SPAN_NAME).with_fields( + expect::field("http.route") + .with_value(&tracing::field::display(expected_route)), + ); + + let (subscriber, handle) = + subscriber::mock().new_span(expected_span).run_with_handle(); + tracing::subscriber::with_default(subscriber, || { + let span = span_mode.create_router(&request); + let _guard = span.enter(); + }); + handle.assert_finished(); + } + } + } +} diff --git a/apollo-router/src/plugins/traffic_shaping/deduplication.rs b/apollo-router/src/plugins/traffic_shaping/deduplication.rs index 0a07316c4f..235302b355 100644 --- a/apollo-router/src/plugins/traffic_shaping/deduplication.rs +++ b/apollo-router/src/plugins/traffic_shaping/deduplication.rs @@ -124,35 +124,39 @@ where let authorization_cache_key = request.authorization.clone(); let id = request.id.clone(); let cache_key = 
((&request.subgraph_request).into(), authorization_cache_key); - let res = { + let (res, handle) = { // when _drop_signal is dropped, either by getting out of the block, returning // the error from ready_oneshot or by cancellation, the drop_sentinel future will // return with Err(), then we remove the entry from the wait map let (_drop_signal, drop_sentinel) = oneshot::channel::<()>(); - tokio::task::spawn(async move { + let handle = tokio::task::spawn(async move { let _ = drop_sentinel.await; let mut locked_wait_map = wait_map.lock().await; locked_wait_map.remove(&cache_key); }); - service.call(request).await.map(CloneSubgraphResponse) + ( + service.call(request).await.map(CloneSubgraphResponse), + handle, + ) }; - // Let our waiters know - - // Clippy is wrong, the suggestion adds a useless clone of the error - #[allow(clippy::useless_asref)] - let broadcast_value = res - .as_ref() - .map(|response| response.clone()) - .map_err(|e| e.to_string()); + // Make sure that our spawned task has completed. Ignore the result to preserve + // existing behaviour. + let _ = handle.await; + // At this point we have removed ourselves from the wait_map, so we won't get + // any more receivers. If we have any receivers, let them know + if tx.receiver_count() > 0 { + // Clippy is wrong, the suggestion adds a useless clone of the error + #[allow(clippy::useless_asref)] + let broadcast_value = res + .as_ref() + .map(|response| response.clone()) + .map_err(|e: &BoxError| e.to_string()); - // We may get errors here, for instance if a task is cancelled, - // so just ignore the result of send - let _ = tokio::task::spawn_blocking(move || { - tx.send(broadcast_value) - }).await - .expect("can only fail if the task is aborted or if the internal code panics, neither is possible here; qed"); + // Ignore the result of send, receivers may drop... 
+ let _ = tx.send(broadcast_value); + } return res.map(|response| { SubgraphResponse::new_from_response( @@ -197,3 +201,104 @@ where } } } + +#[cfg(test)] +mod tests { + + use std::sync::Arc; + use std::sync::atomic::AtomicU8; + use std::sync::atomic::Ordering; + use std::time::Duration; + + use tower::Service; + use tower::ServiceExt; + + use super::QueryDeduplicationService; + use crate::plugin::test::MockSubgraphService; + use crate::services::SubgraphRequest; + use crate::services::SubgraphResponse; + + // Testing strategy: + // - We make our subgraph invocations slow (100ms) to increase our chance of a positive dedup + // result + // - We count how many times our inner service is invoked across all service invocations + // - We never know exactly which inner service is going to be invoked (since we are driving + // the service requests concurrently and in parallel), so we set times to 0..2 (== 0 or 1) + // for each expectation. + // - Every time an inner service is invoked we increment our shared counter. + // - If our shared counter == 1 at the end, then our test passes. + // + // Note: If this test starts to fail it may be because we need to increase the sleep time for + // each inner service above 100ms. 
+ // + #[tokio::test(flavor = "multi_thread")] + async fn test_dedup_service() { + let mut mock = MockSubgraphService::new(); + + let inner_invocation_count = Arc::new(AtomicU8::new(0)); + let inner_invocation_count_1 = inner_invocation_count.clone(); + let inner_invocation_count_2 = inner_invocation_count.clone(); + let inner_invocation_count_3 = inner_invocation_count.clone(); + + mock.expect_clone().returning(move || { + let mut mock = MockSubgraphService::new(); + + let inner_invocation_count_1 = inner_invocation_count_1.clone(); + mock.expect_clone().returning(move || { + let mut mock = MockSubgraphService::new(); + let inner_invocation_count_1 = inner_invocation_count_1.clone(); + mock.expect_call() + .times(0..2) + .returning(move |req: SubgraphRequest| { + std::thread::sleep(Duration::from_millis(100)); + inner_invocation_count_1.fetch_add(1, Ordering::Relaxed); + Ok(SubgraphResponse::fake_builder() + .context(req.context) + .build()) + }); + mock + }); + let inner_invocation_count_2 = inner_invocation_count_2.clone(); + mock.expect_call() + .times(0..2) + .returning(move |req: SubgraphRequest| { + std::thread::sleep(Duration::from_millis(100)); + inner_invocation_count_2.fetch_add(1, Ordering::Relaxed); + Ok(SubgraphResponse::fake_builder() + .context(req.context) + .build()) + }); + mock + }); + mock.expect_call() + .times(0..2) + .returning(move |req: SubgraphRequest| { + std::thread::sleep(Duration::from_millis(100)); + inner_invocation_count_3.fetch_add(1, Ordering::Relaxed); + Ok(SubgraphResponse::fake_builder() + .context(req.context) + .build()) + }); + + let mut svc = QueryDeduplicationService::new(mock); + + let request = SubgraphRequest::fake_builder().build(); + + // Spawn our service invocations so they execute in parallel + let fut1 = tokio::spawn( + svc.ready() + .await + .expect("it is ready") + .call(request.clone()), + ); + let fut2 = tokio::spawn(svc.ready().await.expect("it is ready").call(request)); + let (res1, res2) = 
tokio::join!(fut1, fut2); + + // We don't care about our actual request/responses, we just want to make sure that + // deduplication occurs... + res1.expect("fut1 spawned").expect("fut1 joined"); + res2.expect("fut2 spawned").expect("fut2 joined"); + + assert_eq!(1, inner_invocation_count.load(Ordering::Relaxed)); + } +} diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 0c9239cbb0..0e7e083977 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -770,6 +770,7 @@ pub(crate) async fn create_plugins( add_optional_apollo_plugin!("preview_entity_cache"); add_mandatory_apollo_plugin!("progressive_override"); add_optional_apollo_plugin!("demand_control"); + add_mandatory_apollo_plugin!("content_negotiation"); // has to follow file_uploads // This relative ordering is documented in `docs/source/customizations/native.mdx`: add_optional_apollo_plugin!("connectors"); diff --git a/apollo-router/src/services/http/service.rs b/apollo-router/src/services/http/service.rs index 5eac48f348..1e07c2d69b 100644 --- a/apollo-router/src/services/http/service.rs +++ b/apollo-router/src/services/http/service.rs @@ -236,9 +236,6 @@ pub(crate) fn generate_tls_client_config( tls_cert_store: RootCertStore, client_cert_config: Option<&TlsClientAuth>, ) -> Result { - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let tls_builder = rustls::ClientConfig::builder(); Ok(match client_cert_config { diff --git a/apollo-router/src/services/http/tests.rs b/apollo-router/src/services/http/tests.rs index d1ebbef9e0..6af993d171 100644 --- a/apollo-router/src/services/http/tests.rs +++ b/apollo-router/src/services/http/tests.rs @@ -56,9 +56,6 @@ async fn tls_server( key: PrivateKeyDer<'static>, body: &'static str, ) { - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let tls_config = Arc::new( ServerConfig::builder() 
.with_no_client_auth() @@ -446,8 +443,6 @@ async fn tls_server_with_client_auth( #[tokio::test(flavor = "multi_thread")] async fn tls_client_auth() { - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let server_certificate_pem = include_str!("./testdata/server.crt"); let ca_pem = include_str!("./testdata/CA/ca.crt"); let server_key_pem = include_str!("./testdata/server.key"); @@ -522,8 +517,6 @@ async fn tls_client_auth() { #[tokio::test(flavor = "multi_thread")] async fn tls_client_auth_connector() { - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let server_certificate_pem = include_str!("./testdata/server.crt"); let ca_pem = include_str!("./testdata/CA/ca.crt"); let server_key_pem = include_str!("./testdata/server.key"); @@ -618,9 +611,6 @@ async fn emulate_h2c_server(listener: TcpListener) { #[tokio::test(flavor = "multi_thread")] async fn test_subgraph_h2c() { - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let socket_addr = listener.local_addr().unwrap(); tokio::task::spawn(emulate_h2c_server(listener)); @@ -703,7 +693,6 @@ async fn emulate_subgraph_compressed_response(listener: TcpListener) { #[tokio::test(flavor = "multi_thread")] async fn test_compressed_request_response_body() { // Though the server doesn't use TLS, the client still supports it, and so we need crypto stuff - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let socket_addr = listener.local_addr().unwrap(); diff --git a/apollo-router/src/services/layers/apq.rs b/apollo-router/src/services/layers/apq.rs index 7c6ecb1718..8ff9a96600 100644 --- a/apollo-router/src/services/layers/apq.rs +++ b/apollo-router/src/services/layers/apq.rs @@ -261,7 +261,7 @@ mod apq_tests { use crate::Context; use 
crate::error::Error; use crate::graphql::Response; - use crate::services::router::ClientRequestAccepts; + use crate::plugins::content_negotiation::ClientRequestAccepts; use crate::services::router::service::from_supergraph_mock_callback; use crate::services::router::service::from_supergraph_mock_callback_and_configuration; diff --git a/apollo-router/src/services/layers/content_negotiation.rs b/apollo-router/src/services/layers/content_negotiation.rs deleted file mode 100644 index 84261a5513..0000000000 --- a/apollo-router/src/services/layers/content_negotiation.rs +++ /dev/null @@ -1,316 +0,0 @@ -//! Layers that do HTTP content negotiation using the Accept and Content-Type headers. -//! -//! Content negotiation uses a pair of layers that work together at the router and supergraph stages. - -use std::ops::ControlFlow; - -use http::HeaderMap; -use http::Method; -use http::StatusCode; -use http::header::ACCEPT; -use http::header::CONTENT_TYPE; -use mediatype::MediaTypeList; -use mediatype::ReadParams; -use mediatype::names::_STAR; -use mediatype::names::APPLICATION; -use mediatype::names::JSON; -use mediatype::names::MIXED; -use mediatype::names::MULTIPART; -use mime::APPLICATION_JSON; -use tower::BoxError; -use tower::Layer; -use tower::Service; -use tower::ServiceExt; - -use crate::graphql; -use crate::layers::ServiceExt as _; -use crate::layers::sync_checkpoint::CheckpointService; -use crate::services::APPLICATION_JSON_HEADER_VALUE; -use crate::services::MULTIPART_DEFER_ACCEPT; -use crate::services::MULTIPART_DEFER_SPEC_PARAMETER; -use crate::services::MULTIPART_DEFER_SPEC_VALUE; -use crate::services::MULTIPART_SUBSCRIPTION_ACCEPT; -use crate::services::MULTIPART_SUBSCRIPTION_SPEC_PARAMETER; -use crate::services::MULTIPART_SUBSCRIPTION_SPEC_VALUE; -use crate::services::router; -use crate::services::router::ClientRequestAccepts; -use crate::services::router::service::MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE; -use 
crate::services::router::service::MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE; -use crate::services::supergraph; - -pub(crate) const GRAPHQL_JSON_RESPONSE_HEADER_VALUE: &str = "application/graphql-response+json"; - -/// A layer for the router service that rejects requests that do not have an expected Content-Type, -/// or that have an Accept header that is not supported by the router. -/// -/// In particular, the Content-Type must be JSON, and the Accept header must include */*, or one of -/// the JSON/GraphQL MIME types. -/// -/// # Context -/// If the request is valid, this layer adds a [`ClientRequestAccepts`] value to the context. -#[derive(Clone, Default)] -pub(crate) struct RouterLayer {} - -impl Layer for RouterLayer -where - S: Service + Send + 'static, - >::Future: Send + 'static, -{ - type Service = CheckpointService; - - fn layer(&self, service: S) -> Self::Service { - CheckpointService::new( - move |req| { - if req.router_request.method() != Method::GET - && !content_type_is_json(req.router_request.headers()) - { - let response = http::Response::builder() - .status(StatusCode::UNSUPPORTED_MEDIA_TYPE) - .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .body(router::body::from_bytes( - serde_json::json!({ - "errors": [ - graphql::Error::builder() - .message(format!( - r#"'content-type' header must be one of: {:?} or {:?}"#, - APPLICATION_JSON.essence_str(), - GRAPHQL_JSON_RESPONSE_HEADER_VALUE, - )) - .extension_code("INVALID_CONTENT_TYPE_HEADER") - .build() - ] - }) - .to_string(), - )) - .expect("cannot fail"); - - return Ok(ControlFlow::Break(response.into())); - } - - let accepts = parse_accept(req.router_request.headers()); - - if accepts.wildcard - || accepts.multipart_defer - || accepts.multipart_subscription - || accepts.json - { - req.context - .extensions() - .with_lock(|lock| lock.insert(accepts)); - - Ok(ControlFlow::Continue(req)) - } else { - let response = http::Response::builder() - .status(StatusCode::NOT_ACCEPTABLE) - 
.header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .body(router::body::from_bytes( - serde_json::json!({ - "errors": [ - graphql::Error::builder() - .message(format!( - r#"'accept' header must be one of: \"*/*\", {:?}, {:?}, {:?} or {:?}"#, - APPLICATION_JSON.essence_str(), - GRAPHQL_JSON_RESPONSE_HEADER_VALUE, - MULTIPART_SUBSCRIPTION_ACCEPT, - MULTIPART_DEFER_ACCEPT - )) - .extension_code("INVALID_ACCEPT_HEADER") - .build() - ] - }) - .to_string() - )).expect("cannot fail"); - - Ok(ControlFlow::Break(response.into())) - } - }, - service, - ) - } -} - -/// A layer for the supergraph service that populates the Content-Type response header. -/// -/// The content type is decided based on the [`ClientRequestAccepts`] context value, which is -/// populated by the content negotiation [`RouterLayer`]. -// XXX(@goto-bus-stop): this feels a bit odd. It probably works fine because we can only ever respond -// with JSON, but maybe this should be done as close as possible to where we populate the response body..? 
-#[derive(Clone, Default)] -pub(crate) struct SupergraphLayer {} - -impl Layer for SupergraphLayer -where - S: Service - + Send - + 'static, - >::Future: Send + 'static, -{ - type Service = supergraph::BoxService; - - fn layer(&self, service: S) -> Self::Service { - service - .map_first_graphql_response(|context, mut parts, res| { - let ClientRequestAccepts { - wildcard: accepts_wildcard, - json: accepts_json, - multipart_defer: accepts_multipart_defer, - multipart_subscription: accepts_multipart_subscription, - } = context.extensions().with_lock(|lock| { - lock.get::() - .cloned() - .unwrap_or_default() - }); - - if !res.has_next.unwrap_or_default() && (accepts_json || accepts_wildcard) { - parts - .headers - .insert(CONTENT_TYPE, APPLICATION_JSON_HEADER_VALUE.clone()); - } else if accepts_multipart_defer { - parts.headers.insert( - CONTENT_TYPE, - MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE.clone(), - ); - } else if accepts_multipart_subscription { - parts.headers.insert( - CONTENT_TYPE, - MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE.clone(), - ); - } - (parts, res) - }) - .boxed() - } -} - -/// Returns true if the headers content type is `application/json` or `application/graphql-response+json` -fn content_type_is_json(headers: &HeaderMap) -> bool { - headers.get_all(CONTENT_TYPE).iter().any(|value| { - value - .to_str() - .map(|accept_str| { - let mut list = MediaTypeList::new(accept_str); - - list.any(|mime| { - mime.as_ref() - .map(|mime| { - (mime.ty == APPLICATION && mime.subty == JSON) - || (mime.ty == APPLICATION - && mime.subty.as_str() == "graphql-response" - && mime.suffix == Some(JSON)) - }) - .unwrap_or(false) - }) - }) - .unwrap_or(false) - }) -} -// Clippy suggests `for mime in MediaTypeList::new(str).flatten()` but less indentation -// does not seem worth making it invisible that Result is involved. 
-#[allow(clippy::manual_flatten)] -/// Returns (accepts_json, accepts_wildcard, accepts_multipart) -fn parse_accept(headers: &HeaderMap) -> ClientRequestAccepts { - let mut header_present = false; - let mut accepts = ClientRequestAccepts::default(); - for value in headers.get_all(ACCEPT) { - header_present = true; - if let Ok(str) = value.to_str() { - for result in MediaTypeList::new(str) { - if let Ok(mime) = result { - if !accepts.json - && ((mime.ty == APPLICATION && mime.subty == JSON) - || (mime.ty == APPLICATION - && mime.subty.as_str() == "graphql-response" - && mime.suffix == Some(JSON))) - { - accepts.json = true - } - if !accepts.wildcard && (mime.ty == _STAR && mime.subty == _STAR) { - accepts.wildcard = true - } - if !accepts.multipart_defer && (mime.ty == MULTIPART && mime.subty == MIXED) { - let parameter = mediatype::Name::new(MULTIPART_DEFER_SPEC_PARAMETER) - .expect("valid name"); - let value = - mediatype::Value::new(MULTIPART_DEFER_SPEC_VALUE).expect("valid value"); - if mime.get_param(parameter) == Some(value) { - accepts.multipart_defer = true - } - } - if !accepts.multipart_subscription - && (mime.ty == MULTIPART && mime.subty == MIXED) - { - let parameter = mediatype::Name::new(MULTIPART_SUBSCRIPTION_SPEC_PARAMETER) - .expect("valid name"); - let value = mediatype::Value::new(MULTIPART_SUBSCRIPTION_SPEC_VALUE) - .expect("valid value"); - if mime.get_param(parameter) == Some(value) { - accepts.multipart_subscription = true - } - } - } - } - } - } - if !header_present { - accepts.json = true - } - accepts -} - -#[cfg(test)] -mod tests { - use http::HeaderValue; - - use super::*; - - #[test] - fn it_checks_accept_header() { - let mut default_headers = HeaderMap::new(); - default_headers.insert( - ACCEPT, - HeaderValue::from_static(APPLICATION_JSON.essence_str()), - ); - default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); - let accepts = parse_accept(&default_headers); - assert!(accepts.json); - - let mut default_headers = 
HeaderMap::new(); - default_headers.insert(ACCEPT, HeaderValue::from_static("*/*")); - default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); - let accepts = parse_accept(&default_headers); - assert!(accepts.wildcard); - - let mut default_headers = HeaderMap::new(); - // real life browser example - default_headers.insert(ACCEPT, HeaderValue::from_static("text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8")); - let accepts = parse_accept(&default_headers); - assert!(accepts.wildcard); - - let mut default_headers = HeaderMap::new(); - default_headers.insert( - ACCEPT, - HeaderValue::from_static(GRAPHQL_JSON_RESPONSE_HEADER_VALUE), - ); - default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); - let accepts = parse_accept(&default_headers); - assert!(accepts.json); - - let mut default_headers = HeaderMap::new(); - default_headers.insert( - ACCEPT, - HeaderValue::from_static(GRAPHQL_JSON_RESPONSE_HEADER_VALUE), - ); - default_headers.append(ACCEPT, HeaderValue::from_static(MULTIPART_DEFER_ACCEPT)); - let accepts = parse_accept(&default_headers); - assert!(accepts.multipart_defer); - - // Multiple accepted types, including one with a parameter we are interested in - let mut default_headers = HeaderMap::new(); - default_headers.insert( - ACCEPT, - HeaderValue::from_static("multipart/mixed;subscriptionSpec=1.0, application/json"), - ); - let accepts = parse_accept(&default_headers); - assert!(accepts.multipart_subscription); - } -} diff --git a/apollo-router/src/services/layers/mod.rs b/apollo-router/src/services/layers/mod.rs index 0741a189bc..ce3b6826ac 100644 --- a/apollo-router/src/services/layers/mod.rs +++ b/apollo-router/src/services/layers/mod.rs @@ -1,7 +1,6 @@ //! Layers that are internal to the execution pipeline. 
pub(crate) mod allow_only_http_post_mutations; pub(crate) mod apq; -pub(crate) mod content_negotiation; pub(crate) mod persisted_queries; pub(crate) mod query_analysis; pub(crate) mod static_page; diff --git a/apollo-router/src/services/mod.rs b/apollo-router/src/services/mod.rs index cd4ab3589a..1c555b5fc3 100644 --- a/apollo-router/src/services/mod.rs +++ b/apollo-router/src/services/mod.rs @@ -71,16 +71,3 @@ pub(crate) fn apollo_key() -> Option { pub(crate) fn apollo_graph_reference() -> Option { APOLLO_GRAPH_REF.lock().clone() } - -// set the supported `@defer` specification version to https://github.com/graphql/graphql-spec/pull/742/commits/01d7b98f04810c9a9db4c0e53d3c4d54dbf10b82 -pub(crate) const MULTIPART_DEFER_SPEC_PARAMETER: &str = "deferSpec"; -pub(crate) const MULTIPART_DEFER_SPEC_VALUE: &str = "20220824"; -pub(crate) const MULTIPART_DEFER_ACCEPT: &str = "multipart/mixed;deferSpec=20220824"; -pub(crate) const MULTIPART_DEFER_CONTENT_TYPE: &str = - "multipart/mixed;boundary=\"graphql\";deferSpec=20220824"; - -pub(crate) const MULTIPART_SUBSCRIPTION_ACCEPT: &str = "multipart/mixed;subscriptionSpec=1.0"; -pub(crate) const MULTIPART_SUBSCRIPTION_CONTENT_TYPE: &str = - "multipart/mixed;boundary=\"graphql\";subscriptionSpec=1.0"; -pub(crate) const MULTIPART_SUBSCRIPTION_SPEC_PARAMETER: &str = "subscriptionSpec"; -pub(crate) const MULTIPART_SUBSCRIPTION_SPEC_VALUE: &str = "1.0"; diff --git a/apollo-router/src/services/router.rs b/apollo-router/src/services/router.rs index e4dd46cd48..f91e820fcc 100644 --- a/apollo-router/src/services/router.rs +++ b/apollo-router/src/services/router.rs @@ -22,13 +22,13 @@ use static_assertions::assert_impl_all; use tower::BoxError; use self::body::RouterBody; -use self::service::MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE; -use self::service::MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE; use super::supergraph; use crate::Context; use crate::graphql; use crate::http_ext::header_map; use crate::json_ext::Path; +use 
crate::plugins::content_negotiation::MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE; +use crate::plugins::content_negotiation::MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE; use crate::services::TryIntoHeaderName; use crate::services::TryIntoHeaderValue; @@ -396,14 +396,6 @@ impl Response { } } -#[derive(Clone, Default, Debug)] -pub(crate) struct ClientRequestAccepts { - pub(crate) multipart_defer: bool, - pub(crate) multipart_subscription: bool, - pub(crate) json: bool, - pub(crate) wildcard: bool, -} - impl From> for Response where T: http_body::Body + Send + 'static, diff --git a/apollo-router/src/services/router/service.rs b/apollo-router/src/services/router/service.rs index b9668cc76f..f64d9d743e 100644 --- a/apollo-router/src/services/router/service.rs +++ b/apollo-router/src/services/router/service.rs @@ -13,20 +13,17 @@ use futures::future::join_all; use futures::future::ready; use futures::stream::StreamExt; use futures::stream::once; -use http::HeaderMap; use http::HeaderName; use http::HeaderValue; use http::Method; use http::StatusCode; use http::header::CONTENT_TYPE; -use http::header::VARY; use http::request::Parts; use mime::APPLICATION_JSON; use multimap::MultiMap; use opentelemetry::KeyValue; use opentelemetry_semantic_conventions::trace::HTTP_REQUEST_METHOD; use tower::BoxError; -use tower::Layer; use tower::ServiceBuilder; use tower::ServiceExt; use tower::buffer::Buffer; @@ -34,7 +31,6 @@ use tower_service::Service; use tracing::Instrument; use super::Body; -use super::ClientRequestAccepts; use crate::Configuration; use crate::Context; use crate::Endpoint; @@ -54,6 +50,8 @@ use crate::metrics::count_operation_error_codes; use crate::metrics::count_operation_errors; #[cfg(test)] use crate::plugin::test::MockSupergraphService; +use crate::plugins::content_negotiation::ClientRequestAccepts; +use crate::plugins::content_negotiation::invalid_accept_header_response; use crate::plugins::telemetry::apollo::Config as ApolloTelemetryConfig; use 
crate::plugins::telemetry::apollo::ErrorsConfiguration; use crate::plugins::telemetry::config::Conf as TelemetryConfig; @@ -69,21 +67,14 @@ use crate::protocols::multipart::Multipart; use crate::protocols::multipart::ProtocolMode; use crate::query_planner::InMemoryCachePlanner; use crate::router_factory::RouterFactory; -use crate::services::APPLICATION_JSON_HEADER_VALUE; use crate::services::HasPlugins; use crate::services::HasSchema; -use crate::services::MULTIPART_DEFER_ACCEPT; -use crate::services::MULTIPART_DEFER_CONTENT_TYPE; -use crate::services::MULTIPART_SUBSCRIPTION_ACCEPT; -use crate::services::MULTIPART_SUBSCRIPTION_CONTENT_TYPE; use crate::services::RouterRequest; use crate::services::RouterResponse; use crate::services::SupergraphCreator; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; use crate::services::layers::apq::APQLayer; -use crate::services::layers::content_negotiation; -use crate::services::layers::content_negotiation::GRAPHQL_JSON_RESPONSE_HEADER_VALUE; use crate::services::layers::persisted_queries::PersistedQueryLayer; use crate::services::layers::query_analysis::QueryAnalysisLayer; use crate::services::layers::static_page::StaticPageLayer; @@ -94,13 +85,8 @@ use crate::services::router::pipeline_handle::PipelineRef; use crate::services::supergraph; use crate::spec::query::EXTENSIONS_VALUE_COMPLETION_KEY; -pub(crate) static MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE: HeaderValue = - HeaderValue::from_static(MULTIPART_DEFER_CONTENT_TYPE); -pub(crate) static MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE: HeaderValue = - HeaderValue::from_static(MULTIPART_SUBSCRIPTION_CONTENT_TYPE); static ACCEL_BUFFERING_HEADER_NAME: HeaderName = HeaderName::from_static("x-accel-buffering"); static ACCEL_BUFFERING_HEADER_VALUE: HeaderValue = HeaderValue::from_static("no"); -static ORIGIN_HEADER_VALUE: HeaderValue = HeaderValue::from_static("origin"); /// Containing [`Service`] in the request lifecyle. 
#[derive(Clone)] @@ -292,9 +278,6 @@ impl RouterService { }, }; - // XXX(@goto-bus-stop): *all* of the code using these `accepts_` variables looks like it - // duplicates what the content_negotiation::SupergraphLayer is doing. We should delete one - // or the other, and absolutely not do it inline here. let ClientRequestAccepts { wildcard: accepts_wildcard, json: accepts_json, @@ -306,13 +289,11 @@ impl RouterService { .unwrap_or_default(); // XXX(@goto-bus-stop): I strongly suspect that it would be better to move this into its own layer. - let display_router_response: DisplayRouterResponse = context + let display_router_response = context .extensions() - .with_lock(|lock| lock.get().cloned()) - .unwrap_or_default(); + .with_lock(|lock| lock.get::().is_some()); let (mut parts, mut body) = response.into_parts(); - process_vary_header(&mut parts.headers); if context .extensions() @@ -357,9 +338,6 @@ impl RouterService { ); } - parts - .headers - .insert(CONTENT_TYPE, APPLICATION_JSON_HEADER_VALUE.clone()); let body: Result = tracing::trace_span!("serialize_response") .in_scope(|| { let body = serde_json::to_string(&response)?; @@ -367,7 +345,7 @@ impl RouterService { }); let body = body?; - if display_router_response.0 { + if display_router_response { context.extensions().with_lock(|ext| { ext.insert(RouterResponseBodyExtensionType(body.clone())); }); @@ -378,18 +356,6 @@ impl RouterService { context, }) } else if accepts_multipart_defer || accepts_multipart_subscription { - if accepts_multipart_defer { - parts.headers.insert( - CONTENT_TYPE, - MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE.clone(), - ); - } else if accepts_multipart_subscription { - parts.headers.insert( - CONTENT_TYPE, - MULTIPART_SUBSCRIPTION_CONTENT_TYPE_HEADER_VALUE.clone(), - ); - } - if !response.errors.is_empty() { count_operation_errors( &response.errors, @@ -403,23 +369,31 @@ impl RouterService { ACCEL_BUFFERING_HEADER_NAME.clone(), ACCEL_BUFFERING_HEADER_VALUE.clone(), ); - let response = match 
response.subscribed { - Some(true) => http::Response::from_parts( - parts, - router::body::from_result_stream(Multipart::new( - body, - ProtocolMode::Subscription, - )), - ), - _ => http::Response::from_parts( - parts, - router::body::from_result_stream(Multipart::new( - once(ready(response)).chain(body), - ProtocolMode::Defer, - )), - ), + + // NB: here is where we decide what kind of streaming response we're going to + // send. insert it into the extensions so that the content negotiation plugin + // can read it. + let protocol_mode = if matches!(response.subscribed, Some(true)) { + ProtocolMode::Subscription + } else { + ProtocolMode::Defer + }; + context + .extensions() + .with_lock(|lock| lock.insert(protocol_mode)); + + let response_multipart = match protocol_mode { + ProtocolMode::Subscription => Multipart::new(body, protocol_mode), + ProtocolMode::Defer => { + Multipart::new(once(ready(response)).chain(body), protocol_mode) + } }; + let response = http::Response::from_parts( + parts, + router::body::from_result_stream(response_multipart), + ); + Ok(RouterResponse { response, context }) } else { count_operation_error_codes( @@ -429,23 +403,7 @@ impl RouterService { ); // this should be unreachable due to a previous check, but just to be sure... - Ok(router::Response::error_builder() - .error( - graphql::Error::builder() - .message(format!( - r#"'accept' header must be one of: \"*/*\", {:?}, {:?}, {:?} or {:?}"#, - APPLICATION_JSON.essence_str(), - GRAPHQL_JSON_RESPONSE_HEADER_VALUE, - MULTIPART_DEFER_ACCEPT, - MULTIPART_SUBSCRIPTION_ACCEPT, - )) - .extension_code("INVALID_ACCEPT_HEADER") - .build(), - ) - .status_code(StatusCode::NOT_ACCEPTABLE) - .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .context(context) - .build()?) 
+ Ok(invalid_accept_header_response().into()) } } } @@ -815,57 +773,59 @@ impl RouterService { parts: &Parts, body: Body, ) -> Result, bool), TranslateError>, BoxError> { - let graphql_requests: Result<(Vec, bool), TranslateError> = - if parts.method == Method::GET { - self.translate_query_request(parts).await - } else { - let bytes = router::body::into_bytes(body) - .instrument(tracing::debug_span!("receive_body")) - .await?; - if let Some(level) = context - .extensions() - .with_lock(|ext| ext.get::().cloned()) - .map(|d| d.0) - { - let mut attrs = Vec::with_capacity(5); - #[cfg(test)] - let mut headers: indexmap::IndexMap = parts - .headers - .clone() - .into_iter() - .filter_map(|(name, val)| Some((name?.to_string(), val))) - .collect(); - #[cfg(test)] - headers.sort_keys(); - #[cfg(not(test))] - let headers = &parts.headers; - - attrs.push(KeyValue::new( - HTTP_REQUEST_HEADERS, - opentelemetry::Value::String(format!("{:?}", headers).into()), - )); - attrs.push(KeyValue::new( - HTTP_REQUEST_METHOD, - opentelemetry::Value::String(format!("{}", parts.method).into()), - )); - attrs.push(KeyValue::new( - HTTP_REQUEST_URI, - opentelemetry::Value::String(format!("{}", parts.uri).into()), - )); - attrs.push(KeyValue::new( - HTTP_REQUEST_VERSION, - opentelemetry::Value::String(format!("{:?}", parts.version).into()), - )); - attrs.push(KeyValue::new( - HTTP_REQUEST_BODY, - opentelemetry::Value::String( - format!("{:?}", String::from_utf8_lossy(&bytes)).into(), - ), - )); - log_event(level, "router.request", attrs, ""); - } - self.translate_bytes_request(&bytes) - }; + let graphql_requests: Result<(Vec, bool), TranslateError> = if parts + .method + == Method::GET + { + self.translate_query_request(parts).await + } else { + let bytes = router::body::into_bytes(body) + .instrument(tracing::debug_span!("receive_body")) + .await?; + if let Some(level) = context + .extensions() + .with_lock(|ext| ext.get::().cloned()) + .map(|d| d.0) + { + let mut attrs = 
Vec::with_capacity(5); + #[cfg(test)] + let mut headers: indexmap::IndexMap = parts + .headers + .clone() + .into_iter() + .filter_map(|(name, val)| Some((name?.to_string(), val))) + .collect(); + #[cfg(test)] + headers.sort_keys(); + #[cfg(not(test))] + let headers = &parts.headers; + + attrs.push(KeyValue::new( + HTTP_REQUEST_HEADERS, + opentelemetry::Value::String(format!("{:?}", headers).into()), + )); + attrs.push(KeyValue::new( + HTTP_REQUEST_METHOD, + opentelemetry::Value::String(format!("{}", parts.method).into()), + )); + attrs.push(KeyValue::new( + HTTP_REQUEST_URI, + opentelemetry::Value::String(format!("{}", parts.uri).into()), + )); + attrs.push(KeyValue::new( + HTTP_REQUEST_VERSION, + opentelemetry::Value::String(format!("{:?}", parts.version).into()), + )); + attrs.push(KeyValue::new( + HTTP_REQUEST_BODY, + opentelemetry::Value::String( + format!("{:?}", String::from_utf8_lossy(&bytes)).into(), + ), + )); + log_event(level, "router.request", attrs, ""); + } + self.translate_bytes_request(&bytes) + }; Ok(graphql_requests) } @@ -891,14 +851,6 @@ struct TranslateError { extension_details: String, } -// Process the headers to make sure that `VARY` is set correctly -pub(crate) fn process_vary_header(headers: &mut HeaderMap) { - if headers.get(VARY).is_none() { - // We don't have a VARY header, add one with value "origin" - headers.insert(VARY, ORIGIN_HEADER_VALUE.clone()); - } -} - /// A collection of services and data which may be used to create a "router". 
#[derive(Clone)] pub(crate) struct RouterCreator { @@ -971,14 +923,14 @@ impl RouterCreator { let config_hash = configuration.hash(); let pipeline_handle = PipelineHandle::new(schema_id, launch_id, config_hash); - let router_service = content_negotiation::RouterLayer::default().layer(RouterService::new( + let router_service = RouterService::new( supergraph_creator.create(), apq_layer, persisted_query_layer, query_analysis_layer, configuration.batching.clone(), TelemetryConfig::apollo(&configuration), - )); + ); // NOTE: This is the start of the router pipeline (router_service) let sb = Buffer::new( diff --git a/apollo-router/src/services/router/tests.rs b/apollo-router/src/services/router/tests.rs index 7a182f7eb1..7b2664207e 100644 --- a/apollo-router/src/services/router/tests.rs +++ b/apollo-router/src/services/router/tests.rs @@ -1,13 +1,10 @@ use std::sync::Arc; use futures::stream::StreamExt; -use http::HeaderMap; -use http::HeaderValue; use http::Method; use http::Request; use http::Uri; use http::header::CONTENT_TYPE; -use http::header::VARY; use mime::APPLICATION_JSON; use opentelemetry::KeyValue; use parking_lot::Mutex; @@ -22,58 +19,21 @@ use crate::context::OPERATION_NAME; use crate::graphql; use crate::json_ext::Path; use crate::metrics::FutureMetricsExt; +use crate::plugins::content_negotiation::MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE; use crate::plugins::telemetry::CLIENT_NAME; use crate::plugins::telemetry::CLIENT_VERSION; use crate::query_planner::APOLLO_OPERATION_ID; -use crate::services::MULTIPART_DEFER_CONTENT_TYPE; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; use crate::services::router; use crate::services::router::body::RouterBody; use crate::services::router::service::from_supergraph_mock_callback; use crate::services::router::service::from_supergraph_mock_callback_and_configuration; -use crate::services::router::service::process_vary_header; use crate::services::subgraph; use crate::services::supergraph; 
use crate::spec::query::EXTENSIONS_VALUE_COMPLETION_KEY; use crate::test_harness::make_fake_batch; -// Test Vary processing - -#[test] -fn it_adds_default_with_value_origin_if_no_vary_header() { - let mut default_headers = HeaderMap::new(); - process_vary_header(&mut default_headers); - let vary_opt = default_headers.get(VARY); - assert!(vary_opt.is_some()); - let vary = vary_opt.expect("has a value"); - assert_eq!(vary, "origin"); -} - -#[test] -fn it_leaves_vary_alone_if_set() { - let mut default_headers = HeaderMap::new(); - default_headers.insert(VARY, HeaderValue::from_static("*")); - process_vary_header(&mut default_headers); - let vary_opt = default_headers.get(VARY); - assert!(vary_opt.is_some()); - let vary = vary_opt.expect("has a value"); - assert_eq!(vary, "*"); -} - -#[test] -fn it_leaves_varys_alone_if_there_are_more_than_one() { - let mut default_headers = HeaderMap::new(); - default_headers.insert(VARY, HeaderValue::from_static("one")); - default_headers.append(VARY, HeaderValue::from_static("two")); - process_vary_header(&mut default_headers); - let vary = default_headers.get_all(VARY); - assert_eq!(vary.iter().count(), 2); - for value in vary { - assert!(value == "one" || value == "two"); - } -} - #[tokio::test] async fn it_extracts_query_and_operation_name() { let query = "query"; @@ -314,16 +274,26 @@ async fn it_processes_a_valid_query_batch() { .await .unwrap() } - // Send a request - let response = with_config().await.response; - assert_eq!(response.status(), http::StatusCode::OK); - let data: serde_json::Value = serde_json::from_slice( - &router::body::into_bytes(response.into_body()) - .await - .unwrap(), - ) - .unwrap(); - assert_eq!(expected_response, data); + async move { + // Send a request + let response = with_config().await.response; + assert_eq!(response.status(), http::StatusCode::OK); + let data: serde_json::Value = serde_json::from_slice( + &router::body::into_bytes(response.into_body()) + .await + .unwrap(), + ) + .unwrap(); + 
assert_eq!(expected_response, data); + + assert_histogram_sum!( + "apollo.router.operations.batching.size", + 3, + "mode" = "batch_http_link" + ); + } + .with_metrics() + .await; } #[tokio::test] @@ -434,7 +404,10 @@ async fn it_will_process_a_non_batched_defered_query() { } "; let http_request = supergraph::Request::canned_builder() - .header(http::header::ACCEPT, MULTIPART_DEFER_CONTENT_TYPE) + .header( + http::header::ACCEPT, + MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE, + ) .query(query) .build() .unwrap() @@ -491,7 +464,10 @@ async fn it_will_not_process_a_batched_deferred_query() { "; let http_request = make_fake_batch( supergraph::Request::canned_builder() - .header(http::header::ACCEPT, MULTIPART_DEFER_CONTENT_TYPE) + .header( + http::header::ACCEPT, + MULTIPART_DEFER_CONTENT_TYPE_HEADER_VALUE, + ) .query(query) .build() .unwrap() diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index 93830eba7a..e80e8d1545 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -43,7 +43,6 @@ use uuid::Uuid; use super::Plugins; use super::http::HttpClientServiceFactory; use super::http::HttpRequest; -use super::layers::content_negotiation::GRAPHQL_JSON_RESPONSE_HEADER_VALUE; use super::router::body::RouterBody; use super::subgraph::SubgraphRequestId; use crate::Configuration; @@ -61,6 +60,7 @@ use crate::graphql; use crate::json_ext::Object; use crate::layers::DEFAULT_BUFFER_SIZE; use crate::plugins::authentication::subgraph::SigningParamsConfig; +use crate::plugins::content_negotiation::APPLICATION_GRAPHQL_JSON; use crate::plugins::file_uploads; use crate::plugins::subscription::CallbackMode; use crate::plugins::subscription::SUBSCRIPTION_WS_CUSTOM_CONNECTION_PARAMS; @@ -183,9 +183,6 @@ pub(crate) fn generate_tls_client_config( tls_cert_store: Option, client_cert_config: Option<&TlsClientAuth>, ) -> Result { - // Enable crypto - let _ = 
rustls::crypto::aws_lc_rs::default_provider().install_default(); - let tls_builder = rustls::ClientConfig::builder(); Ok(match (tls_cert_store, client_cert_config) { (None, None) => tls_builder.with_native_roots()?.with_no_client_auth(), @@ -1457,7 +1454,7 @@ fn get_graphql_content_type(service_name: &str, parts: &Parts) -> Result) -> Result, Infallible> { Ok(http::Response::builder() - .header(CONTENT_TYPE, GRAPHQL_JSON_RESPONSE_HEADER_VALUE) + .header( + CONTENT_TYPE, + HeaderValue::from_static(APPLICATION_GRAPHQL_JSON), + ) .status(StatusCode::UNAUTHORIZED) .body(r#"invalid"#.into()) .unwrap()) @@ -1839,7 +1839,10 @@ mod tests { async fn emulate_subgraph_application_graphql_response(listener: TcpListener) { async fn handle(_request: http::Request) -> Result, Infallible> { Ok(http::Response::builder() - .header(CONTENT_TYPE, GRAPHQL_JSON_RESPONSE_HEADER_VALUE) + .header( + CONTENT_TYPE, + HeaderValue::from_static(APPLICATION_GRAPHQL_JSON), + ) .status(StatusCode::OK) .body(r#"{"data": null}"#.into()) .unwrap()) diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index 41db590822..3113f4e3d2 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -44,6 +44,7 @@ use crate::plugin::DynPlugin; use crate::plugins::authentication::APOLLO_AUTHENTICATION_JWT_CLAIMS; use crate::plugins::connectors::query_plans::store_connectors; use crate::plugins::connectors::query_plans::store_connectors_labels; +use crate::plugins::content_negotiation::ClientRequestAccepts; use crate::plugins::subscription::APOLLO_SUBSCRIPTION_PLUGIN; use crate::plugins::subscription::Subscription; use crate::plugins::subscription::SubscriptionConfig; @@ -75,12 +76,10 @@ use crate::services::execution::QueryPlan; use crate::services::fetch_service::FetchServiceFactory; use crate::services::http::HttpClientServiceFactory; use 
crate::services::layers::allow_only_http_post_mutations::AllowOnlyHttpPostMutationsLayer; -use crate::services::layers::content_negotiation; use crate::services::layers::persisted_queries::PersistedQueryLayer; use crate::services::layers::query_analysis::QueryAnalysisLayer; use crate::services::new_service::ServiceFactory; use crate::services::query_planner; -use crate::services::router::ClientRequestAccepts; use crate::services::subgraph::BoxGqlStream; use crate::services::subgraph_service::MakeSubgraphService; use crate::services::supergraph; @@ -1018,7 +1017,6 @@ impl PluggableSupergraphServiceBuilder { let sb = Buffer::new( ServiceBuilder::new() - .layer(content_negotiation::SupergraphLayer::default()) .service( self.plugins .iter() diff --git a/apollo-router/src/services/supergraph/tests.rs b/apollo-router/src/services/supergraph/tests.rs index ae2656fbad..b6c7f37930 100644 --- a/apollo-router/src/services/supergraph/tests.rs +++ b/apollo-router/src/services/supergraph/tests.rs @@ -12,7 +12,7 @@ use crate::Notify; use crate::TestHarness; use crate::graphql; use crate::plugin::test::MockSubgraph; -use crate::services::router::ClientRequestAccepts; +use crate::plugins::content_negotiation::ClientRequestAccepts; use crate::services::subgraph; use crate::services::supergraph; use crate::spec::Schema; diff --git a/apollo-router/src/test_harness/http_snapshot.rs b/apollo-router/src/test_harness/http_snapshot.rs index 6bd613d6e0..d3bcdd055e 100644 --- a/apollo-router/src/test_harness/http_snapshot.rs +++ b/apollo-router/src/test_harness/http_snapshot.rs @@ -499,8 +499,6 @@ impl SnapshotServer { } }; - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let http_service = HttpClientService::new( "test", rustls::ClientConfig::builder() diff --git a/apollo-router/src/uplink/feature_gate_enforcement.rs b/apollo-router/src/uplink/feature_gate_enforcement.rs index 5dbf23d4d5..b45b1da800 100644 --- 
a/apollo-router/src/uplink/feature_gate_enforcement.rs +++ b/apollo-router/src/uplink/feature_gate_enforcement.rs @@ -48,19 +48,9 @@ impl FeatureGateEnforcementReport { .supergraph_schema() .schema_definition .directives - .get_all("join__directive") - .filter(|join| { - join.specified_argument_by_name("name") - .and_then(|name| name.as_str()) - .map(|name| name == LINK_DIRECTIVE_NAME) - .unwrap_or_default() - }) - .filter_map(|join| { - join.specified_argument_by_name("args") - .and_then(|arg| arg.as_object()) - }) + .get_all(LINK_DIRECTIVE_NAME) .filter_map(|link| { - ParsedLinkSpec::from_join_directive_args(link).map(|maybe_spec| { + ParsedLinkSpec::from_link_directive(link).map(|maybe_spec| { maybe_spec.ok().map(|spec| (spec.spec_url.to_owned(), spec)) })? }) diff --git a/apollo-router/src/uplink/testdata/feature_enforcement_connect_v0_2.graphql b/apollo-router/src/uplink/testdata/feature_enforcement_connect_v0_2.graphql index aefab23c8a..21dd70e72d 100644 --- a/apollo-router/src/uplink/testdata/feature_enforcement_connect_v0_2.graphql +++ b/apollo-router/src/uplink/testdata/feature_enforcement_connect_v0_2.graphql @@ -2,7 +2,7 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) @link(url: "https://specs.apollo.dev/connect/v0.2", for: EXECUTION) - @join__directive(graphs: [ONE], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.2", import: ["@source", "@connect"]}) + @join__directive(graphs: [ONE], name: "link", args: {url: "https://specs.apollo.dev/connect/v0.1", import: ["@source", "@connect"]}) { query: Query } @@ -56,7 +56,6 @@ enum link__Purpose { type Product @join__type(graph: ONE) - @join__directive(graphs: [ONE], name: "connect", args: {http: {GET: "http://localhost:4001/products", body: "ids: $batch.id"}, selection: "id"}) { id: ID! 
} diff --git a/apollo-router/tests/apollo_otel_traces.rs b/apollo-router/tests/apollo_otel_traces.rs index 827fe1e679..7cb10bf915 100644 --- a/apollo-router/tests/apollo_otel_traces.rs +++ b/apollo-router/tests/apollo_otel_traces.rs @@ -305,7 +305,6 @@ async fn get_traces< where Fut: Future, BoxCloneService)>, { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let _guard = TEST.lock().await; reports.lock().await.clear(); let (task, mut service) = service_fn(reports.clone(), use_legacy_request_span, mocked).await; diff --git a/apollo-router/tests/apollo_reports.rs b/apollo-router/tests/apollo_reports.rs index 2f000ecc23..c0e8ecdabb 100644 --- a/apollo-router/tests/apollo_reports.rs +++ b/apollo-router/tests/apollo_reports.rs @@ -358,7 +358,6 @@ async fn get_report bool + Send + Sync + Copy + 'static> where Fut: Future, BoxCloneService)>, { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let _guard = TEST.lock().await; reports.lock().await.clear(); let (task, mut service) = service_fn( @@ -418,7 +417,6 @@ async fn get_batch_stats_report bool + Send + Sync + Copy + ' request: router::Request, filter: T, ) -> u64 { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); let _guard = TEST.lock().await; reports.lock().await.clear(); let (task, mut service) = diff --git a/apollo-router/tests/integration/content_negotiation.rs b/apollo-router/tests/integration/content_negotiation.rs new file mode 100644 index 0000000000..2a2f66856e --- /dev/null +++ b/apollo-router/tests/integration/content_negotiation.rs @@ -0,0 +1,44 @@ +use std::collections::HashMap; + +use http::HeaderValue; +use serde_json::json; +use tower::BoxError; + +use crate::integration::IntegrationTest; +use crate::integration::common::Query; + +#[tokio::test(flavor = "multi_thread")] +async fn test_content_negotiation() -> Result<(), BoxError> { + let mut router = IntegrationTest::builder() + .config("supergraph:") + .build() + .await; + 
+ router.start().await; + router.assert_started().await; + + let query = json!({"query": "{ __typename }"}); + for accept_header in [ + "application/json", + "application/json,multipart/mixed;subscriptionSpec=1.0", + ] { + let (_, response) = router + .execute_query( + Query::builder() + .body(query.clone()) + .headers(HashMap::from([( + "accept".to_string(), + accept_header.to_string(), + )])) + .build(), + ) + .await; + assert_eq!(response.status(), 200); + assert_eq!( + response.headers().get("content-type").unwrap(), + HeaderValue::from_str("application/json").unwrap() + ); + } + + Ok(()) +} diff --git a/apollo-router/tests/integration/fixtures/redis_connection_closure.router.yaml b/apollo-router/tests/integration/fixtures/redis_connection_closure.router.yaml new file mode 100644 index 0000000000..6e7449ae65 --- /dev/null +++ b/apollo-router/tests/integration/fixtures/redis_connection_closure.router.yaml @@ -0,0 +1,16 @@ +supergraph: + query_planning: + cache: + redis: + required_to_start: true + urls: + - redis://localhost:6379 + ttl: 1s + pool_size: 4 +telemetry: + exporters: + metrics: + prometheus: + listen: 127.0.0.1:4000 + enabled: true + path: /metrics diff --git a/apollo-router/tests/integration/mod.rs b/apollo-router/tests/integration/mod.rs index 43a5236b63..2c5281448f 100644 --- a/apollo-router/tests/integration/mod.rs +++ b/apollo-router/tests/integration/mod.rs @@ -4,6 +4,7 @@ pub(crate) mod common; pub(crate) use common::IntegrationTest; mod connectors; +mod content_negotiation; mod coprocessor; mod docs; #[cfg(any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux")))] diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index f45b0599c7..a5c3966eff 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -1115,3 +1115,29 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key 
.assert_redis_cache_contains(new_cache_key, Some(starting_key)) .await; } + +#[tokio::test(flavor = "multi_thread")] +async fn test_redis_connections_are_closed_on_router_reload() { + if !graph_os_enabled() { + return; + } + + let router_config = include_str!("fixtures/redis_connection_closure.router.yaml"); + let mut router = IntegrationTest::builder() + .config(router_config) + .build() + .await; + + router.start().await; + router.assert_started().await; + + let expected_metric = r#"apollo_router_cache_redis_connections{kind="query planner",otel_scope_name="apollo/router"} 4"#; + router.assert_metrics_contains(expected_metric, None).await; + + // check that reloading the schema yields the same number of redis connections + let new_router_config = format!("{router_config}\ninclude_subgraph_errors:\n all: true"); + router.update_config(&new_router_config).await; + router.assert_reloaded().await; + + router.assert_metrics_contains(expected_metric, None).await; +} diff --git a/apollo-router/tests/integration/telemetry/datadog.rs b/apollo-router/tests/integration/telemetry/datadog.rs index 10b261a002..45224648ac 100644 --- a/apollo-router/tests/integration/telemetry/datadog.rs +++ b/apollo-router/tests/integration/telemetry/datadog.rs @@ -990,6 +990,36 @@ async fn test_resources() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_attributes() -> Result<(), BoxError> { + if !graph_os_enabled() { + return Ok(()); + } + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Datadog) + .config(include_str!("fixtures/datadog.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + + TraceSpec::builder() + .services(["client", "router", "subgraph"].into()) + .attribute("client.name", "foo") + .build() + .validate_datadog_trace( + &mut router, + Query::builder() + .traced(true) + .header("apollographql-client-name", "foo") + .build(), + ) + .await?; + 
router.graceful_shutdown().await; + Ok(()) +} + struct DatadogTraceSpec { trace_spec: TraceSpec, } @@ -1162,7 +1192,22 @@ impl Verifier for DatadogTraceSpec { Ok(()) } - fn verify_span_attributes(&self, _trace: &Value) -> Result<(), BoxError> { + fn verify_span_attributes(&self, trace: &Value) -> Result<(), BoxError> { + for (key, value) in self.attributes.iter() { + // extracts a list of span attribute values with the provided key + let binding = trace.select_path(&format!("$..meta..['{key}']"))?; + let matches_value = binding.iter().any(|v| match v { + Value::Bool(v) => (*v).to_string() == *value, + Value::Number(n) => (*n).to_string() == *value, + Value::String(s) => s == value, + _ => false, + }); + if !matches_value { + return Err(BoxError::from(format!( + "unexpected attribute values for key `{key}`, expected value `{value}` but got {binding:?}" + ))); + } + } Ok(()) } diff --git a/apollo-router/tests/integration/telemetry/fixtures/otlp_override_client_name.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/otlp_override_client_name.router.yaml new file mode 100644 index 0000000000..a2b7a66400 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/otlp_override_client_name.router.yaml @@ -0,0 +1,31 @@ +rhai: + scripts: "tests/integration/telemetry/fixtures" + main: "override_client_name.rhai" +telemetry: + instrumentation: + spans: + mode: spec_compliant + events: + router: + request: info + response: info + error: info + exporters: + tracing: + common: + service_name: router + otlp: + enabled: true + protocol: http + endpoint: + batch_processor: + scheduled_delay: 10ms + metrics: + common: + service_name: router + otlp: + enabled: true + endpoint: /metrics + protocol: http + batch_processor: + scheduled_delay: 10ms diff --git a/apollo-router/tests/integration/telemetry/fixtures/override_client_name.rhai b/apollo-router/tests/integration/telemetry/fixtures/override_client_name.rhai new file mode 100644 index 
0000000000..bd927291c3 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/override_client_name.rhai @@ -0,0 +1,8 @@ +fn router_service(service) { + const request_callback = Fn("process_request"); + service.map_request(request_callback); +} + +fn process_request(request) { + request.context["apollo::telemetry::client_name"] = "foo"; +} diff --git a/apollo-router/tests/integration/telemetry/mod.rs b/apollo-router/tests/integration/telemetry/mod.rs index b8aae9cccc..5f78d34e56 100644 --- a/apollo-router/tests/integration/telemetry/mod.rs +++ b/apollo-router/tests/integration/telemetry/mod.rs @@ -25,6 +25,7 @@ struct TraceSpec { subgraph_sampled: Option, trace_id: Option, resources: HashMap<&'static str, &'static str>, + attributes: HashMap<&'static str, &'static str>, } #[buildstructor::buildstructor] @@ -41,6 +42,7 @@ impl TraceSpec { subgraph_sampled: Option, trace_id: Option, resources: HashMap<&'static str, &'static str>, + attributes: HashMap<&'static str, &'static str>, ) -> Self { Self { operation_name, @@ -53,6 +55,7 @@ impl TraceSpec { subgraph_sampled, trace_id, resources, + attributes, } } } diff --git a/apollo-router/tests/integration/telemetry/otlp.rs b/apollo-router/tests/integration/telemetry/otlp.rs index f3731aa2a4..b2ea5dd9d9 100644 --- a/apollo-router/tests/integration/telemetry/otlp.rs +++ b/apollo-router/tests/integration/telemetry/otlp.rs @@ -697,6 +697,83 @@ async fn test_priority_sampling_no_parent_propagated() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_attributes() -> Result<(), BoxError> { + if !graph_os_enabled() { + return Ok(()); + } + let mock_server = mock_otlp_server(1..).await; + let config = include_str!("fixtures/otlp.router.yaml") + .replace("", &mock_server.uri()); + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Otlp { + endpoint: Some(format!("{}/v1/traces", mock_server.uri())), + }) + .config(config) + .build() + .await; + + 
router.start().await; + router.assert_started().await; + + TraceSpec::builder() + .services(["client", "router", "subgraph"].into()) + .attribute("client.name", "foobar") + .build() + .validate_otlp_trace( + &mut router, + &mock_server, + Query::builder() + .traced(true) + .header("apollographql-client-name", "foobar") + .build(), + ) + .await?; + router.graceful_shutdown().await; + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_plugin_overridden_client_name_is_included_in_telemetry() -> Result<(), BoxError> { + if !graph_os_enabled() { + return Ok(()); + } + let mock_server = mock_otlp_server(1..).await; + let config = include_str!("fixtures/otlp_override_client_name.router.yaml") + .replace("", &mock_server.uri()); + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Otlp { + endpoint: Some(format!("{}/v1/traces", mock_server.uri())), + }) + .config(config) + .build() + .await; + + router.start().await; + router.assert_started().await; + + // rhai script overrides client.name - no matter what client name we pass via headers, it should + // end up equalling the value set in the script (`foo`) + for header_value in [None, Some(""), Some("foo"), Some("bar")] { + let mut headers = HashMap::default(); + if let Some(value) = header_value { + headers.insert("apollographql-client-name".to_string(), value.to_string()); + } + + let query = Query::builder().traced(true).headers(headers).build(); + TraceSpec::builder() + .services(["client", "router", "subgraph"].into()) + .attribute("client.name", "foo") + .build() + .validate_otlp_trace(&mut router, &mock_server, query) + .await + .unwrap_or_else(|_| panic!("Failed with header value {header_value:?}")); + } + + router.graceful_shutdown().await; + Ok(()) +} + struct OtlpTraceSpec<'a> { trace_spec: TraceSpec, mock_server: &'a MockServer, @@ -710,10 +787,6 @@ impl Deref for OtlpTraceSpec<'_> { } impl Verifier for OtlpTraceSpec<'_> { - fn verify_span_attributes(&self, _span: &Value) -> 
Result<(), BoxError> { - // TODO - Ok(()) - } fn spec(&self) -> &TraceSpec { &self.trace_spec } @@ -953,6 +1026,27 @@ impl Verifier for OtlpTraceSpec<'_> { } Ok(()) } + + fn verify_span_attributes(&self, trace: &Value) -> Result<(), BoxError> { + for (key, value) in self.attributes.iter() { + // extracts a list of span attribute values with the provided key + let binding = trace.select_path(&format!( + "$..spans..attributes..[?(@.key == '{key}')].value.*" + ))?; + let matches_value = binding.iter().any(|v| match v { + Value::Bool(v) => (*v).to_string() == *value, + Value::Number(n) => (*n).to_string() == *value, + Value::String(s) => s == value, + _ => false, + }); + if !matches_value { + return Err(BoxError::from(format!( + "unexpected attribute values for key `{key}`, expected value `{value}` but got {binding:?}" + ))); + } + } + Ok(()) + } } async fn mock_otlp_server_delayed() -> MockServer { diff --git a/apollo-router/tests/set_context.rs b/apollo-router/tests/set_context.rs index cc9fe5c04b..a8c17daf5a 100644 --- a/apollo-router/tests/set_context.rs +++ b/apollo-router/tests/set_context.rs @@ -313,8 +313,6 @@ fn setup_from_mocks( configuration: serde_json::Value, mocks: &[(&'static str, &'static str)], ) -> TestHarness<'static> { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let mut mocked_subgraphs = MockedSubgraphs::default(); for (name, m) in mocks { diff --git a/apollo-router/tests/type_conditions.rs b/apollo-router/tests/type_conditions.rs index fd05cf6e82..3dd4af8731 100644 --- a/apollo-router/tests/type_conditions.rs +++ b/apollo-router/tests/type_conditions.rs @@ -354,8 +354,6 @@ fn setup_from_mocks( configuration: serde_json::Value, mocks: &[(&'static str, &'static str)], ) -> TestHarness<'static> { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); - let mut mocked_subgraphs = MockedSubgraphs::default(); for (name, m) in mocks { diff --git a/dev-docs/HYPER_1.0_REVIEW_NOTES.md 
b/dev-docs/HYPER_1.0_REVIEW_NOTES.md index 19d296f07f..4fdac014ba 100644 --- a/dev-docs/HYPER_1.0_REVIEW_NOTES.md +++ b/dev-docs/HYPER_1.0_REVIEW_NOTES.md @@ -6,6 +6,7 @@ Read HYPER_1.0_UPDATE.md first. This provides a lot of generally useful information. ### Crate updates + Many crates have been updated as part of the update. In some parts of codebase we had to continue using the older version of the crate so that opentelemetry (which has not been updated to by hyper 1.0 @@ -35,27 +36,6 @@ We removed this since it's not required in hyper 1.0 Anywhere you see a XXX comment is an indication that this should be reviewed carefully. -### default crypto - -At various places in the code base you'll see code like this: -``` - // Enable crypto - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); -``` - -This is because crypto initialisation is now done differently in rustls and -two crypto stacks are supported: `aws` and `ring`. - -If only one stack is enabled, then there is no need to specify a default -provider. Unfortunately, because some of our crates are quite old, both -aws and ring are specified. In this case no default is favoured by rustls -and the crate panics at runtime when crypto functionality is required. - -The way around this is to specify a default manually. This has to be done -once for the main binary and at various places in tests. - -Hopefully this situation will improve in the future. - ## Focussed Review Please pay particular attention to these files, since they proved tricky diff --git a/dev-docs/metrics.md b/dev-docs/metrics.md index f41fd72994..e0944a979d 100644 --- a/dev-docs/metrics.md +++ b/dev-docs/metrics.md @@ -183,6 +183,32 @@ Make sure to use `.with_metrics()` method on the async block to ensure that the } ``` +Note: this relies on metrics being updated within the same thread. Metrics that are updated from multiple threads will +not be collected correctly. 
+ +```rust +#[tokio::test] +async fn test_spawned_metric_resolution() { + async { + u64_counter!("apollo.router.test", "metric", 1); + assert_counter!("apollo.router.test", 1); + + tokio::spawn(async move { + u64_counter!("apollo.router.test", "metric", 2); + }) + .await + .unwrap(); + + // In real operations, this metric resolves to a total of 3! + // However, in testing, it will resolve to 1, because the second incrementation happens in another thread. + // assert_counter!("apollo.router.test", 3); + assert_counter!("apollo.router.test", 1); + } + .with_metrics() + .await; +} +``` + ## Callsite instrument caching When using the new metrics macros a reference to an instrument is cached to ensure that the meter provider does not have to be queried over and over. diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index de78fdedb3..c2a4be334b 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -2,7 +2,7 @@ version: "3.9" services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v2.2.0 + image: ghcr.io/apollographql/router:v2.2.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 2375f9233d..c943f1c872 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v2.2.0 + image: ghcr.io/apollographql/router:v2.2.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/routing/customization/coprocessor.mdx b/docs/source/routing/customization/coprocessor.mdx index 
49c4946ea1..e860f7753e 100644 --- a/docs/source/routing/customization/coprocessor.mdx +++ b/docs/source/routing/customization/coprocessor.mdx @@ -136,9 +136,19 @@ coprocessor: ### Context configuration -You can configure what you want to send from the context to your coprocessor. By default it's disabled (`false`) and doesn't send any context keys to the coprocessor. You now have the ability to give an array -containing all the context keys you want to send to your coprocessor. Sending only a selective list of context keys will increase the performances because it will lower the size of payloads between the router and coprocessor. -You can also set `context: deprecated` if you still want to use deprecated context key names from router 1.x. If you want to pass all context keys with new naming just set `context: all`. +The router request context is used to share data across stages of the request pipeline. The coprocessor can also use this context. +By default, the context is not sent to the coprocessor (`context: false`). +You can send _all_ context keys to your coprocessor using `context: all`. +You can also specify exactly which context keys you wish to send to a coprocessor by listing them under the `selective` key. This will reduce the size of the request/response and may improve performance. + +If you're upgrading from router 1.x, the [context key names changed](/docs/graphos/routing/upgrade/from-router-v1#renamed-context-keys) in router v2.0. You can specify `context: deprecated` to send all context with the old names, compatible with v1.x. Context keys are translated to their v1.x names before being sent to the coprocessor, and translated back to the v2.x names after being received from the coprocessor. + + + +`context: true` from router 1.x is still supported by the configuration, and is an alias for `context: deprecated`. +We strongly recommend using `context: deprecated` or `context: all` instead. 
+ + Example: @@ -167,14 +177,16 @@ coprocessor: ``` - If you use the `selective` configuration, you'll have to specify new context key names (>=2.x) it doesn't work with deprecated keys. So for example if you try to specify `operation_name` instead of `apollo::supergraph::operation_name` it won't map to the new context key. + +If you use the `selective` configuration, you must use the new context key names from v2.x. It does not support the `deprecated` keys from v1.x. So for example, if you try to specify `operation_name` instead of `apollo::supergraph::operation_name`, it won't map to the new context key. + ### Client configuration -For example, to enable h2c (http2 cleartext) communication with a coprocessor you can use this configuration:: +For example, to enable h2c (http2 cleartext) communication with a coprocessor you can use this configuration: ```yaml title="router.yaml" coprocessor: diff --git a/docs/source/routing/observability/telemetry/metrics-exporters/datadog.mdx b/docs/source/routing/observability/telemetry/metrics-exporters/datadog.mdx index dea5e72616..4f37c4d5b2 100644 --- a/docs/source/routing/observability/telemetry/metrics-exporters/datadog.mdx +++ b/docs/source/routing/observability/telemetry/metrics-exporters/datadog.mdx @@ -1,12 +1,12 @@ --- -title: Datadog exporter (via OTLP) -subtitle: Configure the Datadog exporter for metrics -description: Configure the Datadog exporter for metrics via OpenTelemetry Protocol (OTLP) in the Apollo GraphOS Router or Apollo Router Core. +title: Datadog configuration of OTLP exporter +subtitle: Configure the OTLP metrics exporter for Datadog +description: Configure the OpenTelemetry Protocol (OTLP) metrics exporter for Datadog in the Apollo GraphOS Router or Apollo Router Core. context: - telemetry --- -Enable and configure the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp) for metrics in the GraphOS Router or Apollo Router Core for use with [Datadog](https://www.datadoghq.com/). 
+This metrics exporter is a configuration of the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp) to use with [Datadog](https://www.datadoghq.com/). For general tracing configuration, refer to [Router Metrics Configuration](/router/configuration/telemetry/exporters/metrics/overview). diff --git a/docs/source/routing/observability/telemetry/metrics-exporters/dynatrace.mdx b/docs/source/routing/observability/telemetry/metrics-exporters/dynatrace.mdx index db28900e05..866c43e686 100644 --- a/docs/source/routing/observability/telemetry/metrics-exporters/dynatrace.mdx +++ b/docs/source/routing/observability/telemetry/metrics-exporters/dynatrace.mdx @@ -1,12 +1,12 @@ --- -title: Dynatrace exporter (via OTLP) -subtitle: Configure the Dynatrace exporter for metrics -description: Configure the Dynatrace exporter for metrics via OpenTelemetry Protocol (OTLP) in the Apollo Router. +title: Dynatrace configuration of OTLP exporter +subtitle: Configure the OTLP exporter for Dynatrace +description: Configure the OTLP metrics exporter for Dynatrace via OpenTelemetry Protocol (OTLP) in the Apollo Router. context: - telemetry --- -Enable and configure the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp) for metrics in the Apollo Router for use with [Dynatrace](https://dynatrace.com/). +This metrics exporter is a configuration of the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp) to use with [Dynatrace](https://www.dynatrace.com/). For general tracing configuration, refer to [Router Metrics Configuration](/router/configuration/telemetry/exporters/metrics/overview). 
diff --git a/docs/source/routing/observability/telemetry/metrics-exporters/new-relic.mdx b/docs/source/routing/observability/telemetry/metrics-exporters/new-relic.mdx index 57a0a660bf..dedc42beaa 100644 --- a/docs/source/routing/observability/telemetry/metrics-exporters/new-relic.mdx +++ b/docs/source/routing/observability/telemetry/metrics-exporters/new-relic.mdx @@ -1,12 +1,12 @@ --- -title: New Relic exporter (via OTLP) +title: New Relic configuration of OTLP exporter subtitle: Configure the New Relic exporter for metrics description: Configure the New Relic exporter for metrics via OpenTelemetry Protocol (OTLP) in the Apollo GraphOS Router or Apollo Router Core. context: - telemetry --- -Enable and configure the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp) for metrics in the GraphOS Router or Apollo Router Core for use with [New Relic](https://newrelic.com/). +This metrics exporter is a configuration of the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp) to use with [New Relic](https://newrelic.com/). For general tracing configuration, refer to [Router Metrics Configuration](/router/configuration/telemetry/exporters/metrics/overview). 
diff --git a/docs/source/routing/observability/telemetry/metrics-exporters/otlp.mdx b/docs/source/routing/observability/telemetry/metrics-exporters/otlp.mdx index 6831abfb8f..dc71efdee6 100644 --- a/docs/source/routing/observability/telemetry/metrics-exporters/otlp.mdx +++ b/docs/source/routing/observability/telemetry/metrics-exporters/otlp.mdx @@ -16,6 +16,7 @@ Using the OTLP protocol, you can export metrics to any OTLP compatible receiver, * [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) * [Datadog](https://www.datadoghq.com/) (see [configuration instructions](/router/configuration/telemetry/exporters/metrics/datadog)) +* [Dynatrace](https://www.dynatrace.com/) (see [configuration instructions](/router/configuration/telemetry/exporters/metrics/dynatrace)) * [New Relic](https://www.newrelic.com/) (see [configuration instructions](/router/configuration/telemetry/exporters/metrics/new-relic)) ## OTLP configuration diff --git a/docs/source/routing/observability/telemetry/trace-exporters/datadog.mdx b/docs/source/routing/observability/telemetry/trace-exporters/datadog.mdx index a359423073..ac235e190f 100644 --- a/docs/source/routing/observability/telemetry/trace-exporters/datadog.mdx +++ b/docs/source/routing/observability/telemetry/trace-exporters/datadog.mdx @@ -1,14 +1,15 @@ --- -title: Datadog exporter (via OTLP) -subtitle: Configure the Datadog exporter for tracing -description: Configure the Datadog exporter for tracing via OpenTelemetry Protocol (OTLP) in the Apollo GraphOS Router or Apollo Router Core. +title: Datadog configuration of OTLP exporter +subtitle: Configure the OTLP trace exporter for Datadog +description: Configure the OpenTelemetry Protocol (OTLP) trace exporter for Datadog in the Apollo GraphOS Router or Apollo Router Core. 
context: - telemetry --- + import BatchProcessorPreamble from '../../../../../shared/batch-processor-preamble.mdx'; import BatchProcessorRef from '../../../../../shared/batch-processor-ref.mdx'; -Enable and configure the [Datadog](https://www.datadoghq.com/) exporter for tracing in the GraphOS Router or Apollo Router Core. +This tracing exporter is a configuration of the [OTLP exporter](/graphos/routing/observability/trace-exporters/otlp) to use with [Datadog](https://www.datadoghq.com/). For general tracing configuration, refer to [Router Tracing Configuration](/router/configuration/telemetry/exporters/tracing/overview). diff --git a/docs/source/routing/observability/telemetry/trace-exporters/dynatrace.mdx b/docs/source/routing/observability/telemetry/trace-exporters/dynatrace.mdx index 81f5dc8b86..9c0fbf98e1 100644 --- a/docs/source/routing/observability/telemetry/trace-exporters/dynatrace.mdx +++ b/docs/source/routing/observability/telemetry/trace-exporters/dynatrace.mdx @@ -1,12 +1,12 @@ --- -title: Dynatrace exporter (via OTLP) -subtitle: Configure the Dynatrace exporter for tracing -description: Configure the Dynatrace exporter for tracing via OpenTelemetry Protocol (OTLP) in the Apollo Router. +title: Dynatrace configuration of OTLP exporter +subtitle: Configure the OTLP trace exporter for Dynatrace +description: Configure the OpenTelemetry Protocol (OTLP) trace exporter for Dynatrace in the Apollo GraphOS Router or Apollo Router Core. context: - telemetry --- -Enable and configure the [OTLP exporter](/router/configuration/telemetry/exporters/tracing/otlp) for tracing in the Apollo Router for use with [Dynatrace](https://dynatrace.com/). +This tracing exporter is a configuration of the [OTLP exporter](/graphos/routing/observability/trace-exporters/otlp) to use with [Dynatrace](https://dynatrace.com/). For general tracing configuration, refer to [Router Tracing Configuration](/router/configuration/telemetry/exporters/tracing/overview). 
diff --git a/docs/source/routing/observability/telemetry/trace-exporters/jaeger.mdx b/docs/source/routing/observability/telemetry/trace-exporters/jaeger.mdx index 276846d297..9d1be7bd04 100644 --- a/docs/source/routing/observability/telemetry/trace-exporters/jaeger.mdx +++ b/docs/source/routing/observability/telemetry/trace-exporters/jaeger.mdx @@ -1,12 +1,12 @@ --- -title: Jaeger exporter (via OTLP) -subtitle: Configure the Jaeger exporter for tracing -description: Configure the Jaeger exporter for tracing via OpenTelemetry Protocol (OTLP) in the Apollo GraphOS Router or Apollo Router Core. +title: Jaeger configuration of OTLP exporter +subtitle: Configure the OTLP trace exporter for Jaeger +description: Configure the OpenTelemetry Protocol (OTLP) trace exporter for Jaeger in the Apollo GraphOS Router or Apollo Router Core. context: - telemetry --- -Enable and configure the [Jaeger exporter](https://www.jaegertracing.io/) for tracing in the GraphOS Router or Apollo Router Core. +This tracing exporter is a configuration of the [OTLP exporter](/graphos/routing/observability/trace-exporters/otlp) to use with [Jaeger](https://www.jaegertracing.io/). For general tracing configuration, refer to [Router Tracing Configuration](/router/configuration/telemetry/exporters/tracing/overview). diff --git a/docs/source/routing/observability/telemetry/trace-exporters/new-relic.mdx b/docs/source/routing/observability/telemetry/trace-exporters/new-relic.mdx index 8aa69558c9..5297edacf2 100644 --- a/docs/source/routing/observability/telemetry/trace-exporters/new-relic.mdx +++ b/docs/source/routing/observability/telemetry/trace-exporters/new-relic.mdx @@ -1,12 +1,12 @@ --- -title: New Relic exporter (via OTLP) -subtitle: Configure the New Relic exporter for tracing -description: Configure the New Relic exporter for tracing via OpenTelemetry Protocol (OTLP) in the Apollo GraphOS Router or Apollo Router Core. 
+title: New Relic configuration of OTLP exporter +subtitle: Configure the OTLP trace exporter for New Relic +description: Configure the OpenTelemetry Protocol (OTLP) trace exporter for New Relic in the Apollo GraphOS Router or Apollo Router Core. context: - telemetry --- -Enable and configure the [OTLP exporter](/router/configuration/telemetry/exporters/tracing/otlp) for tracing in the GraphOS Router or Apollo Router Core for use with [New Relic](https://newrelic.com/). +This tracing exporter is a configuration of the [OTLP exporter](/graphos/routing/observability/trace-exporters/otlp) to use with [New Relic](https://newrelic.com/). For general tracing configuration, refer to [Router Tracing Configuration](/router/configuration/telemetry/exporters/tracing/overview). diff --git a/docs/source/routing/observability/telemetry/trace-exporters/overview.mdx b/docs/source/routing/observability/telemetry/trace-exporters/overview.mdx index e0b5aaa4fc..f3a1654bac 100644 --- a/docs/source/routing/observability/telemetry/trace-exporters/overview.mdx +++ b/docs/source/routing/observability/telemetry/trace-exporters/overview.mdx @@ -6,13 +6,14 @@ context: - telemetry --- -The GraphOS Router and Apollo Router Core support collection of traces with [OpenTelemetry](https://opentelemetry.io/), with exporters for: +Apollo Router supports a collection of tracing exporters: -* [Jaeger](/router/configuration/telemetry/exporters/tracing/jaeger) -* [Zipkin](/router/configuration/telemetry/exporters/tracing/zipkin) -* [Datadog](/router/configuration/telemetry/exporters/tracing/datadog) -* [New Relic](/router/configuration/telemetry/exporters/tracing/new-relic) * [OpenTelemetry Protocol (OTLP)](/router/configuration/telemetry/exporters/tracing/otlp) over HTTP or gRPC +* [Datadog (via OTLP configuration)](/router/configuration/telemetry/exporters/tracing/datadog) +* [Dynatrace (via OTLP configuration)](/router/configuration/telemetry/exporters/tracing/dynatrace) +* [Jaeger (via OTLP 
configuration)](/router/configuration/telemetry/exporters/tracing/jaeger) +* [New Relic (via OTLP configuration)](/router/configuration/telemetry/exporters/tracing/new-relic) +* [Zipkin](/router/configuration/telemetry/exporters/tracing/zipkin). The router generates [**spans**](/router/configuration/telemetry/instrumentation/spans) that include the various phases of serving a request and associated dependencies. This is useful for showing how response time is affected by: diff --git a/docs/source/routing/performance/caching/distributed.mdx b/docs/source/routing/performance/caching/distributed.mdx index 2af986512b..eb0e8c21e9 100644 --- a/docs/source/routing/performance/caching/distributed.mdx +++ b/docs/source/routing/performance/caching/distributed.mdx @@ -181,4 +181,4 @@ When this option is active, accessing a cache entry in Redis will reset its expi ### Pool size -The `pool_size` option defines the number of connections to Redis that the router will open. By default, the router will open a single connection to Redis. If there is a lot of traffic between router and Redis and/or there is some latency in thos requests, it is recommended to increase the pool size to reduce that latency. \ No newline at end of file +The `pool_size` option defines the number of connections to Redis that the router will open. By default, the router will open a single connection to Redis. If there is a lot of traffic between router and Redis and/or there is some latency in those requests, it is recommended to increase the pool size to reduce that latency. 
\ No newline at end of file diff --git a/docs/source/routing/query-planning/query-planning-best-practices.mdx b/docs/source/routing/query-planning/query-planning-best-practices.mdx new file mode 100644 index 0000000000..1047205405 --- /dev/null +++ b/docs/source/routing/query-planning/query-planning-best-practices.mdx @@ -0,0 +1,186 @@ +--- +title: Best Practices for Query Planning +subtitle: Design your schemas and use features to optimize query planning performance +description: Learn best practices in GraphQL schema design to achieve efficient query planning of your graphs using Apollo Federation and Apollo Router +--- + +When working with Apollo Federation, changes in your schema can have unexpected impact on the complexity and performance of your graph. Adding one field or changing one directive may create a new supergraph that has hundreds, or even thousands, of new possible paths and edges to connect entities and resolve client operations. Consequently, query planning throughput and latency may degrade. +While validation errors can be found at build time with [schema composition](https://www.apollographql.com/docs/graphos/schema-design/federated-schemas/composition), other changes may lead to issues that only arise at runtime, during query plan generation or execution. + +Examples of changes that can impact query planning include: +* Adding or modifying `@key`, `@requires`, `@provides`, or `@shareable` directive usage +* Adding or removing a type implementation from an interface +* Using `interfaceObject` and adding new fields to an interface + +To help alleviate these issues as much as possible, we recommend following some of these best practices for your federated graph. 
+ +## Use shared types and fields judiciously + +The [`@shareable` directive](https://www.apollographql.com/docs/graphos/schema-design/federated-schemas/sharing-types) allows multiple subgraphs to resolve the same types or fields on entities, giving the query planner options for potentially shorter query paths. However, it's important to use it judiciously. +- Extensive `@shareable` use can exponentially increase the number of possible query plans generated as the query planner will find the shortest path to the desired result. This can then potentially lead to performance degradation at runtime as we generate plans. +- Using `@shareable` at root fields on the `Query`, `Mutation`, and `Subscription` types indicates that any subgraph can resolve a given entry point. While query plans can be deterministic for a given version of Router + Federation, there are no guarantees across versions, meaning that your plans may change if new services get added or deleted. This could cause an unexpected change in traffic for a given service, even if there were no changes in the operations. + - Using shared root types also implies that the fields return the same data in the same order across all subgraphs, even if the data is a list, which is often not the case for dynamic applications. + +## Minimize operations spanning multiple subgraphs + +Operations that need to query multiple subgraphs can impact performance because each additional subgraph queried adds complexity to the query plan, increasing the time in the Router for both generation and execution of the operation. +- Design your schema to minimize operations that span numerous subgraphs. +- Use directives like `@requires` or `@interfaceObject` carefully to control complexity. + + +### `@requires` directive + +The [`@requires` directive](https://www.apollographql.com/docs/graphos/schema-design/federated-schemas/reference/directives#requires) allows a subgraph to fetch additional fields needed to resolve an entity. 
This can be powerful but must be handled with care. +- Changes to fields utilized by `@requires` can impact the subgraph fetches that current operations depend on and may create larger and slower plans. +- When performing schema migrations involving `@requires`, ensure compatibility by deploying changes in a manner that avoids disrupting ongoing queries. Plan deployments and schema changes in an atomic fashion. + +#### Example + +Consider the following example of a `Products` subgraph and a `Reviews` subgraph: + +```graphql title="Products subgraph" showLineNumbers=false disableCopy=true +type Product @key(fields: "upc") { + upc: ID! + nameLowercase: String! +} +``` + +```graphql title="Reviews subgraph" showLineNumbers=false disableCopy=true +type Product @key(fields: "upc") { + upc: ID! + nameLowercase: String! @external + reviews: [Review]! @requires(fields: "nameLowercase") +} +``` + +Suppose you want to deprecate the `nameLowercase` field and replace it with the `name` field, like so: + +```graphql title="Products subgraph" showLineNumbers=false disableCopy=true {3-4} +type Product @key(fields: "upc") { + upc: ID! + nameLowercase: String! @deprecated + name: String! +} +``` + +```graphql title="Reviews subgraph" showLineNumbers=false disableCopy=true {3-5} +type Product @key(fields: "upc") { + upc: ID! + nameLowercase: String! @external + name: String! @external + reviews: [Review]! @requires(fields: "name") +} +``` + +To perform this migration in place: + +1. Modify the `Products` subgraph to add the new field using `rover subgraph publish` to push the new subgraph schema. +2. Deploy a new version of the `Reviews` subgraph with a resolver that accepts either `nameLowercase` or `name` in the source object. +3. Modify the Reviews subgraph's schema in the registry so that it `@requires(fields: "name")`. +4. Deploy a new version of the `Reviews` subgraph with a resolver that only accepts the `name` in its source object. 
+ +Alternatively, you can perform this operation with an atomic migration at the subgraph level by modifying the subgraph's URL: + +1. Modify the `Products` subgraph to add the `name` field (as usual, first deploy all replicas, then use `rover subgraph publish` to push the new subgraph schema). +2. Deploy a new set of `Reviews` replicas to a new URL that reads from `name`. +3. Register the `Reviews` subgraph with the new URL and the schema changes above. + +With this atomic strategy, the query planner resolves all outstanding requests to the old subgraph URL that relied on `nameLowercase` with the old query-planning configuration, which `@requires` the `nameLowercase` field. All new requests are made to the new subgraph URL using the new query-planning configuration, which `@requires` the `name` field. + +## Manage interface migrations + +Interfaces are an essential part of GraphQL schema design, offering flexibility in defining polymorphic types. However, they can also be open for implementation across service boundaries, allowing subgraphs to contribute a new type that changes how existing operations execute. + +- Approach interface migrations similar to database migrations. Ensure that changes to interface implementations are performed safely, avoiding disruptions to query operations. + + +### Example + +Suppose you define a `Channel` interface in one subgraph and other types that implement `Channel` in two other subgraphs: + +```graphql disableCopy=true showLineNumbers=false title="Channel subgraph" +interface Channel @key(fields: "id") { + id: ID! +} +``` + +```graphql disableCopy=true showLineNumbers=false title="Web subgraph" +type WebChannel implements Channel @key(fields: "id") { + id: ID! + webHook: String! +} +``` + +```graphql disableCopy=true showLineNumbers=false title="Email subgraph" +type EmailChannel implements Channel @key(fields: "id") { + id: ID! + emailAddress: String! 
+} +``` + +To safely remove the `EmailChannel` type from your supergraph schema: + +1. Perform a `rover subgraph publish` of the `email` subgraph that removes the `EmailChannel` type from its schema. +2. Deploy a new version of the subgraph that removes the `EmailChannel` type. + +The first step causes the query planner to stop sending fragments `...on EmailChannel`, which would fail validation if sent to a subgraph that isn't aware of the type. + +If you want to keep the `EmailChannel` type but remove it from the `Channel` interface, the process is similar. Instead of removing the `EmailChannel` type altogether, only remove the `implements Channel` addendum to the type definition. This is because the query planner expands queries to interfaces or unions into fragments on their implementing types. + +For example, a query like this: + +```graphql +query FindChannel($id: ID!) { + channel(id: $id) { + id + } +} +``` + +generates two queries, one to each subgraph, like so: + + + + ```graphql title="Query to email subgraph" + query { + _entities(...) { + ...on EmailChannel { + id +} +} +} + ``` + + ```graphql title="Query to web subgraph" + query { + _entities(...) { + ...on WebChannel { + id +} +} +} + ``` + + + +Currently, the router expands all interfaces into implementing types. + +## Use recommended features + +GraphOS and router provide many features that help monitor and improve query planning performance, both at build time and runtime. 
+ +### Build time + +* Use [schema proposals](https://www.apollographql.com/docs/graphos/platform/schema-management/proposals) to review changes that have a large impact across entities and interfaces +* Enable [common linter settings](https://www.apollographql.com/docs/graphos/platform/schema-management/linting) +* Set up [custom checks](https://www.apollographql.com/docs/graphos/platform/schema-management/checks/custom) to do advanced and specific validations, like [limiting the size of query plans](https://github.com/apollosolutions/example-graphos-custom-check-query-planner) + +### Runtime + +In the [router configuration](https://www.apollographql.com/docs/graphos/routing/configuration) there are many settings to help monitor and improve performance impacts. Here are some features all production graphs should consider: + +* Monitor your query planner performance with the [standard instruments](https://www.apollographql.com/docs/graphos/routing/observability/telemetry/instrumentation/standard-instruments#query-planning) +* Enable and configure the [in-memory cache for query plans](https://www.apollographql.com/docs/graphos/routing/performance/caching/in-memory) +* Use the cache [warm up features](https://www.apollographql.com/docs/graphos/routing/performance/caching/in-memory#cache-warm-up) included out of the box and use the `dry-run` headers for operations +* Enable and configure [distributed caches for query plans](https://www.apollographql.com/docs/graphos/routing/performance/caching/distributed) to share across router instances +* Limit the size of operations (and therefore their query plans) with [request limits](https://www.apollographql.com/docs/graphos/routing/security/request-limits) and the cost with [demand control](https://www.apollographql.com/docs/graphos/routing/security/demand-control) diff --git a/docs/source/routing/security/persisted-queries.mdx b/docs/source/routing/security/persisted-queries.mdx index 67479b9e35..23af65e25a 
100644 --- a/docs/source/routing/security/persisted-queries.mdx +++ b/docs/source/routing/security/persisted-queries.mdx @@ -82,7 +82,7 @@ If used with the [`safelist`](#safelist) option, the router logs unregistered an -By default, the router [prewarms the query plan cache](/router/configuration/in-memory-caching#cache-warm-up) using all operations on the PQL when a new schema is loaded, but not at startup. Using the `experimental_prewarm_query_plan_cache` option, you can tell the router to prewarm the cache using the PQL on startup as well, or tell it not to prewarm the cache when reloading the schema. (This does not affect whether the router prewarms the query plan cache with recently-used operations from its in-memory cache.) Prewarming the cache means can reduce request latency by ensuring that operations are pre-planned when requests are received, but can make startup or schema reloads slower. +By default, the router [prewarms the query plan cache](/router/configuration/in-memory-caching#cache-warm-up) using all operations on the PQL when a new schema is loaded, but not at startup. Using the `experimental_prewarm_query_plan_cache` option, you can tell the router to prewarm the cache using the PQL on startup as well, or tell it not to prewarm the cache when reloading the schema. (This does not affect whether the router prewarms the query plan cache with recently-used operations from its in-memory cache.) Prewarming the cache can reduce request latency by ensuring that operations are pre-planned when requests are received, but can make startup or schema reloads slower. 
```yaml title="router.yaml" persisted_queries: diff --git a/docs/source/routing/upgrade/from-router-v1.mdx b/docs/source/routing/upgrade/from-router-v1.mdx index e802f294cf..b5c8f4b47d 100644 --- a/docs/source/routing/upgrade/from-router-v1.mdx +++ b/docs/source/routing/upgrade/from-router-v1.mdx @@ -325,7 +325,6 @@ Various metrics in router 2.x have been renamed to conform to the OpenTelemetry | `apollo_router_cache_miss_time` | `apollo.router.cache.miss.time` | | `apollo_router_state_change_total` | `apollo.router.state.change.total` | | `apollo_router_span_lru_size` | `apollo.router.exporter.span.lru.size` * | -| `apollo_router_session_count_active` | `apollo.router.session.count.active` | | `apollo_router_uplink_fetch_count_total` | `apollo.router.uplink.fetch.count.total` | | `apollo_router_uplink_fetch_duration_seconds` | `apollo.router.uplink.fetch.duration.seconds`| @@ -333,6 +332,8 @@ Various metrics in router 2.x have been renamed to conform to the OpenTelemetry \* `apollo.router.exporter.span.lru.size` now also has an additional `exporter` prefix. +\* `apollo_router_session_count_active` was removed and replaced by `http.server.active_requests`. + ### Changed trace default @@ -406,6 +407,12 @@ The [context key renames](#renamed-context-keys) may impact your coprocessor log You can specify `context: deprecated` to send all context with the old names, compatible with v1.x. Context keys are translated to their v1.x names before being sent to the coprocessor, and translated back to the v2.x names after being received from the coprocessor. + + +`context: true` is an alias for `context: deprecated`. In a future major release, the `context: true` setting will be removed. + + + You can now also specify exactly which context keys you wish to send to a coprocessor by listing them under the `selective` key. This will reduce the size of the request/response and may improve performance. 
**Upgrade step**: Either upgrade your coprocessor to use the new context keys, or add `context: deprecated` to your coprocessor configuration. diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 890bb6197c..a5087b4c99 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 2.2.0 +version: 2.2.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v2.2.0" +appVersion: "v2.2.1" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 3e654c0ebe..126ecae097 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 2.2.0](https://img.shields.io/badge/Version-2.2.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v2.2.0](https://img.shields.io/badge/AppVersion-v2.2.0-informational?style=flat-square) +![Version: 2.2.1](https://img.shields.io/badge/Version-2.2.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v2.2.1](https://img.shields.io/badge/AppVersion-v2.2.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 2.2.0 +helm pull 
oci://ghcr.io/apollographql/helm-charts/router --version 2.2.1 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 2.2.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 2.2.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 2.2.1 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/licenses.html b/licenses.html index a3c53a3734..273e05593a 100644 --- a/licenses.html +++ b/licenses.html @@ -44,17 +44,16 @@

Third Party Licenses

Overview of licenses:

@@ -64,15 +63,21 @@

All license text:

Apache License 2.0

Used by:

                                  Apache License
@@ -261,7 +266,6 @@ 

Used by:

  • pin-project
  • portable-atomic
  • sync_wrapper
  • -
  • sync_wrapper
  • time-core
  • time-macros
  • time
  • @@ -1512,10 +1516,8 @@

    Used by:

    Apache License 2.0

    Used by:

    - -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    -                                 Apache License
    -                           Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
    -      "License" shall mean the terms and conditions for use, reproduction,
    -      and distribution as defined by Sections 1 through 9 of this document.
    -
    -      "Licensor" shall mean the copyright owner or entity authorized by
    -      the copyright owner that is granting the License.
    -
    -      "Legal Entity" shall mean the union of the acting entity and all
    -      other entities that control, are controlled by, or are under common
    -      control with that entity. For the purposes of this definition,
    -      "control" means (i) the power, direct or indirect, to cause the
    -      direction or management of such entity, whether by contract or
    -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -      outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -      "You" (or "Your") shall mean an individual or Legal Entity
    -      exercising permissions granted by this License.
    -
    -      "Source" form shall mean the preferred form for making modifications,
    -      including but not limited to software source code, documentation
    -      source, and configuration files.
    -
    -      "Object" form shall mean any form resulting from mechanical
    -      transformation or translation of a Source form, including but
    -      not limited to compiled object code, generated documentation,
    -      and conversions to other media types.
    -
    -      "Work" shall mean the work of authorship, whether in Source or
    -      Object form, made available under the License, as indicated by a
    -      copyright notice that is included in or attached to the work
    -      (an example is provided in the Appendix below).
    -
    -      "Derivative Works" shall mean any work, whether in Source or Object
    -      form, that is based on (or derived from) the Work and for which the
    -      editorial revisions, annotations, elaborations, or other modifications
    -      represent, as a whole, an original work of authorship. For the purposes
    -      of this License, Derivative Works shall not include works that remain
    -      separable from, or merely link (or bind by name) to the interfaces of,
    -      the Work and Derivative Works thereof.
    -
    -      "Contribution" shall mean any work of authorship, including
    -      the original version of the Work and any modifications or additions
    -      to that Work or Derivative Works thereof, that is intentionally
    -      submitted to Licensor for inclusion in the Work by the copyright owner
    -      or by an individual or Legal Entity authorized to submit on behalf of
    -      the copyright owner. For the purposes of this definition, "submitted"
    -      means any form of electronic, verbal, or written communication sent
    -      to the Licensor or its representatives, including but not limited to
    -      communication on electronic mailing lists, source code control systems,
    -      and issue tracking systems that are managed by, or on behalf of, the
    -      Licensor for the purpose of discussing and improving the Work, but
    -      excluding communication that is conspicuously marked or otherwise
    -      designated in writing by the copyright owner as "Not a Contribution."
    -
    -      "Contributor" shall mean Licensor and any individual or Legal Entity
    -      on behalf of whom a Contribution has been received by Licensor and
    -      subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      copyright license to reproduce, prepare Derivative Works of,
    -      publicly display, publicly perform, sublicense, and distribute the
    -      Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -      this License, each Contributor hereby grants to You a perpetual,
    -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -      (except as stated in this section) patent license to make, have made,
    -      use, offer to sell, sell, import, and otherwise transfer the Work,
    -      where such license applies only to those patent claims licensable
    -      by such Contributor that are necessarily infringed by their
    -      Contribution(s) alone or by combination of their Contribution(s)
    -      with the Work to which such Contribution(s) was submitted. If You
    -      institute patent litigation against any entity (including a
    -      cross-claim or counterclaim in a lawsuit) alleging that the Work
    -      or a Contribution incorporated within the Work constitutes direct
    -      or contributory patent infringement, then any patent licenses
    -      granted to You under this License for that Work shall terminate
    -      as of the date such litigation is filed.
    -
    -   4. Redistribution. You may reproduce and distribute copies of the
    -      Work or Derivative Works thereof in any medium, with or without
    -      modifications, and in Source or Object form, provided that You
    -      meet the following conditions:
    -
    -      (a) You must give any other recipients of the Work or
    -          Derivative Works a copy of this License; and
    -
    -      (b) You must cause any modified files to carry prominent notices
    -          stating that You changed the files; and
    -
    -      (c) You must retain, in the Source form of any Derivative Works
    -          that You distribute, all copyright, patent, trademark, and
    -          attribution notices from the Source form of the Work,
    -          excluding those notices that do not pertain to any part of
    -          the Derivative Works; and
    -
    -      (d) If the Work includes a "NOTICE" text file as part of its
    -          distribution, then any Derivative Works that You distribute must
    -          include a readable copy of the attribution notices contained
    -          within such NOTICE file, excluding those notices that do not
    -          pertain to any part of the Derivative Works, in at least one
    -          of the following places: within a NOTICE text file distributed
    -          as part of the Derivative Works; within the Source form or
    -          documentation, if provided along with the Derivative Works; or,
    -          within a display generated by the Derivative Works, if and
    -          wherever such third-party notices normally appear. The contents
    -          of the NOTICE file are for informational purposes only and
    -          do not modify the License. You may add Your own attribution
    -          notices within Derivative Works that You distribute, alongside
    -          or as an addendum to the NOTICE text from the Work, provided
    -          that such additional attribution notices cannot be construed
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
    -
    -   APPENDIX: How to apply the Apache License to your work.
    -
    -      To apply the Apache License to your work, attach the following
    -      boilerplate notice, with the fields enclosed by brackets "[]"
    -      replaced with your own identifying information. (Don't include
    -      the brackets!)  The text should be enclosed in the appropriate
    -      comment syntax for the file format. We also recommend that a
    -      file or class name and description of purpose be included on the
    -      same "printed page" as the copyright notice for easier
    -      identification within third-party archives.
    -
    -   Copyright [yyyy] [name of copyright owner]
    -
    -   Licensed under the Apache License, Version 2.0 (the "License");
    -   you may not use this file except in compliance with the License.
    -   You may obtain a copy of the License at
    -
    -       http://www.apache.org/licenses/LICENSE-2.0
    -
    -   Unless required by applicable law or agreed to in writing, software
    -   distributed under the License is distributed on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -   See the License for the specific language governing permissions and
    -   limitations under the License.
     
  • @@ -4764,8 +4556,190 @@

    Used by:

    Apache License 2.0

    Used by:

    +
                                     Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    +
                                    Apache License
    @@ -5161,11 +5135,13 @@ 

    Used by:

    • anyhow
    • async-trait
    • +
    • basic-toml
    • dyn-clone
    • erased-serde
    • ghost
    • itoa
    • libc
    • +
    • linkme
    • paste
    • prettyplease
    • proc-macro2
    • @@ -5184,6 +5160,7 @@

      Used by:

    • thiserror-impl
    • thiserror-impl
    • thiserror
    • +
    • thiserror
    • unicode-ident
    • utf-8
    • utf8parse
    • @@ -5799,7 +5776,6 @@

      Apache License 2.0

      Used by:

                                    Apache License
                               Version 2.0, January 2004
      @@ -6638,7 +6614,6 @@ 

      Apache License 2.0

      Used by:

                                    Apache License
                               Version 2.0, January 2004
      @@ -8122,10 +8097,8 @@ 

      Used by:

    • bumpalo
    • bytes-utils
    • cc
    • -
    • cexpr
    • cfg-if
    • ci_info
    • -
    • cmake
    • concurrent-queue
    • const-random-macro
    • const-random
    • @@ -8168,7 +8141,6 @@

      Used by:

    • httparse
    • humantime-serde
    • hyper-rustls
    • -
    • hyper-rustls
    • hyper-timeout
    • idna
    • idna_adapter
    • @@ -8182,10 +8154,8 @@

      Used by:

    • jobserver
    • js-sys
    • lazy_static
    • -
    • lazycell
    • libfuzzer-sys
    • linux-raw-sys
    • -
    • linux-raw-sys
    • lock_api
    • log
    • maplit
    • @@ -8234,16 +8204,11 @@

      Used by:

    • rustc-hash
    • rustc_version
    • rustix
    • -
    • rustix
    • -
    • rustls-native-certs
    • rustls-native-certs
    • rustls-native-certs
    • rustls-pemfile
    • -
    • rustls-pemfile
    • -
    • rustls
    • rustls
    • scopeguard
    • -
    • sct
    • security-framework-sys
    • security-framework
    • security-framework
    • @@ -9732,7 +9697,6 @@

      Apache License 2.0

      Used by:

                                    Apache License
      @@ -11431,13 +11395,6 @@ 

      Apache License 2.0

      Used by:

      + +
    • +

      Apache License 2.0

      +

      Used by:

      + +
      Copyright [2022] [Bryn Cooke]
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +    http://www.apache.org/licenses/LICENSE-2.0
      +
       Unless required by applicable law or agreed to in writing, software
       distributed under the License is distributed on an "AS IS" BASIS,
       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      @@ -11687,43 +11660,6 @@ 

      Used by:

      contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -
      -
    • -
    • -

      BSD 3-Clause "New" or "Revised" License

      -

      Used by:

      - -
      BSD 3-Clause License
      -
      -Copyright (c) 2013, Jyun-Yan You
      -All rights reserved.
      -
      -Redistribution and use in source and binary forms, with or without
      -modification, are permitted provided that the following conditions are met:
      -
      -* Redistributions of source code must retain the above copyright notice, this
      -  list of conditions and the following disclaimer.
      -
      -* Redistributions in binary form must reproduce the above copyright notice,
      -  this list of conditions and the following disclaimer in the documentation
      -  and/or other materials provided with the distribution.
      -
      -* Neither the name of the copyright holder nor the names of its
      -  contributors may be used to endorse or promote products derived from
      -  this software without specific prior written permission.
      -
       THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
       AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
       IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
      @@ -12479,31 +12415,8 @@ 

      Used by:

      ISC License

      Used by:

      -
      Copyright © 2015, Simonas Kazlauskas
      -
      -Permission to use, copy, modify, and/or distribute this software for any purpose with or without
      -fee is hereby granted, provided that the above copyright notice and this permission notice appear
      -in all copies.
      -
      -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
      -SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
      -AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
      -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
      -NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
      -THIS SOFTWARE.
      -
      -
    • -
    • -

      ISC License

      -

      Used by:

      -
      ISC License:
       
      @@ -12761,33 +12674,6 @@ 

      Used by:

      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -
      -
    • -
    • -

      MIT License

      -

      Used by:

      - -
      Copyright (c) 2015 fangyuanziti
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy
      -of this software and associated documentation files (the "Software"), to deal
      -in the Software without restriction, including without limitation the rights
      -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      -copies of the Software, and to permit persons to whom the Software is
      -furnished to do so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in
      -all copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
      -THE SOFTWARE.
       
    • @@ -13803,35 +13689,6 @@

      Used by:

      The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -
    • - -
    • -

      MIT License

      -

      Used by:

      - -
      MIT License
      -
      -Copyright (c) 2017 Denis Kurilenko
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy
      -of this software and associated documentation files (the "Software"), to deal
      -in the Software without restriction, including without limitation the rights
      -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      -copies of the Software, and to permit persons to whom the Software is
      -furnished to do so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
       THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
       IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
       FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      @@ -16454,35 +16311,6 @@ 

      Used by:

      ***** END LICENSE BLOCK ***** @(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ -
      -
    • -
    • -

      Mozilla Public License 2.0

      -

      Used by:

      - -
      This packge contains a modified version of ca-bundle.crt:
      -
      -ca-bundle.crt -- Bundle of CA Root Certificates
      -
      -Certificate data from Mozilla as of: Thu Nov  3 19:04:19 2011#
      -This is a bundle of X.509 certificates of public Certificate Authorities
      -(CA). These were automatically extracted from Mozilla's root certificates
      -file (certdata.txt).  This file can be found in the mozilla source tree:
      -http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1#
      -It contains the certificates in PEM format and therefore
      -can be directly used with curl / libcurl / php_curl, or with
      -an Apache+mod_ssl webserver for SSL client authentication.
      -Just configure this file as the SSLCACertificateFile.#
      -
      -***** BEGIN LICENSE BLOCK *****
      -This Source Code Form is subject to the terms of the Mozilla Public License,
      -v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
      -one at http://mozilla.org/MPL/2.0/.
      -
      -***** END LICENSE BLOCK *****
      -@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
       
    • @@ -16506,62 +16334,6 @@

      Used by:

      * Neither the names of <Name of Development Group, Name of Institution>, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. -
    • - -
    • -

      OpenSSL License

      -

      Used by:

      - -
      OpenSSL License
      -
      -Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved.
      -
      -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
      -
      -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
      -
      -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
      -
      -3. All advertising materials mentioning features or use of this software must display the following acknowledgment: "This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
      -
      -4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to endorse or promote products derived from this software without prior written permission. For written permission, please contact openssl-core@openssl.org.
      -
      -5. Products derived from this software may not be called "OpenSSL" nor may "OpenSSL" appear in their names without prior written permission of the OpenSSL Project.
      -
      -6. Redistributions of any form whatsoever must retain the following acknowledgment: "This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/)"
      -
      -THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      -
      -This product includes cryptographic software written by Eric Young (eay@cryptsoft.com). This product includes software written by Tim Hudson (tjh@cryptsoft.com).
      -
      -
      -Original SSLeay License
      -
      -Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
      -
      -This package is an SSL implementation written by Eric Young (eay@cryptsoft.com). The implementation was written so as to conform with Netscapes SSL.
      -
      -This library is free for commercial and non-commercial use as long as the following conditions are aheared to. The following conditions apply to all code found in this distribution, be it the RC4, RSA, lhash, DES, etc., code; not just the SSL code. The SSL documentation included with this distribution is covered by the same copyright terms except that the holder is Tim Hudson (tjh@cryptsoft.com).
      -
      -Copyright remains Eric Young's, and as such any Copyright notices in the code are not to be removed. If this package is used in a product, Eric Young should be given attribution as the author of the parts of the library used. This can be in the form of a textual message at program startup or in documentation (online or textual) provided with the package.
      -
      -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
      -
      -1. Redistributions of source code must retain the copyright notice, this list of conditions and the following disclaimer.
      -
      -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
      -
      -3. All advertising materials mentioning features or use of this software must display the following acknowledgement:
      -"This product includes cryptographic software written by Eric Young (eay@cryptsoft.com)"
      -The word 'cryptographic' can be left out if the rouines from the library being used are not cryptographic related :-).
      -
      -4. If you include any Windows specific code (or a derivative thereof) from the apps directory (application code) you must include an acknowledgement: "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
      -
      -THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      -
      -The licence and distribution terms for any publically available version or derivative of this code cannot be changed. i.e. this code cannot simply be copied and put under another distribution licence [including the GNU Public Licence.]
       
    • diff --git a/scripts/install.sh b/scripts/install.sh index 3bd56acf07..44d7da67c1 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="${APOLLO_ROUTER_BINARY_DOWNLOAD_PREFIX:="https://github. # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v2.2.0" +PACKAGE_VERSION="v2.2.1" download_binary() { downloader --check diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index d6cf8961ae..b728a94287 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -660,9 +660,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.20" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -670,19 +670,13 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap", "slab", "tokio", "tokio-util", "tracing", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.15.2" @@ -806,16 +800,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.9.0" @@ -823,7 +807,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown", ] [[package]] @@ -2318,6 +2302,6 @@ dependencies = [ 
"arbitrary", "crc32fast", "crossbeam-utils", - "indexmap 2.9.0", + "indexmap", "memchr", ] diff --git a/xtask/src/commands/lint.rs b/xtask/src/commands/lint.rs index 8fbcff0981..59af1830fd 100644 --- a/xtask/src/commands/lint.rs +++ b/xtask/src/commands/lint.rs @@ -52,6 +52,7 @@ impl Lint { "clippy", "--all", "--all-targets", + "--all-features", "--no-deps", "--", "-D",