diff --git a/.circleci/config.yml b/.circleci/config.yml index 6d0b8def12..87acd53d8c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -51,7 +51,7 @@ parameters: cache_version: type: string # increment this to invalidate all the caches - default: v11.{{ arch}}-{{ checksum "rust-toolchain.toml" }} + default: v12.{{ arch}}-{{ checksum "rust-toolchain.toml" }} jaeger_version: type: string # update this as new versions of jaeger become available @@ -228,9 +228,19 @@ commands: - restore_cache: name: Restore .cargo keys: - - rust-cargo-<< pipeline.parameters.cache_version >>-build-<< parameters.os >>-{{ checksum "Cargo.lock" }} - - rust-cargo-<< pipeline.parameters.cache_version >>-build-<< parameters.os >> - - run: cargo xtask lint + - rust-cargo-<< pipeline.parameters.cache_version >>-xtask-<< parameters.os >>-{{ checksum "Cargo.lock" }}-{{ checksum "xtask/Cargo.lock" }} + - rust-cargo-<< pipeline.parameters.cache_version >>-xtask-<< parameters.os >> + - run: + name: Install xtask + command: | + set -e -o pipefail + xtask help || { cd xtask && cargo install --path . 
&& cd ..; } + - run: xtask lint + - save_cache: + name: Save .cargo + key: rust-cargo-<< pipeline.parameters.cache_version >>-xtask-<< parameters.os >>-{{ checksum "Cargo.lock" }}-{{ checksum "xtask/Cargo.lock" }} + paths: + - ~/.cargo xtask_check_compliance: parameters: @@ -240,8 +250,8 @@ commands: - restore_cache: name: Restore .cargo keys: - - rust-cargo-<< pipeline.parameters.cache_version >>-build-<< parameters.os >>-{{ checksum "Cargo.lock" }} - - rust-cargo-<< pipeline.parameters.cache_version >>-build-<< parameters.os >> + - rust-cargo-<< pipeline.parameters.cache_version >>-xtask-<< parameters.os >>-{{ checksum "Cargo.lock" }}-{{ checksum "xtask/Cargo.lock" }} + - rust-cargo-<< pipeline.parameters.cache_version >>-xtask-<< parameters.os >> - install_extra_tools: os: << parameters.os >> # cargo-deny fetches a rustsec advisory DB, which has to happen on github.com over https @@ -273,6 +283,10 @@ commands: cargo_test_args: type: string default: "--" + # additional cache key to have a cache for test_updated + cache_key: + type: string + default: "" steps: - run: name: Start jaeger @@ -281,8 +295,8 @@ commands: - restore_cache: name: Restore .cargo keys: - - rust-cargo-<< pipeline.parameters.cache_version >>-test-<< parameters.os >>-{{ checksum "Cargo.lock" }} - - rust-cargo-<< pipeline.parameters.cache_version >>-test-<< parameters.os >> + - rust-cargo-<< pipeline.parameters.cache_version >>-test-<< parameters.os >><>-{{ checksum "Cargo.lock" }} + - rust-cargo-<< pipeline.parameters.cache_version >>-test-<< parameters.os >><> # As of rustc 1.61.0, must limit the number of linux jobs or we run out of memory (large executor/8GB) - when: @@ -386,10 +400,8 @@ jobs: - checkout - linux_amd_install_baseline - run: - name: Use latest Rust Nightly and update all Rust dependencies + name: Update all Rust dependencies command: | - sed -i '/channel/d' rust-toolchain.toml - echo 'channel = "nightly"' >> rust-toolchain.toml rm Cargo.lock cargo fetch - test_workspace: @@ 
-397,6 +409,7 @@ jobs: # schema_generation test skipped because schemars changed its representation of enums: # https://github.com/GREsau/schemars/blob/master/CHANGELOG.md#086---2021-09-26 cargo_test_args: --no-fail-fast -- --skip schema_generation + cache_key: .test_updated build_release: parameters: diff --git a/CHANGELOG.md b/CHANGELOG.md index fa4cb307ee..38c7b61088 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,225 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [1.7.0] - 2022-12-22 + +## ๐Ÿš€ Features + +### Newly scaffolded projects now include a `Dockerfile` ([Issue #2295](https://github.com/apollographql/router/issues/2295)) + +Custom Router binary projects created using our [scaffolding tooling](https://www.apollographql.com/docs/router/customizations/custom-binary/) will now have a `Dockerfile` emitted to facilitate building custom Docker containers. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2307 + +### Apollo Uplink communication timeout is configurable ([PR #2271](https://github.com/apollographql/router/pull/2271)) + +The amount of time which can elapse before timing out when communicating with Apollo Uplink is now configurable via the `APOLLO_UPLINK_TIMEOUT` environment variable and the `--apollo-uplink-timeout` CLI flag, in a similar fashion to how the interval can be configured. It still defaults to 30 seconds. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2271 + +### Query plan cache is pre-warmed using existing operations when the supergraph changes ([Issue #2302](https://github.com/apollographql/router/issues/2302), [Issue #2308](https://github.com/apollographql/router/issues/2308)) + +A new `warmed_up_queries` configuration option has been introduced to pre-warm the query plan cache when the supergraph changes. 
+ +Under normal operation, query plans are cached to avoid the recomputation cost. However, when the supergraph changes, previously-planned queries must be re-planned to account for implementation changes in the supergraph, even though the query itself may not have changed. Under load, this re-planning can cause performance variations due to the extra computation work. To reduce the impact, it is now possible to pre-warm the query plan cache for the incoming supergraph, prior to changing over to the new supergraph. Pre-warming slightly delays the roll-over to the incoming supergraph, but allows the most-requested operations to not be impacted by the additional computation work. + +To enable pre-warming, the following configuration can be introduced which sets `warmed_up_queries`: + +```yaml +supergraph: + query_planning: + # Pre-plan the 100 most used operations when the supergraph changes. (Default is "0", disabled.) + warmed_up_queries: 100 + experimental_cache: + in_memory: + # Sets the limit of entries in the query plan cache + limit: 512 +``` + +Query planning was also updated to finish executing and setting up the cache, even if the response couldn't be returned to the client which is important to avoid throwing away computationally-expensive work. + +By [@Geal](https://github.com/geal) in https://github.com/apollographql/router/pull/2309 + +## ๐Ÿ› Fixes + +### Propagate errors across inline fragments ([PR #2304](https://github.com/apollographql/router/pull/2304)) + +GraphQL errors are now correctly propagated across inline fragments. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2304 + +### Only rebuild `protos` if `reports.proto` source changes + +Apollo Studio accepts traces and metrics from Apollo Router via the Protobuf specification which lives in the `reports.proto` file in the repository. 
With this contribution, we only re-build from the `reports.proto` file when the file has actually changed, as opposed to doing it on every build which was occurring previously. This change saves build time for developers. + +By [@scottdouglas1989](https://github.com/scottdouglas1989) in https://github.com/apollographql/router/pull/2283 + +### Return an error on duplicate keys in configuration ([Issue #1428](https://github.com/apollographql/router/issues/1428)) + +Repeat usage of the same keys in Router YAML can be hard to notice but indicate a misconfiguration which can cause unexpected behavior since only one of the values can be in effect. With this improvement, the following YAML configuration will raise an error at Router startup to alert the user of the misconfiguration: + +```yaml +telemetry: + tracing: + propagation: + jaeger: true + tracing: + propagation: + jaeger: false +``` + +In this particular example, the error produced would be: + +``` +ERROR duplicated keys detected in your yaml configuration: 'telemetry.tracing' +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2270 + +### Return requested `__typename` in initial chunk of a deferred response ([Issue #1922](https://github.com/apollographql/router/issues/1922)) + +The special-case `__typename` field is no longer being treated incorrectly when requested at the root level on an operation which used `@defer`. 
For example, the following query: + +```graphql +{ + __typename + ...deferedFragment @defer +} + +fragment deferedFragment on Query { + slow +} +``` + +The Router now exhibits the correct behavior for this query with `__typename` being returned as soon as possible in the initial chunk, as follows: + +```json +{"data":{"__typename": "Query"},"hasNext":true} +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2274 + +### Log retriable Apollo Uplink failures at the `debug` level ([Issue #2004](https://github.com/apollographql/router/issues/2004)) + +The log levels for messages pertaining to Apollo Uplink schema fetch failures are now emitted at `debug` level to reduce noise since such failures do not indicate an actual error since they can be and are retried immediately. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2215 + +### Traces won't cause missing field-stats ([Issue #2267](https://github.com/apollographql/router/issues/2267)) + +Metrics are now correctly measured comprehensively and traces will obey the trace sampling configuration. Previously, if a request was sampled out of tracing it would not always contribute to metrics correctly. This was particularly problematic for users which had configured high sampling rates for their traces. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2277 and https://github.com/apollographql/router/pull/2286 + +### Replace default `notify` watcher mechanism with `PollWatcher` ([Issue #2245](https://github.com/apollographql/router/issues/2245)) + +We have replaced the default mechanism used by our underlying file-system notification library, [`notify`](https://crates.io/crates/notify), to use [`PollWatcher`](https://docs.rs/notify/4.0.17/notify/poll/struct.PollWatcher.html). 
This more aggressive change has been taken on account of continued reports of failed hot-reloading and follows up our previous replacement of [`hotwatch`](https://crates.io/crates/hotwatch). We don't have very demanding file watching requirements, so while `PollWatcher` offers less sophisticated functionality and _slightly_ slower reactivity, it is at least consistent on all platforms and should provide the best developer experience. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2276 + +### Preserve subgraph error's `path` property when redacting subgraph errors ([Issue #1818](https://github.com/apollographql/router/issues/1818)) + +The `path` property in errors is now preserved. Previously, error redaction was removing the error's `path` property, which made debugging difficult but also made it impossible to correctly match errors from deferred responses to the appropriate fields in the requested operation. Since the response shape for the primary and deferred responses are defined from the client-facing "API schema", rather than the supergraph, this change will not result in leaking internal supergraph implementation details to clients and the result will be consistent, even if the subgraph which provides a particular field changes over time. + +By [@Geal](https://github.com/geal) in https://github.com/apollographql/router/pull/2273 + +### Use correct URL decoding for `variables` in HTTP `GET` requests ([Issue #2248](https://github.com/apollographql/router/issues/2248)) + +The correct URL decoding will now be applied when making a `GET` request that passes in the `variables` query string parameter. Previously, _all_ '+' characters were being replaced with spaces which broke cases where the `+` symbol was not merely an encoding symbol (e.g., ISO8601 date time values with timezone information). 
+ +By [@neominik](https://github.com/neominik) in https://github.com/apollographql/router/pull/2249 + +## ๐Ÿ›  Maintenance + +### Return additional details to client for invalid GraphQL requests ([Issue #2301](https://github.com/apollographql/router/issues/2301)) + +Additional context will be returned to clients in the error indicating the source of the error when an invalid GraphQL request is made. For example, passing a string instead of an object for the `variables` property will now inform the client of the mistake, providing a better developer experience: + +```json +{ + "errors": [ + { + "message": "Invalid GraphQL request", + "extensions": { + "details": "failed to deserialize the request body into JSON: invalid type: string \"null\", expected a map at line 1 column 100", + "code": "INVALID_GRAPHQL_REQUEST" + } + } + ] +} +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2306 + +### OpenTelemetry spans to subgraphs now include the request URL ([Issue #2280](https://github.com/apollographql/router/issues/2280)) + +A new `http.url` attribute has been attached to `subgraph_request` OpenTelemetry trace spans which specifies the URL which the particular request was made to. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2292 + +### Errors returned to clients are now more consistently formed ([Issue #2101](https://github.com/apollographql/router/issues/2101)) + +We now return errors in a more consistent shape to those which were returned by Apollo Gateway and Apollo Server, and seen in the [documentation](https://www.apollographql.com/docs/apollo-server/data/errors/). In particular, when available, a stable `code` field will be included in the error's `extensions`. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2178 + +## 🧪 Experimental + +> **Note** +> +> These features are subject to change slightly (usually, in terms of naming or interfaces) before graduating to general availability. +> +> [Read more about how we treat experimental features](https://www.apollographql.com/docs/resources/product-launch-stages/#experimental-features). + +### Introduce a `router_service` layer ([Issue #1496](https://github.com/apollographql/router/issues/1496)) + +A `router_service` layer is now part of our service stack and allows plugin developers to process raw HTTP requests and responses from clients prior to those requests reaching the GraphQL processing within the `supergraph_service` layer. This will become a stable part of our API as we receive feedback from its early adopters. Please open a discussion with any feedback you might have! + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2170 + +### Request pipeline customization via HTTP ([Issue #1916](https://github.com/apollographql/router/issues/1916)) + +We now offer the ability to configure some aspects of the Router via the response to an HTTP `POST` request to an external endpoint. Initially, we are only offering this option to customize the newly introduced `router_service` (above, in these release notes), but our intention is to introduce customization of [existing service layers](https://www.apollographql.com/docs/router/customizations/overview/#how-customizations-work) as well (e.g., `supergraph_service`, `subgraph_service`, etc.). Conceptually, this addition allows similar customizations that are possible with Rhai or Rust plugins by sending the operation's context as of a particular phase of the request pipeline "over the wire" to an external HTTP service which has the ability to process its properties and return a (potentially) modified response to the Router. 
This will become a stable part of our API as we receive feedback from its early adopters. Please open a discussion with any feedback you might have! + +When this experimental option is enabled, contextual data will be transmitted as a JSON payload to an HTTP endpoint as a `POST` request. The response to such a request will be processed by the Router and any changes made by the external service will affect the remaining layers in the request pipeline. This allows external services to customize the Router behavior, but requires intentionally blocking Router's normal request pipeline. Therefore, any latency of a configured external service will have a direct impact on the performance of the Router and external services should be as performant as possible. + +To experiment with this behavior, consider adopting a configuration similar to the following which communicates with a service running on `http://127.0.0.1:8081` for the `router` service layer: + +```yaml +plugins: + experimental.external: + # A URL which will be called for each request for any configured stage. + url: http://127.0.0.1:8081 + + # A human-readable interval specifying the maximum allowed time. (Defaults to "1s", or one second) + timeout: 2s + + # A "stage" represents a phase of the request pipeline in which the external service will be invoked. + # They sit in the request pipeline like our Service Layers for Rust/Rhai, seen in our docs: + # https://www.apollographql.com/docs/router/customizations/overview/#how-customizations-work + stages: + + # Currently, the only supported value is "router". + router: + + # Define which properties of the request should be transmitted in the payload. + # Choosing the least amount of data will reduce the size of the payload. + # By default, all values are false and, when false, their presence in this map is optional. + request: + headers: true + context: true + body: true + sdl: true + + # Similar to "request", but which properties of the response should be sent. 
+ # Again, all values are false by default and only must be specified if they are enabled. + response: + headers: true + context: true +``` + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2229 + # [1.6.0] - 2022-12-13 ## โ— BREAKING โ— diff --git a/Cargo.lock b/Cargo.lock index ae122caa4e..f2572fd016 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -69,9 +69,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -91,6 +91,12 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "ansi_term" version = "0.12.1" @@ -102,17 +108,17 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.66" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" [[package]] name = "apollo-compiler" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4e8b67b10b590ac58e555af24b7cc7863deacb4c7bc6ae2efd60b0256a2b654" +checksum = "f1466d86c8bec27cd5c28c80538a298729cf57ab3127389741336ac586634ec8" dependencies = [ - "apollo-parser 0.4.0", + "apollo-parser 0.4.1", "miette 4.7.1", "ordered-float 2.10.0", "rowan", @@ -127,7 +133,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b17d38f06e92256e9b0b271b878e20309822a587b2acfa234a60d36d92b6b43" dependencies = [ - 
"apollo-parser 0.3.1", + "apollo-parser 0.3.2", "thiserror", ] @@ -137,24 +143,24 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "555c85cfb5672ee5d5925db34c15b8bc53e97bf5c67eb0a75d54ee9fe51ec8f0" dependencies = [ - "apollo-parser 0.4.0", + "apollo-parser 0.4.1", "thiserror", ] [[package]] name = "apollo-parser" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8f6cc3fa1313e045538ed2ce72ba916d52b501cd81e636a0bd5cdc703a0c73" +checksum = "d640c8fb7f9ab98a78a8086bb413d8ecf3ee44849976e1636e27265f09e9e544" dependencies = [ "rowan", ] [[package]] name = "apollo-parser" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bceda0395fd9cf784b4c6bb4adbaee52706ed7cbe7d2403e77e62cdc760145d2" +checksum = "8efddeb45af3f03212f39365abbeb0572fabc87a619efaedac38d40a9d604a9e" dependencies = [ "rowan", "thiserror", @@ -162,13 +168,13 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.6.0" +version = "1.7.0" dependencies = [ "access-json", "ansi_term", "anyhow", "apollo-encoder 0.4.0", - "apollo-parser 0.4.0", + "apollo-parser 0.4.1", "arc-swap", "askama", "async-compression", @@ -176,10 +182,10 @@ dependencies = [ "atty", "axum", "backtrace", - "base64 0.13.1", + "base64 0.20.0", "buildstructor 0.5.1", "bytes", - "clap 3.2.23", + "clap 4.0.29", "console-subscriber", "dashmap", "derivative", @@ -191,6 +197,7 @@ dependencies = [ "flate2", "futures", "graphql_client", + "heck 0.4.0", "hex", "http", "http-body", @@ -214,6 +221,7 @@ dependencies = [ "miette 5.5.0", "mime", "mockall", + "multer", "multimap", "notify", "once_cell", @@ -248,6 +256,7 @@ dependencies = [ "sha2", "shellexpand", "static_assertions", + "strum_macros", "sys-info", "tempfile", "test-log", @@ -278,7 +287,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.6.0" +version = "1.7.0" dependencies = 
[ "apollo-router", "async-trait", @@ -294,11 +303,11 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.6.0" +version = "1.7.0" dependencies = [ "anyhow", "cargo-scaffold", - "clap 3.2.23", + "clap 4.0.29", "copy_dir", "regex", "str_inflector", @@ -313,7 +322,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9c717390e188a27cbbe09c76042332bf6cc3bb6a83a73d71c0bceb6f2d73cb9" dependencies = [ "apollo-encoder 0.4.0", - "apollo-parser 0.4.0", + "apollo-parser 0.4.1", "arbitrary", "once_cell", "thiserror", @@ -407,9 +416,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", @@ -468,7 +477,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi 0.3.9", ] @@ -494,7 +503,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.4", + "itoa", "matchit", "memchr", "mime", @@ -571,6 +580,12 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +[[package]] +name = "base64" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" + [[package]] name = "base64ct" version = "1.5.3" @@ -640,10 +655,7 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ - "lazy_static", "memchr", - 
"regex-automata", - "serde", ] [[package]] @@ -700,17 +712,11 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" - [[package]] name = "cargo-scaffold" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29870394429099fd98f0a518aff593310b5c9fe25178a17949219a15ae3882eb" +checksum = "a14e2a7adeaf890958568da541ab7b52fefb784937d8ea7f18a03218b1701fe9" dependencies = [ "anyhow", "buildstructor 0.3.2", @@ -737,9 +743,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.76" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" +checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" dependencies = [ "jobserver", ] @@ -750,6 +756,33 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "ciborium" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" + +[[package]] +name = "ciborium-ll" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "clap" version = "2.34.0" @@ -771,22 +804,32 @@ version = "3.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ - "atty", "bitflags", - "clap_derive", - "clap_lex", + "clap_lex 0.2.4", "indexmap", + "textwrap 0.16.0", +] + +[[package]] +name = "clap" +version = "4.0.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d63b9e9c07271b9957ad22c173bae2a4d9a81127680962039296abcd2f8251d" +dependencies = [ + "bitflags", + "clap_derive", + "clap_lex 0.3.0", + "is-terminal", "once_cell", "strsim 0.10.0", "termcolor", - "textwrap 0.16.0", ] [[package]] name = "clap_derive" -version = "3.2.18" +version = "4.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" dependencies = [ "heck 0.4.0", "proc-macro-error", @@ -804,6 +847,15 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "clap_lex" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "coarsetime" version = "0.1.22" @@ -857,11 +909,11 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.2.4" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4780a44ab5696ea9e28294517f1fffb421a83a25af521333c838635509db9c" +checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] @@ -950,9 +1002,9 @@ dependencies = [ [[package]] name = "const-oid" -version = 
"0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" [[package]] name = "const-random" @@ -1065,15 +1117,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" +checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" dependencies = [ + "anes", "atty", "cast", - "clap 2.34.0", + "ciborium", + "clap 3.2.23", "criterion-plot", - "csv", "futures", "itertools", "lazy_static", @@ -1083,7 +1136,6 @@ dependencies = [ "rayon", "regex", "serde", - "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -1093,9 +1145,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", "itertools", @@ -1124,22 +1176,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset", + "memoffset 0.7.1", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = 
"4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if", ] @@ -1172,28 +1224,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "csv" -version = "1.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" -dependencies = [ - "bstr", - "csv-core", - "itoa 0.4.8", - "ryu", - "serde", -] - -[[package]] -name = "csv-core" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" -dependencies = [ - "memchr", -] - [[package]] name = "ct-codecs" version = "1.1.1" @@ -1230,7 +1260,7 @@ dependencies = [ "hashbrown", "lock_api", "once_cell", - "parking_lot_core 0.9.4", + "parking_lot_core 0.9.5", "serde", ] @@ -1273,9 +1303,9 @@ dependencies = [ [[package]] name = "der" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ "const-oid", "pem-rfc7468", @@ -1295,9 +1325,9 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903dff04948f22033ca30232ab8eca2c3fc4c913a8b6a34ee5199699814817f" +checksum = "f8a16495aeb28047bb1185fca837baf755e7d71ed3aeed7f8504654ffa927208" dependencies = [ "proc-macro2", "quote", @@ -1358,9 +1388,9 @@ checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" 
dependencies = [ "block-buffer", "const-oid", @@ -1434,9 +1464,9 @@ dependencies = [ [[package]] name = "ed25519-compact" -version = "2.0.2" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f2d21333b679bbbac680b3eb45c86937e42f69277028f4e97b599b80b86c253" +checksum = "6a3d382e8464107391c8706b4c14b087808ecb909f6c15c34114bc42e53a9e4c" dependencies = [ "ct-codecs", "getrandom", @@ -1487,12 +1517,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" dependencies = [ - "atty", "humantime", + "is-terminal", "log", "regex", "termcolor", @@ -1507,6 +1537,27 @@ dependencies = [ "serde", ] +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -1544,9 +1595,9 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b9663d381d07ae25dc88dbdf27df458faa83a9b25336bcac83d5e452b5fc9d3" +checksum = "4e884668cd0c7480504233e951174ddc3b382f7c2666e3b7310b5c4e7b0c37f9" dependencies = [ "cfg-if", "libc", @@ -1639,9 +1690,9 @@ dependencies = [ [[package]] name = "fraction" -version = "0.12.1" +version = "0.12.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "99df8100674344d1cee346c764684f7ad688a4dcaa1a3efb2fdb45daf9acf4f9" +checksum = "7aa5de57a62c2440ece64342ea59efb7171aa7d016faf8dfcb8795066a17146b" dependencies = [ "lazy_static", "num", @@ -2016,6 +2067,15 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + [[package]] name = "hex" version = "0.4.3" @@ -2042,24 +2102,24 @@ dependencies = [ [[package]] name = "hmac-sha1-compact" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76968b14b68737b664da5f9a31eb00af72f69a618a8ef2e2c8d30c494d28dae" +checksum = "05e2440a0078e20c3b68ca01234cea4219f23e64b0c0bdb1200c5550d54239bb" [[package]] name = "hmac-sha256" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43ecbd4fb98b97457d10a29f8bdfee1fad612b6ce879b39d7b8dd6ce510875af" +checksum = "fc736091aacb31ddaa4cd5f6988b3c21e99913ac846b41f32538c5fae5d71bfe" dependencies = [ "digest", ] [[package]] name = "hmac-sha512" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4d6b820c3093490ef1fb8253caa34feb83b0ab894c06c8c16c3c02cfaebc1eb" +checksum = "520c9c3f6040661669bc5c91e551b605a520c8e0a63a766a91a65adef734d151" dependencies = [ "digest", ] @@ -2072,7 +2132,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.4", + "itoa", ] [[package]] @@ -2141,7 +2201,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 1.0.4", + "itoa", "pin-project-lite", "socket2", "tokio", @@ -2282,17 +2342,15 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = 
"introspector-gadget" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6a5345dd15741225868a205140b730de97b4f00b7d22a8520ae9d3e6266518" +checksum = "b9360a9dd04f347bf40cbb128bf622d4ef524fb89aee8cff836b88471d4f14dd" dependencies = [ "apollo-encoder 0.3.4", "backoff", "graphql_client", - "humantime", "hyper", "reqwest", - "semver 1.0.14", "serde", "serde_json", "thiserror", @@ -2309,11 +2367,33 @@ dependencies = [ "ghost", ] +[[package]] +name = "io-lifetimes" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +dependencies = [ + "libc", + "windows-sys 0.42.0", +] + [[package]] name = "ipnet" -version = "2.5.1" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11b0d96e660696543b251e58030cf9787df56da39dab19ad60eae7353040917e" + +[[package]] +name = "is-terminal" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" +checksum = "927609f78c2913a6f6ac3c27a4fe87f43e2a35367c0c4b0f8265e8f49a104330" +dependencies = [ + "hermit-abi 0.2.6", + "io-lifetimes", + "rustix", + "windows-sys 0.42.0", +] [[package]] name = "is_ci" @@ -2339,12 +2419,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.4" @@ -2393,7 +2467,7 @@ dependencies = [ "fancy-regex", "fraction", "iso8601", - "itoa 1.0.4", + "itoa", "lazy_static", "memchr", "num-cmp", @@ -2501,7 +2575,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - 
"spin", + "spin 0.5.2", ] [[package]] @@ -2596,6 +2670,12 @@ dependencies = [ "syn", ] +[[package]] +name = "linux-raw-sys" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f" + [[package]] name = "lock_api" version = "0.4.9" @@ -2617,9 +2697,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.8" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" +checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" dependencies = [ "hashbrown", ] @@ -2672,6 +2752,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + [[package]] name = "miette" version = "4.7.1" @@ -2823,6 +2912,24 @@ dependencies = [ "syn", ] +[[package]] +name = "multer" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed4198ce7a4cbd2a57af78d28c6fbb57d81ac5f1d6ad79ac6c5587419cbdf22" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "log", + "memchr", + "mime", + "spin 0.9.4", + "version_check", +] + [[package]] name = "multimap" version = "0.8.3" @@ -2919,9 +3026,9 @@ dependencies = [ [[package]] name = "num-bigint-dig" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "566d173b2f9406afbc5510a90925d5a2cd80cae4605631f1212303df265de011" +checksum = "2399c9463abc5f909349d8aa9ba080e0b88b3ce2885389b60b993f39b1a56905" dependencies = [ "byteorder", "lazy_static", @@ -2998,7 +3105,7 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", ] @@ -3044,9 +3151,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.42" +version = "0.10.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" +checksum = "29d971fd5722fec23977260f6e81aa67d2f22cadbdc2aa049f1022d9a3be1566" dependencies = [ "bitflags", "cfg-if", @@ -3076,9 +3183,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.77" +version = "0.9.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" +checksum = "5454462c0eced1e97f2ec09036abc8da362e66802f66fd20f86854d9d8cbcbc4" dependencies = [ "autocfg", "cc", @@ -3284,9 +3391,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.3.1" +version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3baf96e39c5359d2eb0dd6ccb42c62b91d9678aa68160d261b9e0ccbf9e9dea9" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" [[package]] name = "overload" @@ -3330,7 +3437,7 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core 0.8.5", + "parking_lot_core 0.8.6", ] [[package]] @@ -3340,14 +3447,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.4", + "parking_lot_core 0.9.5", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = 
"60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if", "instant", @@ -3359,9 +3466,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" dependencies = [ "cfg-if", "libc", @@ -3372,9 +3479,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1c2c742266c2f1041c914ba65355a83ae8747b05f208319784083583494b4b" +checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" [[package]] name = "pem-rfc7468" @@ -3393,9 +3500,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.1" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a528564cc62c19a7acac4d81e01f39e53e25e17b934878f4c6d25cc2836e62f8" +checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0" dependencies = [ "thiserror", "ucd-trie", @@ -3403,9 +3510,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.4.1" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fd9bc6500181952d34bd0b2b0163a54d794227b498be0b7afa7698d0a7b18f" +checksum = "cdc078600d06ff90d4ed238f0119d84ab5d43dbaad278b0e33a8820293b32344" dependencies = [ "pest", "pest_generator", @@ -3413,9 +3520,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.4.1" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2610d5ac5156217b4ff8e46ddcef7cdf44b273da2ac5bca2ecbfa86a330e7c4" +checksum = "28a1af60b1c4148bb269006a750cff8e2ea36aff34d2d96cf7be0b14d1bed23c" dependencies = [ 
"pest", "pest_meta", @@ -3426,9 +3533,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.4.1" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824749bf7e21dd66b36fbe26b3f45c713879cccd4a009a917ab8e045ca8246fe" +checksum = "fec8605d59fc2ae0c6c1aefc0c7c7a9769732017c0ce07f7a9cfffa7b4404f20" dependencies = [ "once_cell", "pest", @@ -3543,9 +3650,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.2" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab68289ded120dcbf9d571afcf70163233229052aec9b08ab09532f698d0e1e6" +checksum = "f54fc5dc63ed3bbf19494623db4f3af16842c0d975818e469022d09e53f0aa05" dependencies = [ "difflib", "float-cmp", @@ -3557,15 +3664,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6e7125585d872860e9955ca571650b27a4979c5823084168c5ed5bbfb016b56" +checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" [[package]] name = "predicates-tree" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad3f7fa8d61e139cbc7c3edfebf3b6678883a53f5ffac65d1259329a93ee43a5" +checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" dependencies = [ "predicates-core", "termtree", @@ -3664,9 +3771,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0841812012b2d4a6145fae9a6af1534873c32aa67fff26bd09f8fa42c83f95a" +checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0" dependencies = [ "bytes", "prost-derive", @@ -3674,9 +3781,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.3" +version = "0.11.4" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e330bf1316db56b12c2bcfa399e8edddd4821965ea25ddb2c134b610b1c1c604" +checksum = "276470f7f281b0ed53d2ae42dd52b4a8d08853a3c70e7fe95882acbb98a6ae94" dependencies = [ "bytes", "heck 0.4.0", @@ -3778,21 +3885,19 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.3" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" +checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" dependencies = [ - "autocfg", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -3802,16 +3907,16 @@ dependencies = [ [[package]] name = "redis" -version = "0.21.6" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571c252c68d09a2ad3e49edd14e9ee48932f3e0f27b06b4ea4c9b2a706d31103" +checksum = "152f3863635cbb76b73bc247845781098302c6c9ad2060e1a9a7de56840346b6" dependencies = [ "async-trait", "bytes", "combine 4.6.6", "crc16", "futures-util", - "itoa 1.0.4", + "itoa", "percent-encoding", "pin-project-lite", "rand", @@ -4030,9 +4135,9 @@ dependencies = [ [[package]] name = "rhai_codegen" -version = "1.4.2" +version = "1.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36791b0b801159db25130fd46ac726d2751c070260bba3a4a0a3eeb6231bb82a" +checksum = "c087528477f486dd71d6dc98a922d92403f0cfdaf317d670542caffb0a770164" dependencies = [ "proc-macro2", "quote", @@ -4048,7 +4153,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi 
0.3.9", @@ -4088,7 +4193,7 @@ dependencies = [ name = "router-fuzz" version = "0.0.0" dependencies = [ - "apollo-parser 0.4.0", + "apollo-parser 0.4.1", "apollo-smith", "env_logger", "libfuzzer-sys", @@ -4105,16 +4210,16 @@ checksum = "5811547e7ba31e903fe48c8ceab10d40d70a101f3d15523c847cce91aa71f332" dependencies = [ "countme", "hashbrown", - "memoffset", + "memoffset 0.6.5", "rustc-hash", "text-size", ] [[package]] name = "rsa" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0ecc3307be66bfb3574577895555bacfb9a37a8d5cd959444b72ff02495c618" +checksum = "094052d5470cbcef561cb848a7209968c9f12dfa6d668f4bca048ac5de51099c" dependencies = [ "byteorder", "digest", @@ -4195,6 +4300,20 @@ dependencies = [ "semver 1.0.14", ] +[[package]] +name = "rustix" +version = "0.36.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3807b5d10909833d3e9acd1eb5fb988f79376ff10fce42937de71a449c4c588" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.42.0", +] + [[package]] name = "rustls" version = "0.20.7" @@ -4389,28 +4508,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.150" +version = "1.0.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91" +checksum = "97fed41fc1a24994d044e6db6935e69511a1153b52c15eb42493b26fa87feba0" dependencies = [ "serde_derive", ] -[[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" -dependencies = [ - "half", - "serde", -] - [[package]] name = "serde_derive" -version = "1.0.150" +version = "1.0.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e" +checksum = "255abe9a125a985c05190d687b320c12f9b1f0b99445e608c21ba0782c719ad8" dependencies = [ "proc-macro2", "quote", @@ -4430,12 +4539,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.89" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" +checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" dependencies = [ "indexmap", - "itoa 1.0.4", + "itoa", "ryu", "serde", ] @@ -4459,7 +4568,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.4", + "itoa", "ryu", "serde", ] @@ -4543,9 +4652,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shellexpand" -version = "2.1.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" +checksum = "dd1c7ddea665294d484c39fd0c0d2b7e35bbfe10035c5fe1854741a57f6880e1" dependencies = [ "dirs", ] @@ -4571,9 +4680,9 @@ dependencies = [ [[package]] name = "similar" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ac7f900db32bf3fd12e0117dd3dc4da74bc52ebaac97f39668446d89694803" +checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" [[package]] name = "slab" @@ -4643,6 +4752,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" + 
[[package]] name = "spki" version = "0.6.0" @@ -4739,7 +4854,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-compiler", - "apollo-parser 0.4.0", + "apollo-parser 0.4.1", "apollo-router", "async-trait", "futures", @@ -4929,18 +5044,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -5077,9 +5192,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -5166,6 +5281,7 @@ dependencies = [ "axum", "base64 0.13.1", "bytes", + "flate2", "futures-core", "futures-util", "h2", @@ -5438,9 +5554,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "typetag" @@ -5745,9 +5861,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.5" +version = "0.22.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki", ] diff --git a/README.md b/README.md index 5a15025f1f..de30166296 100644 --- a/README.md +++ b/README.md @@ -6,9 +6,9 @@ The **Apollo Router** is a configurable, high-performance **graph router** written in Rust to run a [federated supergraph](https://www.apollographql.com/docs/federation/) that uses [Apollo Federation 2](https://www.apollographql.com/docs/federation/v2/federation-2/new-in-federation-2). -Apollo Router is well-tested, regularly benchmarked, includes most major features of Apollo Gateway and is able to serve production-scale workloads. Please note that the (pre-1.0) version is not yet "semver stable" and we may still make breaking changes. Generally speaking, we expect most breaking changes to be on the plugin API and the configuration file format. We will clearly convey such changes in the release notes. +Apollo Router is well-tested, regularly benchmarked, includes most major features of Apollo Gateway and is able to serve production-scale workloads. Please note that the (pre-1.0) version is not yet "semver stable" and we may still make breaking changes. Generally speaking, we expect most breaking changes to be on the plugin API and the configuration file format. We will clearly convey such changes in the release notes. -New releases and their release notes (along with notes about any breaking changes) can be found on the [Releases](https://github.com/apollographql/router/releases) page, and the latest release can always be found [on the latest page](https://github.com/apollographql/router/releases/latest). The `CHANGELOG.md` at the root of this repository also contains _unreleased_ changes in addition to the full history of changes. 
+New releases and their release notes (along with notes about any breaking changes) can be found on the [Releases](https://github.com/apollographql/router/releases) page, and the latest release can always be found [on the latest page](https://github.com/apollographql/router/releases/latest). The `CHANGELOG.md` at the root of this repository also contains _unreleased_ changes in addition to the full history of changes. Currently, we're publishing new releases every 1-2 weeks. @@ -38,6 +38,10 @@ OPTIONS: The time between polls to Apollo uplink. Minimum 10s [env: APOLLO_UPLINK_POLL_INTERVAL=] [default: 10s] + --apollo-uplink-timeout + The timeout for each of the polls to Apollo Uplink. [env: APOLLO_UPLINK_TIMEOUT=] + [default: 30s] + -c, --config Configuration location relative to the project directory [env: APOLLO_ROUTER_CONFIG_PATH=] @@ -67,7 +71,7 @@ SUBCOMMANDS: [Apollo](https://apollographql.com/) is building software and a graph platform to unify GraphQL across your apps and services. We help you ship faster with: * [Apollo Studio](https://www.apollographql.com/studio/develop/) โ€“ A free, end-to-end platform for managing your GraphQL lifecycle. Track your GraphQL schemas in a hosted registry to create a source of truth for everything in your graph. Studio provides an IDE (Apollo Explorer) so you can explore data, collaborate on queries, observe usage, and safely make schema changes. -* [Apollo Federation](https://www.apollographql.com/apollo-federation) โ€“ The industry-standard open architecture for building a distributed graph. Compose and manage your graphs using [Rover](https://www.apollographql.com/rover/) and then use Apollo Router to query plan and route requests across multiple subgraphs. +* [Apollo Federation](https://www.apollographql.com/apollo-federation) โ€“ The industry-standard open architecture for building a distributed graph. 
Compose and manage your graphs using [Rover](https://www.apollographql.com/rover/) and then use Apollo Router to query plan and route requests across multiple subgraphs. * [Apollo Client](https://www.apollographql.com/apollo-client/) โ€“ The most popular GraphQL client for the web. Apollo also builds and maintains [Apollo iOS](https://github.com/apollographql/apollo-ios) and [Apollo Android](https://github.com/apollographql/apollo-android). * [Apollo Server](https://www.apollographql.com/docs/apollo-server/) โ€“ A production-ready JavaScript GraphQL server that connects to any microservice, API, or database. Compatible with all popular JavaScript frameworks and deployable in serverless environments. @@ -111,4 +115,4 @@ Apollo Graph, Inc. Source code in this repository is covered by the Elastic License 2.0. The default throughout the repository is a license under the Elastic License 2.0, unless a file header or a license file in a subdirectory specifies another -license. [See the LICENSE](./LICENSE) for the full license text. +license. [See the LICENSE](./LICENSE) for the full license text. diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index d5f0c841e0..60e7aca339 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.6.0" +version = "1.7.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" @@ -10,7 +10,7 @@ publish = false [dev-dependencies] apollo-router = { path = "../apollo-router" } -criterion = { version = "0.3", features = ["async_tokio", "async_futures"] } +criterion = { version = "0.4", features = ["async_tokio", "async_futures"] } futures = "0.3" once_cell = "1" serde_json = { version = "1", features = ["preserve_order"] } diff --git a/apollo-router-benchmarks/benches/basic_composition.rs b/apollo-router-benchmarks/benches/basic_composition.rs index ea21f45a11..63b0886e6a 100644 --- a/apollo-router-benchmarks/benches/basic_composition.rs +++ b/apollo-router-benchmarks/benches/basic_composition.rs @@ -10,7 +10,7 @@ fn from_elem(c: &mut Criterion) { let builder = setup(); - let router = runtime.block_on(builder.build()).unwrap(); + let router = runtime.block_on(builder.build_router()).unwrap(); b.to_async(runtime) .iter(|| basic_composition_benchmark(router.clone())); }); diff --git a/apollo-router-benchmarks/src/lib.rs b/apollo-router-benchmarks/src/lib.rs index b36a324134..fa78c1ef5b 100644 --- a/apollo-router-benchmarks/src/lib.rs +++ b/apollo-router-benchmarks/src/lib.rs @@ -7,7 +7,7 @@ pub mod tests { let runtime = tokio::runtime::Runtime::new().unwrap(); let builder = setup(); - let router = runtime.block_on(builder.build()).unwrap(); + let router = runtime.block_on(builder.build_router()).unwrap(); runtime.block_on(async move { basic_composition_benchmark(router).await }); } } diff --git a/apollo-router-benchmarks/src/shared.rs b/apollo-router-benchmarks/src/shared.rs index 81c43bf0c2..15f0f67ed1 100644 --- a/apollo-router-benchmarks/src/shared.rs +++ b/apollo-router-benchmarks/src/shared.rs @@ -3,6 +3,7 @@ // in dev mode use apollo_router::graphql::Response; use apollo_router::plugin::test::MockSubgraph; +use apollo_router::services::router; use apollo_router::services::supergraph; use apollo_router::MockedSubgraphs; use apollo_router::TestHarness; @@ -17,24 +18,30 @@ static 
EXPECTED_RESPONSE: Lazy = Lazy::new(|| { static QUERY: &str = r#"query TopProducts($first: Int) { topProducts(first: $first) { upc name reviews { id product { name } author { id name } } } }"#; -pub async fn basic_composition_benchmark(mut supergraph_service: supergraph::BoxCloneService) { +pub async fn basic_composition_benchmark(mut router_service: router::BoxCloneService) { let request = supergraph::Request::fake_builder() .query(QUERY.to_string()) .variable("first", 2usize) .build() - .expect("expecting valid request"); - - let response = supergraph_service - .ready() - .await - .unwrap() - .call(request) - .await - .unwrap() - .next_response() - .await + .expect("expecting valid request") + .try_into() .unwrap(); + let response: Response = serde_json::from_slice( + &router_service + .ready() + .await + .unwrap() + .call(request) + .await + .unwrap() + .next_response() + .await + .unwrap() + .unwrap(), + ) + .unwrap(); + assert_eq!(response, *EXPECTED_RESPONSE); } diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 960bed71e4..a70a8b2d59 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "apollo-router-scaffold" -version = "1.6.0" +version = "1.7.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" publish = false [dependencies] -anyhow = "1.0.66" -clap = { version = "3.2.23", features = ["derive"] } -cargo-scaffold = { version = "0.8.6", default-features = false } +anyhow = "1.0.68" +clap = { version = "4.0.29", features = ["derive"] } +cargo-scaffold = { version = "0.8.7", default-features = false } regex = "1" str_inflector = "0.12.0" toml = "0.5.9" diff --git a/apollo-router-scaffold/templates/base/.dockerignore b/apollo-router-scaffold/templates/base/.dockerignore new file mode 100644 index 0000000000..c2c4a5aa95 --- /dev/null +++ b/apollo-router-scaffold/templates/base/.dockerignore @@ -0,0 +1 @@ +target/** \ No newline at end of file diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index dc76d8ca05..0c941c27f2 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.6.0" +apollo-router = "1.7.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/Dockerfile b/apollo-router-scaffold/templates/base/Dockerfile new file mode 100644 index 0000000000..35572787d4 --- /dev/null +++ b/apollo-router-scaffold/templates/base/Dockerfile @@ -0,0 +1,49 @@ +# Use the rust build image from docker as our base +# renovate-automation: rustc version +FROM rust:1.65.0 as build + +# Set our working directory for the build +WORKDIR /usr/src/router + +# Update our build image and install required packages +RUN apt-get update +RUN apt-get -y install \ + npm \ + protobuf-compiler + +# Add rustfmt since build requires it +RUN rustup component add rustfmt + +# Copy the router source to our build environment 
+COPY . . + +# Build and install the custom binary +RUN cargo build --release + +# Make directories for config and schema +RUN mkdir -p /dist/config && \ + mkdir /dist/schema && \ + mv target/release/router /dist + +# Copy configuration for docker image +COPY router.yaml /dist/config.yaml + +FROM debian:bullseye-slim + +RUN apt-get update +RUN apt-get -y install \ + ca-certificates + +# Set labels for our image +LABEL org.opencontainers.image.authors="Apollo Graph, Inc. https://github.com/apollographql/router" +LABEL org.opencontainers.image.source="https://github.com/apollographql/router" + +# Copy in the required files from our build image +COPY --from=build --chown=root:root /dist /dist + +WORKDIR /dist + +ENV APOLLO_ROUTER_CONFIG_PATH="/dist/config.yaml" + +# Default executable is the router +ENTRYPOINT ["/dist/router"] diff --git a/apollo-router-scaffold/templates/base/README.md b/apollo-router-scaffold/templates/base/README.md index 5fa2c3326e..98ec70eb24 100644 --- a/apollo-router-scaffold/templates/base/README.md +++ b/apollo-router-scaffold/templates/base/README.md @@ -81,3 +81,40 @@ Your release binary is now located in `target/release/router` cargo router plugin remove hello_world ``` +# Docker + +You can use the provided Dockerfile to build a release container. + +Make sure your router is configured to listen to `0.0.0.0` so you can query it from outside the container: + +```yml + supergraph: + listen: 0.0.0.0:4000 +``` + +Use your `APOLLO_KEY` and `APOLLO_GRAPH_REF` environment variables to run the router in managed federation. + + ```bash + docker build -t my_custom_router . 
+ docker run -e APOLLO_KEY="your apollo key" -e APOLLO_GRAPH_REF="your apollo graph ref" -p 4000:4000 my_custom_router + ``` + +Otherwise add a `COPY` step to the Dockerfile, and edit the entrypoint: + +```Dockerfile +# Copy configuration for docker image +COPY router.yaml /dist/config.yaml +# Copy supergraph for docker image +COPY my_supergraph.graphql /dist/supergraph.graphql + +# [...] and change the entrypoint + +# Default executable is the router +ENTRYPOINT ["/dist/router", "-s", "/dist/supergraph.graphql"] +``` + +You can now build and run your custom router: + ```bash + docker build -t my_custom_router . + docker run -p 4000:4000 my_custom_router + ``` diff --git a/apollo-router-scaffold/templates/base/router.yaml b/apollo-router-scaffold/templates/base/router.yaml index a4c05293c8..8411f80a8b 100644 --- a/apollo-router-scaffold/templates/base/router.yaml +++ b/apollo-router-scaffold/templates/base/router.yaml @@ -1,2 +1,5 @@ +# uncomment this section if you plan to use the Dockerfile +# supergraph: +# listen: 0.0.0.0:4000 plugins: # Add plugin configuration here diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index e175f61e76..bb0feddddf 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git="https://github.com/apollographql/router.git", tag = "v1.6.0" } +apollo-router-scaffold = { git="https://github.com/apollographql/router.git", tag = "v1.7.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router-scaffold/templates/plugin/src/plugins/{{snake_name}}.rs b/apollo-router-scaffold/templates/plugin/src/plugins/{{snake_name}}.rs index 
e6e97129c8..fa994f23c5 100644 --- a/apollo-router-scaffold/templates/plugin/src/plugins/{{snake_name}}.rs +++ b/apollo-router-scaffold/templates/plugin/src/plugins/{{snake_name}}.rs @@ -3,6 +3,7 @@ use apollo_router::plugin::PluginInit; use apollo_router::register_plugin; use apollo_router::services::supergraph; {{#if type_basic}} +use apollo_router::services::router; use apollo_router::services::execution; use apollo_router::services::subgraph; {{/if}} @@ -45,6 +46,22 @@ impl Plugin for {{pascal_name}} { Ok({{pascal_name}} { configuration: init.config }) } + // Delete this function if you are not customizing it. + fn router_service( + &self, + service: router::BoxService, + ) -> router::BoxService { + // Always use service builder to compose your plugins. + // It provides off the shelf building blocks for your plugin. + // + // ServiceBuilder::new() + // .service(service) + // .boxed() + + // Returning the original service means that we didn't add any extra functionality at this point in the lifecycle. + service + } + // Delete this function if you are not customizing it. 
fn supergraph_service( &self, @@ -147,6 +164,7 @@ register_plugin!("{{project_name}}", "{{snake_name}}", {{pascal_name}}); mod tests { use apollo_router::TestHarness; use apollo_router::services::supergraph; + use apollo_router::graphql; use tower::BoxError; use tower::ServiceExt; @@ -161,16 +179,17 @@ mod tests { } })) .unwrap() - .build() + .build_router() .await .unwrap(); let request = supergraph::Request::canned_builder().build().unwrap(); - let mut streamed_response = test_harness.oneshot(request).await?; + let mut streamed_response = test_harness.oneshot(request.try_into()?).await?; - let first_response = streamed_response - .next_response() - .await - .expect("couldn't get primary response"); + let first_response: graphql::Response = + serde_json::from_slice(streamed_response + .next_response() + .await + .expect("couldn't get primary response")?.to_vec().as_slice()).unwrap(); assert!(first_response.data.is_some()); diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 30c2f59cc0..afd0f59091 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.6.0" +version = "1.7.0" authors = ["Apollo Graph, Inc. 
"] repository = "https://github.com/apollographql/router/" documentation = "https://www.apollographql.com/docs/router/" @@ -39,7 +39,7 @@ features = ["docs_rs"] [dependencies] access-json = "0.1.0" -anyhow = "1.0.66" +anyhow = "1.0.68" ansi_term = "0.12" apollo-parser = "0.4.0" arc-swap = "1.5.1" @@ -53,13 +53,14 @@ async-trait = "0.1.59" atty = "0.2.14" axum = { version = "0.5.17", features = ["headers", "json", "original-uri"] } backtrace = "0.3.66" -base64 = "0.13.1" +base64 = "0.20.0" buildstructor = "0.5.1" bytes = "1.3.0" -clap = { version = "3.2.23", default-features = false, features = [ +clap = { version = "4.0.29", default-features = false, features = [ "env", "derive", "std", + "help", ] } console-subscriber = { version = "0.1.8", optional = true } dashmap = { version = "5.4.0", features = ["serde"] } @@ -89,11 +90,12 @@ jsonschema = { version = "0.16.1", default-features = false } lazy_static = "1.4.0" libc = "0.2.138" linkme = "0.3.6" -lru = "0.7.8" +lru = "0.8.1" mediatype = "0.19.11" mockall = "0.11.3" miette = { version = "5.5.0", features = ["fancy"] } mime = "0.3.16" +multer = "2.0.4" multimap = "0.8.3" # To avoid tokio issues notify = { version = "5.0.0", default-features = false, features=["macos_kqueue"] } @@ -135,15 +137,15 @@ opentelemetry-zipkin = { version = "0.16.0", default-features = false, features "reqwest-rustls", ] } opentelemetry-prometheus = "0.11.0" -paste = "1.0.10" +paste = "1.0.11" pin-project-lite = "0.2.9" prometheus = "0.13" -prost = "0.11.0" +prost = "0.11.3" prost-types = "0.11.0" proteus = "0.5.0" rand = "0.8.5" rhai = { version = "1.11.0", features = ["sync", "serde", "internals"] } -redis = { version = "0.21.6", optional = true, features = ["cluster", "tokio-comp"] } +redis = { version = "0.21.7", optional = true, features = ["cluster", "tokio-comp"] } redis_cluster_async = { version = "0.7.0", optional = true } regex = "1.7.0" reqwest = { version = "0.11.13", default-features = false, features = [ @@ -154,22 +156,24 @@ 
reqwest = { version = "0.11.13", default-features = false, features = [ router-bridge = "0.1.12" rust-embed="6.4.2" schemars = { version = "0.8.11", features = ["url"] } -shellexpand = "2.1.2" +shellexpand = "3.0.0" sha2 = "0.10.6" -serde = { version = "1.0.150", features = ["derive", "rc"] } +serde = { version = "1.0.151", features = ["derive", "rc"] } serde_json_bytes = { version = "0.2.0", features = ["preserve_order"] } -serde_json = { version = "1.0.89", features = ["preserve_order"] } +serde_json = { version = "1.0.91", features = ["preserve_order"] } serde_urlencoded = "0.7.1" serde_yaml = "0.8.26" static_assertions = "1.1.0" +strum_macros = "0.24.3" sys-info = "0.9.1" -thiserror = "1.0.37" +thiserror = "1.0.38" tokio = { version = "1.23.0", features = ["full"] } tokio-stream = { version = "0.1.11", features = ["sync", "net"] } tokio-util = { version = "0.7.4", features = ["net", "codec"] } -tonic = { version = "0.8.2", features = ["transport", "tls", "tls-roots"] } +tonic = { version = "0.8.2", features = ["transport", "tls", "tls-roots", "gzip"] } tower = { version = "0.4.13", features = ["full"] } tower-http = { version = "0.3.5", features = [ + "add-extension", "trace", "cors", "compression-br", @@ -191,6 +195,7 @@ urlencoding = "2.1.2" uuid = { version = "1.2.2", features = ["serde", "v4"] } yaml-rust = "0.4.5" askama = "0.11.1" +heck = "0.4.0" apollo-encoder = "0.4.0" [target.'cfg(macos)'.dependencies] @@ -201,7 +206,7 @@ uname = "0.1.1" [dev-dependencies] insta = { version = "1.23.0", features = ["json", "redactions", "yaml"] } -introspector-gadget = "0.1.0" +introspector-gadget = "0.2.0" maplit = "1.0.2" memchr = { version = "2.5.0", default-features = false } mockall = "0.11.3" diff --git a/apollo-router/build/studio.rs b/apollo-router/build/studio.rs index 6c4024b2c3..afeabdb0c6 100644 --- a/apollo-router/build/studio.rs +++ b/apollo-router/build/studio.rs @@ -8,8 +8,6 @@ pub fn main() -> Result<(), Box> { let reports_src = 
proto_dir.join("reports.proto"); let reports_out = out_dir.join("reports.proto"); - println!("cargo:rerun-if-changed={}", reports_src.to_str().unwrap()); - // Process the retrieved content to: // - Insert a package Report; line after the import lines (currently only one) and before the first message definition // - Remove the Apollo TS extensions [(js_use_toArray)=true] and [(js_preEncoded)=true] from the file @@ -19,13 +17,15 @@ pub fn main() -> Result<(), Box> { let mut content = std::fs::read_to_string(&reports_src)?; let message = "\nmessage"; let msg_index = content.find(message).ok_or("cannot find message string")?; - content.insert_str(msg_index, "\npackage Report;\n"); + content.insert_str(msg_index, "\npackage Reports;\n"); content = content.replace("[(js_use_toArray)=true]", ""); content = content.replace("[(js_preEncoded)=true]", ""); std::fs::write(&reports_out, &content)?; + println!("cargo:rerun-if-changed={}", reports_src.to_str().unwrap()); + // Process the proto files - let proto_files = [reports_out]; + tonic_build::configure() .field_attribute( "Trace.start_time", @@ -49,7 +49,8 @@ pub fn main() -> Result<(), Box> { ) .type_attribute(".", "#[derive(serde::Serialize)]") .type_attribute("StatsContext", "#[derive(Eq, Hash)]") - .compile(&proto_files, &[&out_dir, &proto_dir])?; + .emit_rerun_if_changed(false) + .compile(&[reports_out], &[&out_dir])?; Ok(()) } diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index e6a370b81c..ebdfdd4c6f 100644 --- a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -1,13 +1,8 @@ //! Axum http server factory. Axum provides routing capability on top of Hyper HTTP. 
use std::pin::Pin; use std::sync::Arc; -use std::time::Duration; -use axum::extract::rejection::JsonRejection; use axum::extract::Extension; -use axum::extract::Host; -use axum::extract::OriginalUri; -use axum::http::header::HeaderMap; use axum::http::StatusCode; use axum::middleware; use axum::response::*; @@ -33,37 +28,27 @@ use tower_http::compression::CompressionLayer; use tower_http::compression::DefaultPredicate; use tower_http::compression::Predicate; use tower_http::trace::TraceLayer; -use tracing::Span; -use super::handlers::handle_get; -use super::handlers::handle_get_with_static; -use super::handlers::handle_post; use super::listeners::ensure_endpoints_consistency; use super::listeners::ensure_listenaddrs_consistency; use super::listeners::extra_endpoints; use super::listeners::ListenersAndRouters; -use super::utils::check_accept_header; use super::utils::decompress_request_body; use super::utils::PropagatingMakeSpan; use super::ListenAddrAndRouter; use crate::axum_factory::listeners::get_extra_listeners; use crate::axum_factory::listeners::serve_router_on_listen_addr; -use crate::cache::DeduplicatingCache; use crate::configuration::Configuration; -use crate::configuration::Homepage; use crate::configuration::ListenAddr; -use crate::configuration::Sandbox; -use crate::graphql; use crate::http_server_factory::HttpServerFactory; use crate::http_server_factory::HttpServerHandle; use crate::http_server_factory::Listener; -use crate::plugins::telemetry::formatters::TRACE_ID_FIELD_NAME; +use crate::plugins::traffic_shaping::Elapsed; +use crate::plugins::traffic_shaping::RateLimited; use crate::router::ApolloRouterError; use crate::router_factory::Endpoint; -use crate::router_factory::SupergraphServiceFactory; -use crate::services::layers::apq::APQLayer; -use crate::services::transport; -use crate::tracer::TraceId; +use crate::router_factory::RouterFactory; +use crate::services::router; /// A basic http server using Axum. 
/// Uses streaming as primary method of response. @@ -93,10 +78,9 @@ pub(crate) fn make_axum_router( service_factory: RF, configuration: &Configuration, mut endpoints: MultiMap, - apq: APQLayer, ) -> Result where - RF: SupergraphServiceFactory, + RF: RouterFactory, { ensure_listenaddrs_consistency(configuration, &endpoints)?; @@ -107,16 +91,19 @@ where ); endpoints.insert( configuration.health_check.listen.clone(), - Endpoint::new( + Endpoint::from_router_service( "/health".to_string(), - service_fn(move |_req: transport::Request| { + service_fn(move |req: router::Request| { let health = Health { status: HealthStatus::Up, }; - async move { - Ok(http::Response::builder() - .body(serde_json::to_vec(&health).map_err(BoxError::from)?.into())?) + Ok(router::Response { + response: http::Response::builder().body::( + serde_json::to_vec(&health).map_err(BoxError::from)?.into(), + )?, + context: req.context, + }) } }) .boxed(), @@ -132,7 +119,6 @@ where endpoints .remove(&configuration.supergraph.listen) .unwrap_or_default(), - apq, )?; let mut extra_endpoints = extra_endpoints(endpoints); @@ -161,19 +147,10 @@ impl HttpServerFactory for AxumHttpServerFactory { extra_endpoints: MultiMap, ) -> Self::Future where - RF: SupergraphServiceFactory, + RF: RouterFactory, { Box::pin(async move { - let apq = APQLayer::with_cache( - DeduplicatingCache::from_configuration( - &configuration.supergraph.apq.experimental_cache, - "APQ", - ) - .await, - ); - - let all_routers = - make_axum_router(service_factory, &configuration, extra_endpoints, apq)?; + let all_routers = make_axum_router(service_factory, &configuration, extra_endpoints)?; // serve main router @@ -296,38 +273,17 @@ fn main_endpoint( service_factory: RF, configuration: &Configuration, endpoints_on_main_listener: Vec, - apq: APQLayer, ) -> Result where - RF: SupergraphServiceFactory, + RF: RouterFactory, { let cors = configuration.cors.clone().into_layer().map_err(|e| { ApolloRouterError::ServiceCreationError(format!("CORS 
configuration error: {e}").into()) })?; - let main_route = main_router::(configuration, apq) + let main_route = main_router::(configuration) .layer(middleware::from_fn(decompress_request_body)) - .layer( - TraceLayer::new_for_http() - .make_span_with(PropagatingMakeSpan::new()) - .on_request(|_: &Request<_>, span: &Span| { - let trace_id = TraceId::maybe_new() - .map(|t| t.to_string()) - .unwrap_or_default(); - - span.record(TRACE_ID_FIELD_NAME, trace_id.as_str()); - }) - .on_response(|resp: &Response<_>, duration: Duration, span: &Span| { - // Duration here is instant based - span.record("apollo_private.duration_ns", duration.as_nanos() as i64); - // otel.status_code now has to be a string rather than enum. See opentelemetry_tracing::layer::str_to_status - if resp.status() >= StatusCode::BAD_REQUEST { - span.record("otel.status_code", "error"); - } else { - span.record("otel.status_code", "ok"); - } - }), - ) + .layer(TraceLayer::new_for_http().make_span_with(PropagatingMakeSpan::default())) .layer(Extension(service_factory)) .layer(cors) // Compress the response body, except for multipart responses such as with `@defer`. 
@@ -344,9 +300,9 @@ where Ok(ListenAddrAndRouter(listener, route)) } -pub(super) fn main_router(configuration: &Configuration, apq: APQLayer) -> axum::Router +pub(super) fn main_router(configuration: &Configuration) -> axum::Router where - RF: SupergraphServiceFactory, + RF: RouterFactory, { let mut graphql_configuration = configuration.supergraph.clone(); if graphql_configuration.path.ends_with("/*") { @@ -354,60 +310,42 @@ where graphql_configuration.path = format!("{}router_extra_path", graphql_configuration.path); } - let apq2 = apq.clone(); - let get_handler = if configuration.sandbox.enabled { - get({ - move |host: Host, Extension(service): Extension, http_request: Request| { - handle_get_with_static( - Sandbox::display_page(), - host, - apq2, - service.new_service().boxed(), - http_request, - ) - } - }) - } else if configuration.homepage.enabled { + Router::::new().route( + &graphql_configuration.path, get({ - move |host: Host, Extension(service): Extension, http_request: Request| { - handle_get_with_static( - Homepage::display_page(), - host, - apq2, - service.new_service().boxed(), - http_request, - ) + move |Extension(service): Extension, request: Request| { + handle_graphql(service.create().boxed(), request) } }) - } else { - get({ - move |host: Host, Extension(service): Extension, http_request: Request| { - handle_get(host, apq2, service.new_service().boxed(), http_request) + .post({ + move |Extension(service): Extension, request: Request| { + handle_graphql(service.create().boxed(), request) } - }) - }; + }), + ) +} - Router::::new().route( - &graphql_configuration.path, - get_handler - .post({ - move |host: Host, - uri: OriginalUri, - request: Result, JsonRejection>, - Extension(service): Extension, - header_map: HeaderMap| { - { - handle_post( - host, - uri, - request, - apq, - service.new_service().boxed(), - header_map, - ) - } +async fn handle_graphql( + service: router::BoxService, + http_request: Request, +) -> impl IntoResponse { + match 
service.oneshot(http_request.into()).await { + Err(e) => { + if let Some(source_err) = e.source() { + if source_err.is::() { + return RateLimited::new().into_response(); } - }) - .layer(middleware::from_fn(check_accept_header)), - ) + if source_err.is::() { + return Elapsed::new().into_response(); + } + } + tracing::error!("router service call failed: {}", e); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "router service call failed", + ) + .into_response() + } + Ok(response) => response.response.into_response(), + } } diff --git a/apollo-router/src/axum_factory/handlers.rs b/apollo-router/src/axum_factory/handlers.rs deleted file mode 100644 index 8e48480b01..0000000000 --- a/apollo-router/src/axum_factory/handlers.rs +++ /dev/null @@ -1,293 +0,0 @@ -//! Http handlers -use std::str::FromStr; - -use axum::body::StreamBody; -use axum::extract::rejection::JsonRejection; -use axum::extract::Host; -use axum::extract::OriginalUri; -use axum::http::header::HeaderMap; -use axum::http::StatusCode; -use axum::response::*; -use bytes::Bytes; -use futures::future::ready; -use futures::stream::once; -use futures::StreamExt; -use http::header::CONTENT_TYPE; -use http::HeaderValue; -use http::Request; -use http::Uri; -use hyper::Body; -use tower::util::BoxService; -use tower::BoxError; -use tower::ServiceExt; -use tower_service::Service; - -use super::utils::accepts_json; -use super::utils::accepts_multipart; -use super::utils::accepts_wildcard; -use super::utils::prefers_html; -use super::utils::process_vary_header; -use super::utils::APPLICATION_JSON_HEADER_VALUE; -use super::utils::GRAPHQL_JSON_RESPONSE_HEADER_VALUE; -use crate::graphql; -use crate::http_ext; -use crate::plugins::traffic_shaping::Elapsed; -use crate::plugins::traffic_shaping::RateLimited; -use crate::services::layers::apq::APQLayer; -use crate::services::MULTIPART_DEFER_CONTENT_TYPE; -use crate::SupergraphRequest; -use crate::SupergraphResponse; - -pub(super) async fn handle_get_with_static( - static_page: 
Bytes, - Host(host): Host, - apq: APQLayer, - service: BoxService, - http_request: Request, -) -> impl IntoResponse { - if prefers_html(http_request.headers()) { - return Html(static_page).into_response(); - } - - if let Some(request) = http_request - .uri() - .query() - .and_then(|q| graphql::Request::from_urlencoded_query(q.to_string()).ok()) - { - let mut http_request = http_request.map(|_| request); - *http_request.uri_mut() = Uri::from_str(&format!("http://{}{}", host, http_request.uri())) - .expect("the URL is already valid because it comes from axum; qed"); - return run_graphql_request(service, apq, http_request) - .await - .into_response(); - } - - (StatusCode::BAD_REQUEST, "Invalid GraphQL request").into_response() -} - -pub(super) async fn handle_get( - Host(host): Host, - apq: APQLayer, - service: BoxService, - http_request: Request, -) -> impl IntoResponse { - if let Some(request) = http_request - .uri() - .query() - .and_then(|q| graphql::Request::from_urlencoded_query(q.to_string()).ok()) - { - let mut http_request = http_request.map(|_| request); - *http_request.uri_mut() = Uri::from_str(&format!("http://{}{}", host, http_request.uri())) - .expect("the URL is already valid because it comes from axum; qed"); - return run_graphql_request(service, apq, http_request) - .await - .into_response(); - } - - ::tracing::error!( - monotonic_counter.apollo_router_http_requests_total = 1u64, - status = %400, - error = "missing query string", - "missing query string" - ); - (StatusCode::BAD_REQUEST, "Invalid Graphql request").into_response() -} - -pub(super) async fn handle_post( - Host(host): Host, - OriginalUri(uri): OriginalUri, - request_json: Result, JsonRejection>, - apq: APQLayer, - service: BoxService, - header_map: HeaderMap, -) -> impl IntoResponse { - let request = match request_json { - Ok(Json(req)) => req, - Err(json_err) => { - let json_err = json_err.into_response(); - ::tracing::error!( - monotonic_counter.apollo_router_http_requests_total = 1u64, 
- status = %json_err.status().as_u16(), - error = "failed to parse the request body as JSON", - "failed to parse the request body as JSON" - ); - - return json_err; - } - }; - - let mut http_request = Request::post( - Uri::from_str(&format!("http://{}{}", host, uri)) - .expect("the URL is already valid because it comes from axum; qed"), - ) - .body(request) - .expect("body has already been parsed; qed"); - *http_request.headers_mut() = header_map; - - run_graphql_request(service, apq, http_request) - .await - .into_response() -} - -async fn run_graphql_request( - service: RS, - apq: APQLayer, - http_request: Request, -) -> impl IntoResponse -where - RS: Service + Send, -{ - let (head, body) = http_request.into_parts(); - let mut req: SupergraphRequest = Request::from_parts(head, body).into(); - req = match apq.apq_request(req).await { - Ok(req) => req, - Err(res) => { - let (parts, mut stream) = res.response.into_parts(); - - return match stream.next().await { - None => { - tracing::error!( - monotonic_counter.apollo_router_http_requests_total = 1u64, - status = %StatusCode::SERVICE_UNAVAILABLE.as_u16(), - "router service is not available to process request" - ); - ( - StatusCode::SERVICE_UNAVAILABLE, - "router service is not available to process request", - ) - .into_response() - } - Some(body) => { - tracing::info!(monotonic_counter.apollo_router_http_requests_total = 1u64); - http_ext::Response::from(http::Response::from_parts(parts, body)) - .into_response() - } - }; - } - }; - - match service.ready_oneshot().await { - Ok(mut service) => { - let accepts_multipart = accepts_multipart(req.supergraph_request.headers()); - let accepts_json = accepts_json(req.supergraph_request.headers()); - let accepts_wildcard = accepts_wildcard(req.supergraph_request.headers()); - - match service.call(req).await { - Err(e) => { - if let Some(source_err) = e.source() { - if source_err.is::() { - return RateLimited::new().into_response(); - } - if source_err.is::() { - return 
Elapsed::new().into_response(); - } - } - tracing::error!("router service call failed: {}", e); - ( - StatusCode::INTERNAL_SERVER_ERROR, - "router service call failed", - ) - .into_response() - } - Ok(response) => { - let (mut parts, mut stream) = response.response.into_parts(); - - process_vary_header(&mut parts.headers); - - match stream.next().await { - None => { - tracing::error!("router service is not available to process request",); - ( - StatusCode::SERVICE_UNAVAILABLE, - "router service is not available to process request", - ) - .into_response() - } - Some(response) => { - if !response.has_next.unwrap_or(false) - && (accepts_json || accepts_wildcard) - { - parts.headers.insert( - CONTENT_TYPE, - HeaderValue::from_static("application/json"), - ); - tracing::trace_span!("serialize_response").in_scope(|| { - http_ext::Response::from(http::Response::from_parts( - parts, response, - )) - .into_response() - }) - } else if accepts_multipart { - parts.headers.insert( - CONTENT_TYPE, - HeaderValue::from_static(MULTIPART_DEFER_CONTENT_TYPE), - ); - - // each chunk contains a response and the next delimiter, to let client parsers - // know that they can process the response right away - let mut first_buf = Vec::from( - &b"\r\n--graphql\r\ncontent-type: application/json\r\n\r\n"[..], - ); - serde_json::to_writer(&mut first_buf, &response).unwrap(); - if response.has_next.unwrap_or(false) { - first_buf.extend_from_slice(b"\r\n--graphql\r\n"); - } else { - first_buf.extend_from_slice(b"\r\n--graphql--\r\n"); - } - - let body = once(ready(Ok(Bytes::from(first_buf)))).chain( - stream.map(|res| { - let mut buf = Vec::from( - &b"content-type: application/json\r\n\r\n"[..], - ); - serde_json::to_writer(&mut buf, &res).unwrap(); - - // the last chunk has a different end delimiter - if res.has_next.unwrap_or(false) { - buf.extend_from_slice(b"\r\n--graphql\r\n"); - } else { - buf.extend_from_slice(b"\r\n--graphql--\r\n"); - } - - Ok::<_, BoxError>(buf.into()) - }), - ); - - 
(parts, StreamBody::new(body)).into_response() - } else { - // this should be unreachable due to a previous check, but just to be sure... - ( - StatusCode::NOT_ACCEPTABLE, - format!( - r#"'accept' header can't be different than \"*/*\", {:?}, {:?} or {:?}"#, - APPLICATION_JSON_HEADER_VALUE, - GRAPHQL_JSON_RESPONSE_HEADER_VALUE, - MULTIPART_DEFER_CONTENT_TYPE - ), - ) - .into_response() - } - } - } - } - } - } - Err(e) => { - tracing::error!("router service is not available to process request: {}", e); - if let Some(source_err) = e.source() { - if source_err.is::() { - return RateLimited::new().into_response(); - } - if source_err.is::() { - return Elapsed::new().into_response(); - } - } - - ( - StatusCode::SERVICE_UNAVAILABLE, - "router service is not available to process request", - ) - .into_response() - } - } -} diff --git a/apollo-router/src/axum_factory/listeners.rs b/apollo-router/src/axum_factory/listeners.rs index 5e664e5bdc..726d6bec9d 100644 --- a/apollo-router/src/axum_factory/listeners.rs +++ b/apollo-router/src/axum_factory/listeners.rs @@ -367,18 +367,22 @@ mod tests { use super::*; use crate::axum_factory::tests::init_with_config; - use crate::axum_factory::tests::MockSupergraphService; use crate::configuration::Sandbox; use crate::configuration::Supergraph; - use crate::services::transport; + use crate::services::router; + use crate::services::router_service; #[tokio::test] async fn it_makes_sure_same_listenaddrs_are_accepted() { let configuration = Configuration::fake_builder().build().unwrap(); - init_with_config(MockSupergraphService::new(), configuration, MultiMap::new()) - .await - .unwrap(); + init_with_config( + router_service::empty().await, + Arc::new(configuration), + MultiMap::new(), + ) + .await + .unwrap(); } #[tokio::test] @@ -393,24 +397,29 @@ mod tests { .build() .unwrap(); - let endpoint = service_fn(|_req: transport::Request| async move { - Ok::<_, BoxError>( - http::Response::builder() - .body("this is a test".to_string().into()) 
+ let endpoint = service_fn(|req: router::Request| async move { + Ok::<_, BoxError>(router::Response { + response: http::Response::builder() + .body::("this is a test".to_string().into()) .unwrap(), - ) + context: req.context, + }) }) .boxed(); let mut web_endpoints = MultiMap::new(); web_endpoints.insert( SocketAddr::from_str("0.0.0.0:4010").unwrap().into(), - Endpoint::new("/".to_string(), endpoint), + Endpoint::from_router_service("/".to_string(), endpoint), ); - let error = init_with_config(MockSupergraphService::new(), configuration, web_endpoints) - .await - .unwrap_err(); + let error = init_with_config( + router_service::empty().await, + Arc::new(configuration), + web_endpoints, + ) + .await + .unwrap_err(); assert_eq!( "tried to bind 127.0.0.1 and 0.0.0.0 on port 4010", error.to_string() @@ -427,22 +436,23 @@ mod tests { ) .build() .unwrap(); - let endpoint = service_fn(|_req: transport::Request| async move { - Ok::<_, BoxError>( - http::Response::builder() - .body("this is a test".to_string().into()) + let endpoint = service_fn(|req: router::Request| async move { + Ok::<_, BoxError>(router::Response { + response: http::Response::builder() + .body::("this is a test".to_string().into()) .unwrap(), - ) + context: req.context, + }) }) .boxed(); let mut mm = MultiMap::new(); mm.insert( SocketAddr::from_str("127.0.0.1:4010").unwrap().into(), - Endpoint::new("/".to_string(), endpoint), + Endpoint::from_router_service("/".to_string(), endpoint), ); - let error = init_with_config(MockSupergraphService::new(), configuration, mm) + let error = init_with_config(router_service::empty().await, Arc::new(configuration), mm) .await .unwrap_err(); diff --git a/apollo-router/src/axum_factory/mod.rs b/apollo-router/src/axum_factory/mod.rs index 158bd34a7b..5f6668794d 100644 --- a/apollo-router/src/axum_factory/mod.rs +++ b/apollo-router/src/axum_factory/mod.rs @@ -1,9 +1,8 @@ //! 
axum factory is useful to create an [`AxumHttpServerFactory`] which implements [`crate::http_server_factory::HttpServerFactory`] mod axum_http_server_factory; -mod handlers; mod listeners; #[cfg(test)] -mod tests; +pub(crate) mod tests; pub(crate) mod utils; pub(crate) use axum_http_server_factory::make_axum_router; diff --git a/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap b/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap index 166a799aa1..b1ed456e61 100644 --- a/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap +++ b/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap @@ -1,6 +1,6 @@ --- source: apollo-router/src/axum_factory/tests.rs -expression: "json!([{\n \"data\" :\n {\n \"topProducts\" :\n [{ \"upc\" : \"1\", \"name\" : \"Table\", \"reviews\" : null },\n { \"upc\" : \"2\", \"name\" : \"Couch\", \"reviews\" : null }]\n }, \"errors\" :\n [{\n \"message\" :\n \"couldn't find mock for query {\\\"query\\\":\\\"query TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{__typename id product{__typename upc}}}}}\\\",\\\"operationName\\\":\\\"TopProducts__reviews__1\\\",\\\"variables\\\":{\\\"representations\\\":[{\\\"__typename\\\":\\\"Product\\\",\\\"upc\\\":\\\"1\\\"},{\\\"__typename\\\":\\\"Product\\\",\\\"upc\\\":\\\"2\\\"}]}}\"\n },\n {\n \"message\" :\n \"Subgraph response from 'reviews' was missing key `_entities`\",\n \"path\" : [\"topProducts\", \"@\"]\n }], \"hasNext\" : true,\n }, { \"hasNext\" : false }])" +expression: parts --- [ { @@ -20,14 +20,20 @@ expression: "json!([{\n \"data\" :\n {\n \"topProducts\ }, "errors": [ { - "message": "couldn't find mock for query {\"query\":\"query 
TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{__typename id product{__typename upc}}}}}\",\"operationName\":\"TopProducts__reviews__1\",\"variables\":{\"representations\":[{\"__typename\":\"Product\",\"upc\":\"1\"},{\"__typename\":\"Product\",\"upc\":\"2\"}]}}" + "message": "couldn't find mock for query {\"query\":\"query TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{__typename id product{__typename upc}}}}}\",\"operationName\":\"TopProducts__reviews__1\",\"variables\":{\"representations\":[{\"__typename\":\"Product\",\"upc\":\"1\"},{\"__typename\":\"Product\",\"upc\":\"2\"}]}}", + "extensions": { + "code": "FETCH_ERROR" + } }, { "message": "Subgraph response from 'reviews' was missing key `_entities`", "path": [ "topProducts", "@" - ] + ], + "extensions": { + "code": "PARSE_ERROR" + } } ], "hasNext": true diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs index d2cbed288c..1509051e1a 100644 --- a/apollo-router/src/axum_factory/tests.rs +++ b/apollo-router/src/axum_factory/tests.rs @@ -9,8 +9,10 @@ use std::sync::Arc; use async_compression::tokio::write::GzipDecoder; use async_compression::tokio::write::GzipEncoder; use axum::body::BoxBody; +use futures::future::BoxFuture; use futures::stream; use futures::stream::poll_fn; +use futures::Future; use futures::StreamExt; use http::header::ACCEPT_ENCODING; use http::header::CONTENT_ENCODING; @@ -19,6 +21,7 @@ use http::header::{self}; use http::HeaderMap; use http::HeaderValue; use http_body::Body; +use mime::APPLICATION_JSON; use mockall::mock; use multimap::MultiMap; use reqwest::header::ACCEPT; @@ -45,6 +48,7 @@ use tower::BoxError; use tower::Service; use tower::ServiceExt; +pub(crate) use super::axum_http_server_factory::make_axum_router; use super::*; use crate::configuration::cors::Cors; use crate::configuration::HealthCheck; @@ 
-52,15 +56,18 @@ use crate::configuration::Homepage; use crate::configuration::Sandbox; use crate::configuration::Supergraph; use crate::graphql; -use crate::http_ext; use crate::http_server_factory::HttpServerFactory; use crate::http_server_factory::HttpServerHandle; use crate::json_ext::Path; use crate::router_factory::Endpoint; -use crate::router_factory::SupergraphServiceFactory; -use crate::services::new_service::NewService; -use crate::services::transport; -use crate::services::SupergraphRequest; +use crate::router_factory::RouterFactory; +use crate::services::layers::static_page::home_page_content; +use crate::services::layers::static_page::sandbox_page_content; +use crate::services::new_service::ServiceFactory; +use crate::services::router; +use crate::services::router_service; +use crate::services::RouterRequest; +use crate::services::RouterResponse; use crate::services::SupergraphResponse; use crate::services::MULTIPART_DEFER_CONTENT_TYPE; use crate::test_harness::http_client; @@ -112,47 +119,54 @@ macro_rules! assert_header_contains { mock! 
{ #[derive(Debug)] - pub(super) SupergraphService { - fn service_call(&mut self, req: SupergraphRequest) -> Result; + pub(super) RouterService { + fn service_call(&mut self, req: RouterRequest) -> impl Future> + Send + 'static; } } -type MockSupergraphServiceType = tower_test::mock::Mock; +type MockRouterServiceType = tower_test::mock::Mock; #[derive(Clone)] -struct TestSupergraphServiceFactory { - inner: MockSupergraphServiceType, +struct TestRouterFactory { + inner: MockRouterServiceType, } -impl NewService for TestSupergraphServiceFactory { - type Service = MockSupergraphServiceType; +impl ServiceFactory for TestRouterFactory { + type Service = MockRouterServiceType; - fn new_service(&self) -> Self::Service { + fn create(&self) -> Self::Service { self.inner.clone() } } -impl SupergraphServiceFactory for TestSupergraphServiceFactory { - type SupergraphService = MockSupergraphServiceType; +impl RouterFactory for TestRouterFactory { + type RouterService = MockRouterServiceType; - type Future = - <>::Service as Service< - SupergraphRequest, - >>::Future; + type Future = <>::Service as Service< + router::Request, + >>::Future; fn web_endpoints(&self) -> MultiMap { MultiMap::new() } } -async fn init(mut mock: MockSupergraphService) -> (HttpServerHandle, Client) { +async fn init( + mut mock: impl Service< + router::Request, + Response = router::Response, + Error = BoxError, + Future = BoxFuture<'static, router::ServiceResult>, + > + Send + + 'static, +) -> (HttpServerHandle, Client) { let server_factory = AxumHttpServerFactory::new(); let (service, mut handle) = tower_test::mock::spawn(); tokio::spawn(async move { loop { while let Some((request, responder)) = handle.next_request().await { - match mock.service_call(request) { + match mock.ready().await.unwrap().call(request).await { Ok(response) => responder.send_response(response), Err(err) => responder.send_error(err), } @@ -161,7 +175,7 @@ async fn init(mut mock: MockSupergraphService) -> (HttpServerHandle, Client) { 
}); let server = server_factory .create( - TestSupergraphServiceFactory { + TestRouterFactory { inner: service.into_inner(), }, Arc::new( @@ -191,8 +205,14 @@ async fn init(mut mock: MockSupergraphService) -> (HttpServerHandle, Client) { .await .expect("Failed to create server factory"); let mut default_headers = HeaderMap::new(); - default_headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); - default_headers.insert(ACCEPT, HeaderValue::from_static("application/json")); + default_headers.insert( + CONTENT_TYPE, + HeaderValue::from_static(APPLICATION_JSON.essence_str()), + ); + default_headers.insert( + ACCEPT, + HeaderValue::from_static(APPLICATION_JSON.essence_str()), + ); let client = reqwest::Client::builder() .default_headers(default_headers) @@ -203,8 +223,14 @@ async fn init(mut mock: MockSupergraphService) -> (HttpServerHandle, Client) { } pub(super) async fn init_with_config( - mut mock: MockSupergraphService, - conf: Configuration, + mut router_service: impl Service< + router::Request, + Response = router::Response, + Error = BoxError, + Future = BoxFuture<'static, router::ServiceResult>, + > + Send + + 'static, + conf: Arc, web_endpoints: MultiMap, ) -> Result<(HttpServerHandle, Client), ApolloRouterError> { let server_factory = AxumHttpServerFactory::new(); @@ -213,7 +239,7 @@ pub(super) async fn init_with_config( tokio::spawn(async move { loop { while let Some((request, responder)) = handle.next_request().await { - match mock.service_call(request) { + match router_service.ready().await.unwrap().call(request).await { Ok(response) => responder.send_response(response), Err(err) => responder.send_error(err), } @@ -222,18 +248,24 @@ pub(super) async fn init_with_config( }); let server = server_factory .create( - TestSupergraphServiceFactory { + TestRouterFactory { inner: service.into_inner(), }, - Arc::new(conf), + conf, None, vec![], web_endpoints, ) .await?; let mut default_headers = HeaderMap::new(); - 
default_headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); - default_headers.insert(ACCEPT, HeaderValue::from_static("application/json")); + default_headers.insert( + CONTENT_TYPE, + HeaderValue::from_static(APPLICATION_JSON.essence_str()), + ); + default_headers.insert( + ACCEPT, + HeaderValue::from_static(APPLICATION_JSON.essence_str()), + ); let client = reqwest::Client::builder() .default_headers(default_headers) @@ -245,7 +277,13 @@ pub(super) async fn init_with_config( #[cfg(unix)] async fn init_unix( - mut mock: MockSupergraphService, + mut mock: impl Service< + router::Request, + Response = router::Response, + Error = BoxError, + Future = BoxFuture<'static, router::ServiceResult>, + > + Send + + 'static, temp_dir: &tempfile::TempDir, ) -> HttpServerHandle { let server_factory = AxumHttpServerFactory::new(); @@ -254,7 +292,7 @@ async fn init_unix( tokio::spawn(async move { loop { while let Some((request, responder)) = handle.next_request().await { - match mock.service_call(request) { + match mock.ready().await.unwrap().call(request).await { Ok(response) => responder.send_response(response), Err(err) => responder.send_error(err), } @@ -264,7 +302,7 @@ async fn init_unix( server_factory .create( - TestSupergraphServiceFactory { + TestRouterFactory { inner: service.into_inner(), }, Arc::new( @@ -286,17 +324,27 @@ async fn init_unix( } #[tokio::test] -async fn it_displays_sandbox() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); +async fn it_displays_sandbox() { + let conf = Arc::new( + Configuration::fake_builder() + .sandbox(Sandbox::fake_builder().enabled(true).build()) + .homepage(Homepage::fake_builder().enabled(false).build()) + .supergraph(Supergraph::fake_builder().introspection(true).build()) + .build() + .unwrap(), + ); - let conf = Configuration::fake_builder() - .sandbox(Sandbox::fake_builder().enabled(true).build()) - .homepage(Homepage::fake_builder().enabled(false).build()) - 
.supergraph(Supergraph::fake_builder().introspection(true).build()) - .build() - .unwrap(); + let router_service = router_service::from_supergraph_mock_callback_and_configuration( + move |_| { + panic!("this should never be called"); + }, + conf.clone(), + ) + .await; - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; + let (server, client) = init_with_config(router_service, conf, MultiMap::new()) + .await + .unwrap(); // Regular studio redirect let response = client @@ -314,29 +362,36 @@ async fn it_displays_sandbox() -> Result<(), ApolloRouterError> { "{}", response.text().await.unwrap() ); - assert_eq!(response.bytes().await.unwrap(), Sandbox::display_page()); - - Ok(()) + assert_eq!(response.text().await.unwrap(), sandbox_page_content()); } #[tokio::test] -async fn it_displays_sandbox_with_different_supergraph_path() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); +async fn it_displays_sandbox_with_different_supergraph_path() { + let conf = Arc::new( + Configuration::fake_builder() + .sandbox(Sandbox::fake_builder().enabled(true).build()) + .homepage(Homepage::fake_builder().enabled(false).build()) + .supergraph( + Supergraph::fake_builder() + .introspection(true) + .path("/custom") + .build(), + ) + .build() + .unwrap(), + ); - let conf = Configuration::fake_builder() - .sandbox(Sandbox::fake_builder().enabled(true).build()) - .homepage(Homepage::fake_builder().enabled(false).build()) - .supergraph( - Supergraph::fake_builder() - .introspection(true) - .path("/custom") - .build(), - ) - .build() + let router_service = router_service::from_supergraph_mock_callback_and_configuration( + move |_| { + panic!("this should never be called"); + }, + conf.clone(), + ) + .await; + let (server, client) = init_with_config(router_service, conf, MultiMap::new()) + .await .unwrap(); - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; - // Regular studio redirect let 
response = client .get(&format!( @@ -353,9 +408,7 @@ async fn it_displays_sandbox_with_different_supergraph_path() -> Result<(), Apol "{}", response.text().await.unwrap() ); - assert_eq!(response.bytes().await.unwrap(), Sandbox::display_page()); - - Ok(()) + assert_eq!(response.text().await.unwrap(), sandbox_page_content()); } #[tokio::test] @@ -364,18 +417,16 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> { .data(json!({"response": "yayyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"})) // Body must be bigger than 32 to be compressed .build(); let example_response = expected_response.clone(); - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(2) - .returning(move |_req| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response = example_response.clone(); + + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + )) + }) + .await; + let (server, client) = init(router_service).await; let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); // Post query @@ -415,7 +466,7 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> { assert_eq!( response.headers().get(header::CONTENT_TYPE), - Some(&HeaderValue::from_static("application/json")) + Some(&HeaderValue::from_static(APPLICATION_JSON.essence_str())) ); assert_eq!( response.headers().get(&CONTENT_ENCODING), @@ -449,25 +500,16 @@ async fn it_decompress_request_body() -> Result<(), ApolloRouterError> { .data(json!({"response": "yayyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"})) // Body must be bigger than 32 to be compressed .build(); let example_response = expected_response.clone(); - let mut expectations = 
MockSupergraphService::new(); - expectations - .expect_service_call() - .times(1) - .withf(move |req| { - assert_eq!( - req.supergraph_request.body().query.as_ref().unwrap(), - "query" - ); - true - }) - .returning(move |_req| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response = example_response.clone(); + assert_eq!(req.supergraph_request.into_body().query.unwrap(), "query"); + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + )) + }) + .await; + let (server, client) = init(router_service).await; let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); // Post query @@ -492,8 +534,7 @@ async fn it_decompress_request_body() -> Result<(), ApolloRouterError> { #[tokio::test] async fn malformed_request() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); - let (server, client) = init(expectations).await; + let (server, client) = init(router_service::empty().await).await; let response = client .post(format!( @@ -514,18 +555,16 @@ async fn response() -> Result<(), ApolloRouterError> { .data(json!({"response": "yay"})) .build(); let example_response = expected_response.clone(); - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(2) - .returning(move |_| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response = example_response.clone(); + + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + 
)) + }) + .await; + let (server, client) = init(router_service).await; let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); // Post query @@ -555,7 +594,7 @@ async fn response() -> Result<(), ApolloRouterError> { assert_eq!( response.headers().get(header::CONTENT_TYPE), - Some(&HeaderValue::from_static("application/json")) + Some(&HeaderValue::from_static(APPLICATION_JSON.essence_str())) ); assert_eq!( @@ -569,8 +608,7 @@ async fn response() -> Result<(), ApolloRouterError> { #[tokio::test] async fn bad_response() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); - let (server, client) = init(expectations).await; + let (server, client) = init(router_service::empty().await).await; let url = format!("{}/test", server.graphql_listen_address().as_ref().unwrap()); // Post query @@ -609,17 +647,16 @@ async fn response_with_custom_endpoint() -> Result<(), ApolloRouterError> { .data(json!({"response": "yay"})) .build(); let example_response = expected_response.clone(); - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(2) - .returning(move |_| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); + + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response = example_response.clone(); + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + )) + }) + .await; + let conf = Configuration::fake_builder() .supergraph( crate::configuration::Supergraph::fake_builder() @@ -628,7 +665,8 @@ async fn response_with_custom_endpoint() -> Result<(), ApolloRouterError> { ) .build() .unwrap(); - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; + let (server, client) = + init_with_config(router_service, Arc::new(conf), MultiMap::new()).await?; let url = format!( 
"{}/graphql", server.graphql_listen_address().as_ref().unwrap() @@ -674,17 +712,15 @@ async fn response_with_custom_prefix_endpoint() -> Result<(), ApolloRouterError> .data(json!({"response": "yay"})) .build(); let example_response = expected_response.clone(); - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(2) - .returning(move |_| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response = example_response.clone(); + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + )) + }) + .await; + let conf = Configuration::fake_builder() .supergraph( crate::configuration::Supergraph::fake_builder() @@ -693,7 +729,8 @@ async fn response_with_custom_prefix_endpoint() -> Result<(), ApolloRouterError> ) .build() .unwrap(); - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; + let (server, client) = + init_with_config(router_service, Arc::new(conf), MultiMap::new()).await?; let url = format!( "{}/prefix/graphql", server.graphql_listen_address().as_ref().unwrap() @@ -739,17 +776,16 @@ async fn response_with_custom_endpoint_wildcard() -> Result<(), ApolloRouterErro .data(json!({"response": "yay"})) .build(); let example_response = expected_response.clone(); - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(4) - .returning(move |_| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); + + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response = example_response.clone(); + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + )) + 
}) + .await; + let conf = Configuration::fake_builder() .supergraph( crate::configuration::Supergraph::fake_builder() @@ -758,7 +794,8 @@ async fn response_with_custom_endpoint_wildcard() -> Result<(), ApolloRouterErro ) .build() .unwrap(); - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; + let (server, client) = + init_with_config(router_service, Arc::new(conf), MultiMap::new()).await?; for url in &[ format!( "{}/graphql/test", @@ -804,141 +841,22 @@ async fn response_with_custom_endpoint_wildcard() -> Result<(), ApolloRouterErro Ok(()) } -#[tokio::test] -async fn it_extracts_query_and_operation_name_on_get_requests() -> Result<(), ApolloRouterError> { - let query = "query"; - let expected_query = query; - let operation_name = "operationName"; - let expected_operation_name = operation_name; - - let expected_response = graphql::Response::builder() - .data(json!({"response": "yay"})) - .build(); - let example_response = expected_response.clone(); - - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(1) - .withf(move |req| { - assert_eq!( - req.supergraph_request.body().query.as_deref().unwrap(), - expected_query - ); - assert_eq!( - req.supergraph_request - .body() - .operation_name - .as_deref() - .unwrap(), - expected_operation_name - ); - true - }) - .returning(move |_| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); - let (server, client) = init(expectations).await; - let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); - - let response = client - .get(url.as_str()) - .query(&[("query", query), ("operationName", operation_name)]) - .send() - .await - .unwrap() - .error_for_status() - .unwrap(); - - assert_eq!( - response.json::().await.unwrap(), - expected_response, - ); - - server.shutdown().await?; - Ok(()) -} - -#[tokio::test] -async fn 
it_extracts_query_and_operation_name_on_post_requests() -> Result<(), ApolloRouterError> { - let query = "query"; - let expected_query = query; - let operation_name = "operationName"; - let expected_operation_name = operation_name; - - let expected_response = graphql::Response::builder() - .data(json!({"response": "yay"})) - .build(); - let example_response = expected_response.clone(); - - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(1) - .withf(move |req| { - assert_eq!( - req.supergraph_request.body().query.as_deref().unwrap(), - expected_query - ); - assert_eq!( - req.supergraph_request - .body() - .operation_name - .as_deref() - .unwrap(), - expected_operation_name - ); - true - }) - .returning(move |_| { - let example_response = example_response.clone(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); - let (server, client) = init(expectations).await; - let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); - - let response = client - .post(url.as_str()) - .body(json!({ "query": query, "operationName": operation_name }).to_string()) - .send() - .await - .unwrap() - .error_for_status() - .unwrap(); - - assert_eq!( - response.json::().await.unwrap(), - expected_response, - ); - - server.shutdown().await -} - #[tokio::test] async fn response_failure() -> Result<(), ApolloRouterError> { - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(1) - .returning(move |_| { - let example_response = crate::error::FetchError::SubrequestHttpError { - service: "Mock service".to_string(), - reason: "Mock error".to_string(), - } - .to_response(); - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response 
= crate::error::FetchError::SubrequestHttpError { + service: "Mock service".to_string(), + reason: "Mock error".to_string(), + } + .to_response(); + + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + )) + }) + .await; + let (server, client) = init(router_service).await; let response = client .post(format!( @@ -972,7 +890,6 @@ async fn response_failure() -> Result<(), ApolloRouterError> { #[tokio::test] async fn cors_preflight() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); let conf = Configuration::fake_builder() .cors(Cors::builder().build()) .supergraph( @@ -982,7 +899,12 @@ async fn cors_preflight() -> Result<(), ApolloRouterError> { ) .build() .unwrap(); - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; + let (server, client) = init_with_config( + router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await?; let response = client .request( @@ -1030,8 +952,7 @@ async fn cors_preflight() -> Result<(), ApolloRouterError> { #[tokio::test] async fn test_previous_health_check_returns_four_oh_four() { - let expectations = MockSupergraphService::new(); - let (server, client) = init(expectations).await; + let (server, client) = init(router_service::empty().await).await; let url = format!( "{}/.well-known/apollo/server-health", server.graphql_listen_address().as_ref().unwrap() @@ -1046,8 +967,17 @@ async fn it_send_bad_content_type() -> Result<(), ApolloRouterError> { let query = "query"; let operation_name = "operationName"; - let expectations = MockSupergraphService::new(); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(|req| { + Ok(SupergraphResponse::new_from_graphql_response( + graphql::Response::builder() + .data(json!({"response": "hey"})) + .build(), + req.context, + )) + }) + .await; + + let (server, client) = init(router_service).await; let url = 
format!("{}", server.graphql_listen_address().as_ref().unwrap()); let response = client .post(url.as_str()) @@ -1067,36 +997,52 @@ async fn it_sends_bad_accept_header() -> Result<(), ApolloRouterError> { let query = "query"; let operation_name = "operationName"; - let expectations = MockSupergraphService::new(); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(|req| { + Ok(SupergraphResponse::new_from_graphql_response( + graphql::Response::builder() + .data(json!({"response": "hey"})) + .build(), + req.context, + )) + }) + .await; + + let (server, client) = init(router_service).await; let url = format!("{}", server.graphql_listen_address().as_ref().unwrap()); let response = client .post(url.as_str()) .header(ACCEPT, "foo/bar") - .header(CONTENT_TYPE, "application/json") + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) .body(json!({ "query": query, "operationName": operation_name }).to_string()) .send() .await .unwrap(); - assert_eq!(response.status(), StatusCode::NOT_ACCEPTABLE,); + assert_eq!(response.status(), StatusCode::NOT_ACCEPTABLE); server.shutdown().await } #[test(tokio::test)] -async fn it_doesnt_display_disabled_sandbox() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); - let conf = Configuration::fake_builder() - // sandbox is disabled by default, but homepage will take over if we dont disable it - .homepage( - crate::configuration::Homepage::fake_builder() - .enabled(false) - .build(), - ) - .build() +async fn it_displays_homepage() { + let conf = Arc::new(Configuration::fake_builder().build().unwrap()); + + let router_service = router_service::from_supergraph_mock_callback_and_configuration( + |req| { + Ok(SupergraphResponse::new_from_graphql_response( + graphql::Response::builder() + .data(json!({"response": "test"})) + .build(), + req.context, + )) + }, + conf.clone(), + ) + .await; + + let (server, client) = 
init_with_config(router_service, conf, MultiMap::new()) + .await .unwrap(); - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; let response = client .get(&format!( "{}/", @@ -1107,29 +1053,47 @@ async fn it_doesnt_display_disabled_sandbox() -> Result<(), ApolloRouterError> { .await .unwrap(); - assert_eq!(response.status(), StatusCode::BAD_REQUEST); - - server.shutdown().await + assert_eq!(response.status(), StatusCode::OK); + assert_eq!(response.text().await.unwrap(), home_page_content()); + server.shutdown().await.unwrap(); } #[test(tokio::test)] -async fn it_doesnt_display_disabled_homepage() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); - let conf = Configuration::fake_builder() - .homepage( - crate::configuration::Homepage::fake_builder() - .enabled(false) - .build(), - ) - .build() +async fn it_doesnt_display_disabled_homepage() { + let conf = Arc::new( + Configuration::fake_builder() + .homepage( + crate::configuration::Homepage::fake_builder() + .enabled(false) + .build(), + ) + .build() + .unwrap(), + ); + + let router_service = router_service::from_supergraph_mock_callback_and_configuration( + |req| { + Ok(SupergraphResponse::new_from_graphql_response( + graphql::Response::builder() + .data(json!({"response": "test"})) + .build(), + req.context, + )) + }, + conf.clone(), + ) + .await; + + let (server, client) = init_with_config(router_service, conf, MultiMap::new()) + .await .unwrap(); - let (server, client) = init_with_config(expectations, conf, MultiMap::new()).await?; let response = client .get(&format!( "{}/", server.graphql_listen_address().as_ref().unwrap() )) .header(ACCEPT, "text/html") + .header(ACCEPT, "*/*") .send() .await .unwrap(); @@ -1141,33 +1105,41 @@ async fn it_doesnt_display_disabled_homepage() -> Result<(), ApolloRouterError> response.text().await ); - server.shutdown().await + server.shutdown().await.unwrap(); } #[test(tokio::test)] async fn 
it_answers_to_custom_endpoint() -> Result<(), ApolloRouterError> { - let expectations = MockSupergraphService::new(); - let endpoint = service_fn(|req: transport::Request| async move { + let endpoint = service_fn(|req: router::Request| async move { Ok::<_, BoxError>( http::Response::builder() .status(StatusCode::OK) - .body(format!("{} + {}", req.method(), req.uri().path()).into()) - .unwrap(), + .body( + format!( + "{} + {}", + req.router_request.method(), + req.router_request.uri().path() + ) + .into(), + ) + .unwrap() + .into(), ) }) .boxed_clone(); let mut web_endpoints = MultiMap::new(); web_endpoints.insert( ListenAddr::SocketAddr("127.0.0.1:0".parse().unwrap()), - Endpoint::new("/a-custom-path".to_string(), endpoint.clone().boxed()), + Endpoint::from_router_service("/a-custom-path".to_string(), endpoint.clone().boxed()), ); web_endpoints.insert( ListenAddr::SocketAddr("127.0.0.1:0".parse().unwrap()), - Endpoint::new("/an-other-custom-path".to_string(), endpoint.boxed()), + Endpoint::from_router_service("/an-other-custom-path".to_string(), endpoint.boxed()), ); let conf = Configuration::fake_builder().build().unwrap(); - let (server, client) = init_with_config(expectations, conf, web_endpoints).await?; + let (server, client) = + init_with_config(router_service::empty().await, Arc::new(conf), web_endpoints).await?; for path in &["/a-custom-path", "/an-other-custom-path"] { let response = client @@ -1244,12 +1216,20 @@ async fn it_refuses_to_start_if_sandbox_is_enabled_and_introspection_is_not() { #[test(tokio::test)] async fn it_refuses_to_bind_two_extra_endpoints_on_the_same_path() { - let endpoint = service_fn(|req: transport::Request| async move { + let endpoint = service_fn(|req: router::Request| async move { Ok::<_, BoxError>( http::Response::builder() .status(StatusCode::OK) - .body(format!("{} + {}", req.method(), req.uri().path()).into()) - .unwrap(), + .body( + format!( + "{} + {}", + req.router_request.method(), + req.router_request.uri().path() + ) 
+ .into(), + ) + .unwrap() + .into(), ) }) .boxed_clone(); @@ -1257,15 +1237,15 @@ async fn it_refuses_to_bind_two_extra_endpoints_on_the_same_path() { let mut web_endpoints = MultiMap::new(); web_endpoints.insert( ListenAddr::SocketAddr("127.0.0.1:0".parse().unwrap()), - Endpoint::new("/a-custom-path".to_string(), endpoint.clone().boxed()), + Endpoint::from_router_service("/a-custom-path".to_string(), endpoint.clone().boxed()), ); web_endpoints.insert( ListenAddr::SocketAddr("127.0.0.1:0".parse().unwrap()), - Endpoint::new("/a-custom-path".to_string(), endpoint.boxed()), + Endpoint::from_router_service("/a-custom-path".to_string(), endpoint.boxed()), ); let conf = Configuration::fake_builder().build().unwrap(); - let error = init_with_config(MockSupergraphService::new(), conf, web_endpoints) + let error = init_with_config(router_service::empty().await, Arc::new(conf), web_endpoints) .await .unwrap_err(); @@ -1275,72 +1255,9 @@ async fn it_refuses_to_bind_two_extra_endpoints_on_the_same_path() { ) } -#[test(tokio::test)] -async fn it_checks_the_shape_of_router_request() -> Result<(), ApolloRouterError> { - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(2) - .returning(move |req| { - Ok(SupergraphResponse::new_from_graphql_response( - graphql::Response::builder() - .data(json!(format!( - "{} + {} + {:?}", - req.supergraph_request.method(), - req.supergraph_request.uri(), - serde_json::to_string(req.supergraph_request.body()).unwrap() - ))) - .build(), - Context::new(), - )) - }); - let (server, client) = init(expectations).await; - let query = json!( - { - "query": "query", - }); - let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); - let response = client.get(&url).query(&query).send().await.unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - assert_eq!( - response.text().await.unwrap(), - serde_json::to_string(&json!({ - "data": - format!( - "GET + {}?query=query + {:?}", - 
url, - serde_json::to_string(&query).unwrap() - ) - })) - .unwrap() - ); - let response = client - .post(&url) - .body(query.to_string()) - .send() - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - assert_eq!( - response.text().await.unwrap(), - serde_json::to_string(&json!({ - "data": - format!( - "POST + {} + {:?}", - url, - serde_json::to_string(&query).unwrap() - ) - })) - .unwrap() - ); - server.shutdown().await -} - #[tokio::test] async fn cors_origin_default() -> Result<(), ApolloRouterError> { - let (server, client) = init(MockSupergraphService::new()).await; + let (server, client) = init(router_service::empty().await).await; let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); let response = @@ -1359,8 +1276,12 @@ async fn cors_allow_any_origin() -> Result<(), ApolloRouterError> { .cors(Cors::builder().allow_any_origin(true).build()) .build() .unwrap(); - let (server, client) = - init_with_config(MockSupergraphService::new(), conf, MultiMap::new()).await?; + let (server, client) = init_with_config( + router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await?; let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); let response = request_cors_with_origin(&client, url.as_str(), "https://thisisatest.com").await; @@ -1381,8 +1302,12 @@ async fn cors_origin_list() -> Result<(), ApolloRouterError> { ) .build() .unwrap(); - let (server, client) = - init_with_config(MockSupergraphService::new(), conf, MultiMap::new()).await?; + let (server, client) = init_with_config( + router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await?; let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); let response = request_cors_with_origin(&client, url.as_str(), valid_origin).await; @@ -1408,8 +1333,12 @@ async fn cors_origin_regex() -> Result<(), ApolloRouterError> { ) .build() .unwrap(); - let (server, client) = - 
init_with_config(MockSupergraphService::new(), conf, MultiMap::new()).await?; + let (server, client) = init_with_config( + router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await?; let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); // regex tests @@ -1474,21 +1403,18 @@ fn origin_valid(headers: &HeaderMap, origin: &str) -> bool { #[test(tokio::test)] async fn response_shape() -> Result<(), ApolloRouterError> { - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(1) - .returning(move |_| { - Ok(SupergraphResponse::new_from_graphql_response( - graphql::Response::builder() - .data(json!({ - "test": "hello" - })) - .build(), - Context::new(), - )) - }); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(move |req| { + Ok(SupergraphResponse::new_from_graphql_response( + graphql::Response::builder() + .data(json!({ + "test": "hello" + })) + .build(), + req.context, + )) + }) + .await; + let (server, client) = init(router_service).await; let query = json!( { "query": "query { test }", @@ -1505,7 +1431,7 @@ async fn response_shape() -> Result<(), ApolloRouterError> { assert_eq!(response.status(), StatusCode::OK); assert_eq!( response.headers().get(CONTENT_TYPE), - Some(&HeaderValue::from_static("application/json")) + Some(&HeaderValue::from_static(APPLICATION_JSON.essence_str())) ); assert_eq!( @@ -1523,36 +1449,33 @@ async fn response_shape() -> Result<(), ApolloRouterError> { #[test(tokio::test)] async fn deferred_response_shape() -> Result<(), ApolloRouterError> { - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(1) - .returning(move |_| { - let body = stream::iter(vec![ - graphql::Response::builder() + let router_service = router_service::from_supergraph_mock_callback(|req| { + let body = stream::iter(vec![ + graphql::Response::builder() + 
.data(json!({ + "test": "hello", + })) + .has_next(true) + .build(), + graphql::Response::builder() + .incremental(vec![graphql::IncrementalResponse::builder() .data(json!({ - "test": "hello", + "other": "world" })) - .has_next(true) - .build(), - graphql::Response::builder() - .incremental(vec![graphql::IncrementalResponse::builder() - .data(json!({ - "other": "world" - })) - .path(Path::default()) - .build()]) - .has_next(true) - .build(), - graphql::Response::builder().has_next(false).build(), - ]) - .boxed(); - Ok(SupergraphResponse::new_from_response( - http::Response::builder().status(200).body(body).unwrap(), - Context::new(), - )) - }); - let (server, client) = init(expectations).await; + .path(Path::default()) + .build()]) + .has_next(true) + .build(), + graphql::Response::builder().has_next(false).build(), + ]) + .boxed(); + Ok(SupergraphResponse::new_from_response( + http::Response::builder().status(200).body(body).unwrap(), + req.context, + )) + }) + .await; + let (server, client) = init(router_service).await; let query = json!( { "query": "query { test ... 
@defer { other } }", @@ -1598,24 +1521,22 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> { #[test(tokio::test)] async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterError> { - let mut expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(1) - .returning(move |_| { - let body = stream::iter(vec![graphql::Response::builder() - .data(json!({ - "test": "hello", - })) - .has_next(false) - .build()]) - .boxed(); - Ok(SupergraphResponse::new_from_response( - http::Response::builder().status(200).body(body).unwrap(), - Context::new(), - )) - }); - let (server, client) = init(expectations).await; + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let body = stream::iter(vec![graphql::Response::builder() + .data(json!({ + "test": "hello", + })) + .has_next(false) + .build()]) + .boxed(); + + Ok(SupergraphResponse::new_from_response( + http::Response::builder().status(200).body(body).unwrap(), + req.context, + )) + }) + .await; + let (server, client) = init(router_service).await; let query = json!( { "query": "query { test }", @@ -1663,7 +1584,7 @@ async fn it_supports_server_restart() { let server_factory = AxumHttpServerFactory::new(); let (service, _) = tower_test::mock::spawn(); - let supergraph_service_factory = TestSupergraphServiceFactory { + let supergraph_service_factory = TestRouterFactory { inner: service.into_inner(), }; @@ -1766,8 +1687,10 @@ async fn http_compressed_service() -> impl Service< let service = http_client::response_decompression(service) .map_request(|mut req: http::Request| { - req.headers_mut() - .append(ACCEPT, HeaderValue::from_static("application/json")); + req.headers_mut().append( + ACCEPT, + HeaderValue::from_static(APPLICATION_JSON.essence_str()), + ); req }) .map_future(|future| async { @@ -1946,19 +1869,15 @@ async fn listening_to_unix_socket() { .build(); let example_response = expected_response.clone(); - let mut 
expectations = MockSupergraphService::new(); - expectations - .expect_service_call() - .times(2) - .returning(move |_| { - let example_response = example_response.clone(); - - Ok(SupergraphResponse::new_from_graphql_response( - example_response, - Context::new(), - )) - }); - let server = init_unix(expectations, &temp_dir).await; + let router_service = router_service::from_supergraph_mock_callback(move |req| { + let example_response = example_response.clone(); + Ok(SupergraphResponse::new_from_graphql_response( + example_response, + req.context, + )) + }) + .await; + let server = init_unix(router_service, &temp_dir).await; let output = send_to_unix_socket( server.graphql_listen_address().as_ref().unwrap(), @@ -2054,24 +1973,16 @@ Accept: application/json\r #[tokio::test] async fn test_health_check() { - let mut expectations = MockSupergraphService::new(); - expectations.expect_service_call().once().returning(|_| { - Ok(SupergraphResponse::new_from_response( - http_ext::from_response_to_stream( - http::Response::builder() - .status(200) - .body( - graphql::Response::builder() - .data(json!({ "__typename": "Query"})) - .build(), - ) - .unwrap(), - ), - Context::new(), - )) - }); + let router_service = router_service::from_supergraph_mock_callback(|_| { + Ok(crate::supergraph::Response::builder() + .data(json!({ "__typename": "Query"})) + .context(Context::new()) + .build() + .unwrap()) + }) + .await; - let (server, client) = init(expectations).await; + let (server, client) = init(router_service).await; let url = format!( "{}/health", server.graphql_listen_address().as_ref().unwrap() @@ -2097,12 +2008,14 @@ async fn test_health_check_custom_listener() { .build() .unwrap(); - let expectations = MockSupergraphService::new(); - // keep the server handle around otherwise it will immediately shutdown - let (_server, client) = init_with_config(expectations, conf, MultiMap::new()) - .await - .unwrap(); + let (_server, client) = init_with_config( + 
router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await + .unwrap(); let url = "http://localhost:4012/health"; let response = client.get(url).send().await.unwrap(); @@ -2125,10 +2038,13 @@ async fn test_sneaky_supergraph_and_health_check_configuration() { .supergraph(Supergraph::fake_builder().path("/health").build()) // here be dragons .build() .unwrap(); - let expectations = MockSupergraphService::new(); - let error = init_with_config(expectations, conf, MultiMap::new()) - .await - .unwrap_err(); + let error = init_with_config( + router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await + .unwrap_err(); assert_eq!( "tried to register two endpoints on `127.0.0.1:0/health`", @@ -2148,10 +2064,13 @@ async fn test_sneaky_supergraph_and_disabled_health_check_configuration() { .supergraph(Supergraph::fake_builder().path("/health").build()) .build() .unwrap(); - let expectations = MockSupergraphService::new(); - let _ = init_with_config(expectations, conf, MultiMap::new()) - .await - .unwrap(); + let _ = init_with_config( + router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await + .unwrap(); } #[tokio::test] @@ -2170,10 +2089,13 @@ async fn test_supergraph_and_health_check_same_port_different_listener() { ) .build() .unwrap(); - let expectations = MockSupergraphService::new(); - let error = init_with_config(expectations, conf, MultiMap::new()) - .await - .unwrap_err(); + let error = init_with_config( + router_service::empty().await, + Arc::new(conf), + MultiMap::new(), + ) + .await + .unwrap_err(); assert_eq!( "tried to bind 0.0.0.0 and 127.0.0.1 on port 4013", diff --git a/apollo-router/src/axum_factory/utils.rs b/apollo-router/src/axum_factory/utils.rs index 3c457d12dc..815617d9a4 100644 --- a/apollo-router/src/axum_factory/utils.rs +++ b/apollo-router/src/axum_factory/utils.rs @@ -3,27 +3,13 @@ use async_compression::tokio::write::BrotliDecoder; use async_compression::tokio::write::GzipDecoder; 
use async_compression::tokio::write::ZlibDecoder; -use axum::http::header::HeaderMap; use axum::http::StatusCode; use axum::middleware::Next; use axum::response::*; use futures::prelude::*; -use http::header::ACCEPT; use http::header::CONTENT_ENCODING; -use http::header::VARY; -use http::HeaderValue; -use http::Method; use http::Request; use hyper::Body; -use mediatype::names::APPLICATION; -use mediatype::names::HTML; -use mediatype::names::JSON; -use mediatype::names::MIXED; -use mediatype::names::MULTIPART; -use mediatype::names::TEXT; -use mediatype::MediaType; -use mediatype::MediaTypeList; -use mediatype::ReadParams; use opentelemetry::global; use opentelemetry::trace::TraceContextExt; use tokio::io::AsyncWriteExt; @@ -31,28 +17,7 @@ use tower_http::trace::MakeSpan; use tracing::Level; use tracing::Span; -use crate::services::MULTIPART_DEFER_CONTENT_TYPE; -use crate::services::MULTIPART_DEFER_SPEC_PARAMETER; -use crate::services::MULTIPART_DEFER_SPEC_VALUE; - pub(crate) const REQUEST_SPAN_NAME: &str = "request"; -pub(crate) const APPLICATION_JSON_HEADER_VALUE: &str = "application/json"; -pub(crate) const GRAPHQL_JSON_RESPONSE_HEADER_VALUE: &str = "application/graphql-response+json"; - -pub(super) fn prefers_html(headers: &HeaderMap) -> bool { - let text_html = MediaType::new(TEXT, HTML); - - headers.get_all(&http::header::ACCEPT).iter().any(|value| { - value - .to_str() - .map(|accept_str| { - let mut list = MediaTypeList::new(accept_str); - - list.any(|mime| mime.as_ref() == Ok(&text_html)) - }) - .unwrap_or(false) - }) -} pub(super) async fn decompress_request_body( req: Request, @@ -127,117 +92,8 @@ pub(super) async fn decompress_request_body( } } -pub(super) async fn check_accept_header( - req: Request, - next: Next, -) -> Result { - let ask_for_html = req.method() == Method::GET && prefers_html(req.headers()); - - if accepts_wildcard(req.headers()) - || ask_for_html - || accepts_multipart(req.headers()) - || accepts_json(req.headers()) - { - 
Ok(next.run(req).await) - } else { - ::tracing::error!( - monotonic_counter.apollo_router_http_requests_total = 1u64, - status = %406u16, - error = "accept header is wrong", - ); - - Err(( - StatusCode::NOT_ACCEPTABLE, - format!( - r#"'accept' header can't be different than \"*/*\", {:?}, {:?} or {:?}"#, - APPLICATION_JSON_HEADER_VALUE, - GRAPHQL_JSON_RESPONSE_HEADER_VALUE, - MULTIPART_DEFER_CONTENT_TYPE - ), - ) - .into_response()) - } -} - -/// Returns true if the headers contain header `accept: */*` -pub(crate) fn accepts_wildcard(headers: &HeaderMap) -> bool { - headers.get_all(ACCEPT).iter().any(|value| { - value - .to_str() - .map(|accept_str| accept_str == "*/*") - .unwrap_or(false) - }) -} - -/// Returns true if the headers contain `accept: application/json` or `accept: application/graphql-response+json`, -/// or if there is no `accept` header -pub(crate) fn accepts_json(headers: &HeaderMap) -> bool { - !headers.contains_key(ACCEPT) - || headers.get_all(ACCEPT).iter().any(|value| { - value - .to_str() - .map(|accept_str| { - let mut list = MediaTypeList::new(accept_str); - - list.any(|mime| { - mime.as_ref() - .map(|mime| { - (mime.ty == APPLICATION && mime.subty == JSON) - || (mime.ty == APPLICATION - && mime.subty.as_str() == "graphql-response" - && mime.suffix == Some(JSON)) - }) - .unwrap_or(false) - }) - }) - .unwrap_or(false) - }) -} - -/// Returns true if the headers contain accept header to enable defer -pub(crate) fn accepts_multipart(headers: &HeaderMap) -> bool { - headers.get_all(ACCEPT).iter().any(|value| { - value - .to_str() - .map(|accept_str| { - let mut list = MediaTypeList::new(accept_str); - - list.any(|mime| { - mime.as_ref() - .map(|mime| { - mime.ty == MULTIPART - && mime.subty == MIXED - && mime.get_param( - mediatype::Name::new(MULTIPART_DEFER_SPEC_PARAMETER) - .expect("valid name"), - ) == Some( - mediatype::Value::new(MULTIPART_DEFER_SPEC_VALUE) - .expect("valid value"), - ) - }) - .unwrap_or(false) - }) - }) - .unwrap_or(false) - 
}) -} - -// Process the headers to make sure that `VARY` is set correctly -pub(super) fn process_vary_header(headers: &mut HeaderMap) { - if headers.get(VARY).is_none() { - // We don't have a VARY header, add one with value "origin" - headers.insert(VARY, HeaderValue::from_static("origin")); - } -} - -#[derive(Clone)] -pub(super) struct PropagatingMakeSpan; - -impl PropagatingMakeSpan { - pub(super) fn new() -> Self { - Self {} - } -} +#[derive(Clone, Default)] +pub(crate) struct PropagatingMakeSpan; impl MakeSpan for PropagatingMakeSpan { fn make_span(&mut self, request: &http::Request) -> Span { @@ -262,9 +118,7 @@ impl MakeSpan for PropagatingMakeSpan { "http.route" = %request.uri(), "http.flavor" = ?request.version(), "otel.kind" = "SERVER", - "otel.status_code" = tracing::field::Empty, - "apollo_private.duration_ns" = tracing::field::Empty, - "trace_id" = tracing::field::Empty + ) } else { // No remote span, we can go ahead and create the span without context. @@ -275,83 +129,7 @@ impl MakeSpan for PropagatingMakeSpan { "http.route" = %request.uri(), "http.flavor" = ?request.version(), "otel.kind" = "SERVER", - "otel.status_code" = tracing::field::Empty, - "apollo_private.duration_ns" = tracing::field::Empty, - "trace_id" = tracing::field::Empty ) } } } - -#[cfg(test)] -mod tests { - use super::*; - - // Test Vary processing - - #[test] - fn it_adds_default_with_value_origin_if_no_vary_header() { - let mut default_headers = HeaderMap::new(); - process_vary_header(&mut default_headers); - let vary_opt = default_headers.get(VARY); - assert!(vary_opt.is_some()); - let vary = vary_opt.expect("has a value"); - assert_eq!(vary, "origin"); - } - - #[test] - fn it_leaves_vary_alone_if_set() { - let mut default_headers = HeaderMap::new(); - default_headers.insert(VARY, HeaderValue::from_static("*")); - process_vary_header(&mut default_headers); - let vary_opt = default_headers.get(VARY); - assert!(vary_opt.is_some()); - let vary = vary_opt.expect("has a value"); - 
assert_eq!(vary, "*"); - } - - #[test] - fn it_leaves_varys_alone_if_there_are_more_than_one() { - let mut default_headers = HeaderMap::new(); - default_headers.insert(VARY, HeaderValue::from_static("one")); - default_headers.append(VARY, HeaderValue::from_static("two")); - process_vary_header(&mut default_headers); - let vary = default_headers.get_all(VARY); - assert_eq!(vary.iter().count(), 2); - for value in vary { - assert!(value == "one" || value == "two"); - } - } - - #[test] - fn it_checks_accept_header() { - let mut default_headers = HeaderMap::new(); - default_headers.insert(ACCEPT, HeaderValue::from_static("application/json")); - default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); - assert!(accepts_json(&default_headers)); - - let mut default_headers = HeaderMap::new(); - default_headers.insert(ACCEPT, HeaderValue::from_static("*/*")); - default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); - assert!(accepts_wildcard(&default_headers)); - - let mut default_headers = HeaderMap::new(); - default_headers.insert( - ACCEPT, - HeaderValue::from_static(GRAPHQL_JSON_RESPONSE_HEADER_VALUE), - ); - default_headers.append(ACCEPT, HeaderValue::from_static("foo/bar")); - assert!(accepts_json(&default_headers)); - - let mut default_headers = HeaderMap::new(); - default_headers.insert( - ACCEPT, - HeaderValue::from_static(GRAPHQL_JSON_RESPONSE_HEADER_VALUE), - ); - default_headers.append( - ACCEPT, - HeaderValue::from_static(MULTIPART_DEFER_CONTENT_TYPE), - ); - assert!(accepts_multipart(&default_headers)); - } -} diff --git a/apollo-router/src/cache/mod.rs b/apollo-router/src/cache/mod.rs index 5ade794dc3..421e688819 100644 --- a/apollo-router/src/cache/mod.rs +++ b/apollo-router/src/cache/mod.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::num::NonZeroUsize; use std::sync::Arc; use tokio::sync::broadcast; @@ -14,7 +15,10 @@ mod redis; pub(crate) mod storage; type WaitMap = Arc>>>; -pub(crate) const DEFAULT_CACHE_CAPACITY: 
usize = 512; +pub(crate) const DEFAULT_CACHE_CAPACITY: NonZeroUsize = match NonZeroUsize::new(512) { + Some(v) => v, + None => unreachable!(), +}; /// Cache implementation with query deduplication #[derive(Clone)] @@ -34,7 +38,7 @@ where } pub(crate) async fn with_capacity( - capacity: usize, + capacity: NonZeroUsize, redis_urls: Option>, caller: &str, ) -> Self { @@ -128,6 +132,10 @@ where let _ = locked_wait_map.remove(key); let _ = sender.send(value); } + + pub(crate) async fn in_memory_keys(&self) -> Vec { + self.storage.in_memory_keys().await + } } pub(crate) struct Entry { @@ -199,6 +207,8 @@ where #[cfg(test)] mod tests { + use std::num::NonZeroUsize; + use futures::stream::FuturesUnordered; use futures::stream::StreamExt; use mockall::mock; @@ -209,7 +219,8 @@ mod tests { #[tokio::test] async fn example_cache_usage() { let k = "key".to_string(); - let cache = DeduplicatingCache::with_capacity(1, None, "test").await; + let cache = + DeduplicatingCache::with_capacity(NonZeroUsize::new(1).unwrap(), None, "test").await; let entry = cache.get(&k).await; @@ -226,7 +237,7 @@ mod tests { #[test(tokio::test)] async fn it_should_enforce_cache_limits() { let cache: DeduplicatingCache = - DeduplicatingCache::with_capacity(13, None, "test").await; + DeduplicatingCache::with_capacity(NonZeroUsize::new(13).unwrap(), None, "test").await; for i in 0..14 { let entry = cache.get(&i).await; @@ -249,7 +260,7 @@ mod tests { mock.expect_retrieve().times(1).return_const(1usize); let cache: DeduplicatingCache = - DeduplicatingCache::with_capacity(10, None, "test").await; + DeduplicatingCache::with_capacity(NonZeroUsize::new(10).unwrap(), None, "test").await; // Let's trigger 100 concurrent gets of the same value and ensure only // one delegated retrieve is made diff --git a/apollo-router/src/cache/storage.rs b/apollo-router/src/cache/storage.rs index b719c1988b..d9994851ea 100644 --- a/apollo-router/src/cache/storage.rs +++ b/apollo-router/src/cache/storage.rs @@ -2,6 +2,7 @@ use 
std::fmt; use std::hash::Hash; +use std::num::NonZeroUsize; use std::sync::Arc; use lru::LruCache; @@ -56,7 +57,7 @@ where V: ValueType, { pub(crate) async fn new( - max_capacity: usize, + max_capacity: NonZeroUsize, _redis_urls: Option>, _caller: &str, ) -> Self { @@ -114,6 +115,15 @@ where } } + pub(crate) async fn in_memory_keys(&self) -> Vec { + self.inner + .lock() + .await + .iter() + .map(|(k, _)| k.clone()) + .collect() + } + #[cfg(test)] pub(crate) async fn len(&self) -> usize { self.inner.lock().await.len() diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 43a7f5948a..058d5deac6 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -12,10 +12,9 @@ mod yaml; use std::fmt; use std::net::IpAddr; use std::net::SocketAddr; +use std::num::NonZeroUsize; use std::str::FromStr; -use askama::Template; -use bytes::Bytes; use cors::*; use derivative::Derivative; use displaydoc::Display; @@ -561,6 +560,11 @@ pub(crate) struct Apq { #[serde(deny_unknown_fields)] pub(crate) struct QueryPlanning { pub(crate) experimental_cache: Cache, + /// Warm up the cache on reloads by running the query plan over + /// a list of the most used queries + /// Defaults to 0 (do not warm up the cache) + #[serde(default)] + pub(crate) warmed_up_queries: usize, } #[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] @@ -579,7 +583,7 @@ pub(crate) struct Cache { /// In memory cache configuration pub(crate) struct InMemoryCache { /// Number of entries in the Least Recently Used cache - pub(crate) limit: usize, + pub(crate) limit: NonZeroUsize, } impl Default for InMemoryCache { @@ -638,17 +642,6 @@ impl Default for Sandbox { } } -#[derive(Template)] -#[template(path = "sandbox_index.html")] -struct SandboxTemplate {} - -impl Sandbox { - pub(crate) fn display_page() -> Bytes { - let template = SandboxTemplate {}; - template.render().unwrap().into() - } -} - /// Configuration options 
pertaining to the home page. #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] #[serde(deny_unknown_fields)] @@ -688,17 +681,6 @@ impl Default for Homepage { } } -#[derive(Template)] -#[template(path = "homepage_index.html")] -struct HomepageTemplate {} - -impl Homepage { - pub(crate) fn display_page() -> Bytes { - let template = HomepageTemplate {}; - template.render().unwrap().into() - } -} - /// Configuration options pertaining to the http server component. #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] #[serde(deny_unknown_fields)] diff --git a/apollo-router/src/configuration/schema.rs b/apollo-router/src/configuration/schema.rs index 27a92cbfe5..9d610bbe0e 100644 --- a/apollo-router/src/configuration/schema.rs +++ b/apollo-router/src/configuration/schema.rs @@ -105,125 +105,113 @@ pub(crate) fn validate_yaml_configuration( } log_used_experimental_conf(&yaml); let expanded_yaml = expand_env_variables(&yaml, &expansion)?; - + let parsed_yaml = super::yaml::parse(raw_yaml)?; if let Err(errors) = schema.validate(&expanded_yaml) { // Validation failed, translate the errors into something nice for the user // We have to reparse the yaml to get the line number information for each error. - match super::yaml::parse(raw_yaml) { - Ok(yaml) => { - let yaml_split_by_lines = raw_yaml.split('\n').collect::>(); - - let errors = errors - .enumerate() - .filter_map(|(idx, mut e)| { - if let Some(element) = yaml.get_element(&e.instance_path) { - const NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY: usize = 5; - match element { - yaml::Value::String(value, marker) => { - let lines = yaml_split_by_lines[0.max( - marker - .line() - .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY), - ) - ..marker.line()] - .iter() - .join("\n"); - - // Replace the value in the error message with the one from the raw config. - // This guarantees that if the env variable contained a secret it won't be leaked. - e.instance = Cow::Owned(coerce(value)); - - Some(format!( - "{}. 
{}\n\n{}\n{}^----- {}", - idx + 1, - e.instance_path, - lines, - " ".repeat(0.max(marker.col())), - e - )) - } - seq_element @ yaml::Value::Sequence(_, m) => { - let (start_marker, end_marker) = (m, seq_element.end_marker()); - - let offset = 0.max( - start_marker - .line() - .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY), - ); - let lines = yaml_split_by_lines[offset..end_marker.line()] - .iter() - .enumerate() - .map(|(idx, line)| { - let real_line = idx + offset; - match real_line.cmp(&start_marker.line()) { - Ordering::Equal => format!("โ”Œ {line}"), - Ordering::Greater => format!("| {line}"), - Ordering::Less => line.to_string(), - } - }) - .join("\n"); - - Some(format!( - "{}. {}\n\n{}\nโ””-----> {}", - idx + 1, - e.instance_path, - lines, - e - )) - } - map_value - @ yaml::Value::Mapping(current_label, _value, _marker) => { - let (start_marker, end_marker) = ( - current_label.as_ref()?.marker.as_ref()?, - map_value.end_marker(), - ); - let offset = 0.max( - start_marker - .line() - .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY), - ); - let lines = yaml_split_by_lines[offset..end_marker.line()] - .iter() - .enumerate() - .map(|(idx, line)| { - let real_line = idx + offset; - match real_line.cmp(&start_marker.line()) { - Ordering::Equal => format!("โ”Œ {line}"), - Ordering::Greater => format!("| {line}"), - Ordering::Less => line.to_string(), - } - }) - .join("\n"); - - Some(format!( - "{}. 
{}\n\n{}\nโ””-----> {}", - idx + 1, - e.instance_path, - lines, - e - )) - } - } - } else { - None + let yaml_split_by_lines = raw_yaml.split('\n').collect::>(); + + let errors = errors + .enumerate() + .filter_map(|(idx, mut e)| { + if let Some(element) = parsed_yaml.get_element(&e.instance_path) { + const NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY: usize = 5; + match element { + yaml::Value::String(value, marker) => { + let lines = yaml_split_by_lines[0.max( + marker + .line() + .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY), + ) + ..marker.line()] + .iter() + .join("\n"); + + // Replace the value in the error message with the one from the raw config. + // This guarantees that if the env variable contained a secret it won't be leaked. + e.instance = Cow::Owned(coerce(value)); + + Some(format!( + "{}. {}\n\n{}\n{}^----- {}", + idx + 1, + e.instance_path, + lines, + " ".repeat(0.max(marker.col())), + e + )) } - }) - .join("\n\n"); - - if !errors.is_empty() { - return Err(ConfigurationError::InvalidConfiguration { - message: "configuration had errors", - error: format!("\n{}", errors), - }); + seq_element @ yaml::Value::Sequence(_, m) => { + let (start_marker, end_marker) = (m, seq_element.end_marker()); + + let offset = 0.max( + start_marker + .line() + .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY), + ); + let lines = yaml_split_by_lines[offset..end_marker.line()] + .iter() + .enumerate() + .map(|(idx, line)| { + let real_line = idx + offset; + match real_line.cmp(&start_marker.line()) { + Ordering::Equal => format!("โ”Œ {line}"), + Ordering::Greater => format!("| {line}"), + Ordering::Less => line.to_string(), + } + }) + .join("\n"); + + Some(format!( + "{}. 
{}\n\n{}\nโ””-----> {}", + idx + 1, + e.instance_path, + lines, + e + )) + } + map_value @ yaml::Value::Mapping(current_label, _value, _marker) => { + let (start_marker, end_marker) = ( + current_label.as_ref()?.marker.as_ref()?, + map_value.end_marker(), + ); + let offset = 0.max( + start_marker + .line() + .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY), + ); + let lines = yaml_split_by_lines[offset..end_marker.line()] + .iter() + .enumerate() + .map(|(idx, line)| { + let real_line = idx + offset; + match real_line.cmp(&start_marker.line()) { + Ordering::Equal => format!("โ”Œ {line}"), + Ordering::Greater => format!("| {line}"), + Ordering::Less => line.to_string(), + } + }) + .join("\n"); + + Some(format!( + "{}. {}\n\n{}\nโ””-----> {}", + idx + 1, + e.instance_path, + lines, + e + )) + } + } + } else { + None } - } - Err(e) => { - // the yaml failed to parse. Just let serde do it's thing. - tracing::warn!( - "failed to parse yaml using marked parser: {}. Falling back to serde validation", - e - ); - } + }) + .join("\n\n"); + + if !errors.is_empty() { + return Err(ConfigurationError::InvalidConfiguration { + message: "configuration had errors", + error: format!("\n{}", errors), + }); } } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 6669243858..e542cf3b20 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -549,6 +549,81 @@ expression: "&schema" "properties": { "experimental.expose_query_plan": { "type": "boolean" + }, + "experimental.external": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "stages": { + "default": null, + "type": "object", + "properties": { + "router": { + "default": 
null, + "type": "object", + "properties": { + "request": { + "default": null, + "type": "object", + "properties": { + "body": { + "default": false, + "type": "boolean" + }, + "context": { + "default": false, + "type": "boolean" + }, + "headers": { + "default": false, + "type": "boolean" + }, + "sdl": { + "default": false, + "type": "boolean" + } + }, + "nullable": true + }, + "response": { + "default": null, + "type": "object", + "properties": { + "body": { + "default": false, + "type": "boolean" + }, + "context": { + "default": false, + "type": "boolean" + }, + "headers": { + "default": false, + "type": "boolean" + }, + "sdl": { + "default": false, + "type": "boolean" + } + }, + "nullable": true + } + }, + "nullable": true + } + }, + "nullable": true + }, + "timeout": { + "default": null, + "type": "string" + }, + "url": { + "type": "string" + } + } } }, "additionalProperties": false @@ -618,7 +693,8 @@ expression: "&schema" "in_memory": { "limit": 512 } - } + }, + "warmed_up_queries": 0 } }, "type": "object", @@ -654,7 +730,7 @@ expression: "&schema" "description": "Number of entries in the Least Recently Used cache", "type": "integer", "format": "uint", - "minimum": 0.0 + "minimum": 1.0 } }, "additionalProperties": false @@ -700,7 +776,8 @@ expression: "&schema" "in_memory": { "limit": 512 } - } + }, + "warmed_up_queries": 0 }, "type": "object", "required": [ @@ -724,13 +801,20 @@ expression: "&schema" "description": "Number of entries in the Least Recently Used cache", "type": "integer", "format": "uint", - "minimum": 0.0 + "minimum": 1.0 } }, "additionalProperties": false } }, "additionalProperties": false + }, + "warmed_up_queries": { + "description": "Warm up the cache on reloads by running the query plan over a list of the most used queries Defaults to 0 (do not warm up the cache)", + "default": 0, + "type": "integer", + "format": "uint", + "minimum": 0.0 } }, "additionalProperties": false @@ -745,51 +829,67 @@ expression: "&schema" "type": "object", 
"properties": { "batch_processor": { + "description": "Configuration for batch processing.", + "default": { + "scheduled_delay": { + "secs": 5, + "nanos": 0 + }, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "max_export_timeout": { + "secs": 30, + "nanos": 0 + }, + "max_concurrent_exports": 1 + }, "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. 
The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } - }, - "nullable": true + } }, "buffer_size": { "description": "The buffer size for sending traces to Apollo. Increase this if you are experiencing lost traces.", "default": 10000, "type": "integer", "format": "uint", - "minimum": 0.0 + "minimum": 1.0 }, "client_name_header": { "description": "The name of the header to extract from requests when populating 'client nane' for traces and metrics in Apollo Studio.", @@ -834,8 +934,7 @@ expression: "&schema" } ] } - ], - "nullable": true + ] }, "send_headers": { "description": "To configure which request header names and values are included in trace data that's sent to Apollo Studio.", @@ -1830,44 +1929,59 @@ expression: "&schema" ], "properties": { "batch_processor": { + "default": { + "scheduled_delay": { + "secs": 5, + "nanos": 0 + }, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "max_export_timeout": { + "secs": 30, + "nanos": 0 + }, + "max_concurrent_exports": 1 + }, "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. 
The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } - }, - "nullable": true + } }, "endpoint": { "type": "string" @@ -1973,44 +2087,59 @@ expression: "&schema" "type": "object", "properties": { "batch_processor": { + "default": { + "scheduled_delay": { + "secs": 5, + "nanos": 0 + }, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "max_export_timeout": { + "secs": 30, + "nanos": 0 + }, + "max_concurrent_exports": 1 + }, "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. 
A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. 
The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } - }, - "nullable": true + } }, "endpoint": { "default": "default", @@ -2062,37 +2191,40 @@ expression: "&schema" "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. 
The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } } @@ -2110,37 +2242,40 @@ expression: "&schema" "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. 
The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } } @@ -2171,45 +2306,59 @@ expression: "&schema" ], "properties": { "batch_processor": { - "default": null, + "default": { + "scheduled_delay": { + "secs": 5, + "nanos": 0 + }, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "max_export_timeout": { + "secs": 30, + "nanos": 0 + }, + "max_concurrent_exports": 1 + }, "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. 
The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } - }, - "nullable": true + } } }, "nullable": true @@ -2221,44 +2370,59 @@ expression: "&schema" ], "properties": { "batch_processor": { + "default": { + "scheduled_delay": { + "secs": 5, + "nanos": 0 + }, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "max_export_timeout": { + "secs": 30, + "nanos": 0 + }, + "max_concurrent_exports": 1 + }, "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. 
The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } - }, - "nullable": true + } }, "endpoint": { "type": "string" @@ -2425,8 +2589,7 @@ expression: "&schema" ] } ] - }, - "nullable": true + } }, "max_attributes_per_event": { "type": "integer", @@ -2459,8 +2622,8 @@ expression: "&schema" "nullable": true }, "parent_based_sampler": { - "type": "boolean", - "nullable": true + "default": true, + "type": "boolean" }, "sampler": { "anyOf": [ @@ -2487,8 +2650,7 @@ expression: "&schema" } ] } - ], - "nullable": true + ] }, "service_name": { "type": "string", @@ -2506,44 +2668,59 @@ expression: "&schema" "type": "object", "properties": { "batch_processor": { + "default": { + "scheduled_delay": { + "secs": 5, + "nanos": 0 + }, + "max_queue_size": 2048, + "max_export_batch_size": 512, + "max_export_timeout": { + "secs": 30, + "nanos": 0 + }, + "max_concurrent_exports": 1 + }, "type": "object", "properties": { "max_concurrent_exports": { - "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed 
by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task.", - "default": null, + "description": "Maximum number of concurrent exports\n\nLimits the number of spawned tasks for exports and thus memory consumed by an exporter. A value of 1 will cause exports to be performed synchronously on the BatchSpanProcessor task. The default is 1.", + "default": 1, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_batch_size": { "description": "The maximum number of spans to process in a single batch. If there are more than one batch worth of spans then it processes multiple batches of spans one batch after the other without any delay. The default value is 512.", - "default": null, + "default": 512, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "max_export_timeout": { - "description": "The maximum duration to export a batch of data.", - "default": null, + "description": "The maximum duration to export a batch of data. The default value is 30 seconds.", + "default": { + "secs": 30, + "nanos": 0 + }, "type": "string" }, "max_queue_size": { "description": "The maximum queue size to buffer spans for delayed processing. If the queue gets full it drops the spans. The default value of is 2048.", - "default": null, + "default": 2048, "type": "integer", "format": "uint", - "minimum": 0.0, - "nullable": true + "minimum": 0.0 }, "scheduled_delay": { "description": "The delay interval in milliseconds between two consecutive processing of batches. 
The default value is 5 seconds.", - "default": null, + "default": { + "secs": 5, + "nanos": 0 + }, "type": "string" } - }, - "nullable": true + } }, "endpoint": { "default": "default", diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__yaml__test__duplicate_keys.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__yaml__test__duplicate_keys.snap new file mode 100644 index 0000000000..6a891be5df --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__yaml__test__duplicate_keys.snap @@ -0,0 +1,8 @@ +--- +source: apollo-router/src/configuration/yaml.rs +expression: "format!(\"{:#?}\", parse(yaml).unwrap_err())" +--- +InvalidConfiguration { + message: "duplicated keys detected in your yaml configuration", + error: "'test.a', 'c.dup', 'test'", +} diff --git a/apollo-router/src/configuration/yaml.rs b/apollo-router/src/configuration/yaml.rs index b891a2b17f..cc1a37f560 100644 --- a/apollo-router/src/configuration/yaml.rs +++ b/apollo-router/src/configuration/yaml.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::collections::HashSet; use derivative::Derivative; use indexmap::IndexMap; @@ -64,6 +65,7 @@ pub(crate) struct MarkedYaml { current_label: Option