Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .changesets/fix_simon_router_1200.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
### Entity caching: fix inconsistency in cache-control header handling ([PR #7987](https://github.com/apollographql/router/pull/7987))

When the [Subgraph Entity Caching] feature is in use, it determines the `Cache-Control` HTTP response header sent to supergraph clients based on those received from subgraph servers.
In this process, Apollo Router only emits the `max-age` [directive] and not `s-maxage`.
This PR fixes a bug where, for a query that involved a single subgraph fetch that was not already cached, the subgraph response’s `Cache-Control` header would be forwarded as-is.
Instead, it now goes through the same algorithm as other cases.

[Subgraph Entity Caching]: https://www.apollographql.com/docs/graphos/routing/performance/caching/entity
[directive]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Cache-Control#response_directives
11 changes: 5 additions & 6 deletions apollo-router/src/plugins/cache/entity.rs
Original file line number Diff line number Diff line change
Expand Up @@ -937,10 +937,7 @@ async fn cache_lookup_root(
Some(value) => {
if value.0.control.can_use() {
let control = value.0.control.clone();
request
.context
.extensions()
.with_lock(|lock| lock.insert(control));
update_cache_control(&request.context, &control);
if expose_keys_in_context {
let request_id = request.id.clone();
let cache_control_header = value.0.control.to_cache_control_header()?;
Expand Down Expand Up @@ -1124,8 +1121,10 @@ fn update_cache_control(context: &Context, cache_control: &CacheControl) {
if let Some(c) = lock.get_mut::<CacheControl>() {
*c = c.merge(cache_control);
} else {
//FIXME: race condition. We need an Entry API for private entries
lock.insert(cache_control.clone());
// Go through the "merge" algorithm even with a single value
// in order to keep single-fetch queries consistent between cache hit and miss,
// and with multi-fetch queries.
lock.insert(cache_control.merge(cache_control));
}
})
}
Expand Down
5 changes: 4 additions & 1 deletion apollo-router/src/plugins/response_cache/plugin.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1681,7 +1681,10 @@ fn update_cache_control(context: &Context, cache_control: &CacheControl) {
if let Some(c) = lock.get_mut::<CacheControl>() {
*c = c.merge(cache_control);
} else {
lock.insert(cache_control.clone());
// Go through the "merge" algorithm even with a single value
// in order to keep single-fetch queries consistent between cache hit and miss,
// and with multi-fetch queries.
lock.insert(cache_control.merge(cache_control));
}
})
}
Expand Down
79 changes: 73 additions & 6 deletions apollo-router/tests/integration/entity_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

use apollo_router::graphql;
use apollo_router::services;
use apollo_router::test_harness::HttpService;
use http::HeaderMap;
Expand Down Expand Up @@ -104,17 +105,19 @@ async fn harness(
(router, counters)
}

async fn make_graphql_request(
router: &mut HttpService,
) -> (HeaderMap<String>, apollo_router::graphql::Response) {
async fn make_graphql_request(router: &mut HttpService) -> (HeaderMap<String>, graphql::Response) {
let query = "{ topProducts { reviews { id } } }";
let request: services::router::Request = services::supergraph::Request::fake_builder()
let request = graphql_request(query);
make_http_request(router, request.into()).await
}

fn graphql_request(query: &str) -> services::router::Request {
services::supergraph::Request::fake_builder()
.query(query)
.build()
.unwrap()
.try_into()
.unwrap();
make_http_request(router, request.into()).await
.unwrap()
}

async fn make_json_request(
Expand Down Expand Up @@ -282,3 +285,67 @@ async fn invalidate_with_endpoint() {
reviews: 2
"###);
}

#[tokio::test]
async fn cache_control_merging_single_fetch() {
    // Verifies that even a query resolved by a *single* subgraph fetch goes
    // through the cache-control merge algorithm: the subgraph's `s-maxage`
    // must be rewritten to `max-age` in the client-facing header, and the
    // age must count down on a subsequent cache hit.
    if !graph_os_enabled() {
        return;
    }

    let mut subgraphs = base_subgraphs();
    subgraphs["products"]["headers"]["cache-control"] = "public, s-maxage=120".into();
    subgraphs["reviews"]["headers"]["cache-control"] = "public, s-maxage=60".into();
    let (mut router, _subgraph_request_counters) = harness(base_config(), subgraphs).await;
    // Only touches the `products` subgraph, so exactly one fetch is involved.
    let query = "{ topProducts { upc } }";

    // Router responds with `max-age` even if a single subgraph used `s-maxage`
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    insta::assert_snapshot!(&headers["cache-control"], @"max-age=120,public");

    // Let some wall-clock time pass so the cached entry's remaining TTL shrinks.
    tokio::time::sleep(std::time::Duration::from_secs(2)).await;

    // Re-issue the same query (now a cache hit); the reported max-age must
    // have decreased from the original 120 seconds.
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    let cache_control = &headers["cache-control"];
    let max_age = parse_max_age(cache_control);
    // Usually 120 - 2 = 118, but allow some slack in case CI CPUs are busy
    assert!(max_age > 100 && max_age < 120, "got '{cache_control}'");
}

#[tokio::test]
async fn cache_control_merging_multi_fetch() {
    // Verifies cache-control merging across *multiple* subgraph fetches:
    // `s-maxage` values from the subgraphs are rewritten to `max-age`, and the
    // smallest age across subgraphs wins in the client-facing header.
    if !graph_os_enabled() {
        return;
    }

    let mut subgraphs = base_subgraphs();
    subgraphs["products"]["headers"]["cache-control"] = "public, s-maxage=120".into();
    subgraphs["reviews"]["headers"]["cache-control"] = "public, s-maxage=60".into();
    let (mut router, _subgraph_request_counters) = harness(base_config(), subgraphs).await;
    // This query spans both the `products` and `reviews` subgraphs.
    let query = "{ topProducts { reviews { id } } }";

    // Router responds with `max-age` even if a subgraph used `s-maxage`.
    // The smaller value is used.
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    insta::assert_snapshot!(&headers["cache-control"], @"max-age=60,public");

    // Let wall-clock time pass so the cached entries' remaining TTL shrinks.
    tokio::time::sleep(std::time::Duration::from_secs(2)).await;

    // Cache hit: the reported max-age must have decreased from 60 seconds.
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    let cache_control = &headers["cache-control"];
    let max_age = parse_max_age(cache_control);
    // Usually 60 - 2 = 58, but allow some slack in case CI CPUs are busy
    assert!(max_age > 40 && max_age < 60, "got '{cache_control}'");
}

/// Extracts the number of seconds from a header value of the exact shape
/// `max-age={seconds},public`.
///
/// # Panics
///
/// Panics when the header does not match that shape, reporting the raw value.
fn parse_max_age(cache_control: &str) -> u32 {
    let seconds = cache_control
        .strip_prefix("max-age=")
        .and_then(|rest| rest.strip_suffix(",public"))
        .and_then(|digits| digits.parse().ok());
    match seconds {
        Some(value) => value,
        None => panic!("expected 'max-age={{seconds}},public', got '{cache_control}'"),
    }
}
79 changes: 73 additions & 6 deletions apollo-router/tests/integration/response_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::time::Duration;

use apollo_router::graphql;
use apollo_router::services;
use apollo_router::test_harness::HttpService;
use http::HeaderMap;
Expand Down Expand Up @@ -149,17 +150,19 @@ async fn harness(
(router, counters)
}

async fn make_graphql_request(
router: &mut HttpService,
) -> (HeaderMap<String>, apollo_router::graphql::Response) {
async fn make_graphql_request(router: &mut HttpService) -> (HeaderMap<String>, graphql::Response) {
let query = "{ topProducts { reviews { id } } }";
let request: services::router::Request = services::supergraph::Request::fake_builder()
let request = graphql_request(query);
make_http_request(router, request.into()).await
}

fn graphql_request(query: &str) -> services::router::Request {
services::supergraph::Request::fake_builder()
.query(query)
.build()
.unwrap()
.try_into()
.unwrap();
make_http_request(router, request.into()).await
.unwrap()
}

async fn make_json_request(
Expand Down Expand Up @@ -428,3 +431,67 @@ async fn invalidate_with_endpoint_by_entity_cache_tag() {
reviews: 2
"###);
}

#[tokio::test]
async fn cache_control_merging_single_fetch() {
    // Verifies that even a query resolved by a *single* subgraph fetch goes
    // through the cache-control merge algorithm: the subgraph's `s-maxage`
    // must be rewritten to `max-age` in the client-facing header, and the
    // age must count down on a subsequent cache hit.
    if !graph_os_enabled() {
        return;
    }

    let mut subgraphs = base_subgraphs();
    subgraphs["products"]["headers"]["cache-control"] = "public, s-maxage=120".into();
    subgraphs["reviews"]["headers"]["cache-control"] = "public, s-maxage=60".into();
    let (mut router, _subgraph_request_counters) = harness(base_config(), subgraphs).await;
    // Only touches the `products` subgraph, so exactly one fetch is involved.
    let query = "{ topProducts { upc } }";

    // Router responds with `max-age` even if a single subgraph used `s-maxage`
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    insta::assert_snapshot!(&headers["cache-control"], @"max-age=120,public");

    // Let some wall-clock time pass so the cached entry's remaining TTL shrinks.
    tokio::time::sleep(std::time::Duration::from_secs(2)).await;

    // Re-issue the same query (now a cache hit); the reported max-age must
    // have decreased from the original 120 seconds.
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    let cache_control = &headers["cache-control"];
    let max_age = parse_max_age(cache_control);
    // Usually 120 - 2 = 118, but allow some slack in case CI CPUs are busy
    assert!(max_age > 100 && max_age < 120, "got '{cache_control}'");
}

#[tokio::test]
async fn cache_control_merging_multi_fetch() {
    // Verifies cache-control merging across *multiple* subgraph fetches:
    // `s-maxage` values from the subgraphs are rewritten to `max-age`, and the
    // smallest age across subgraphs wins in the client-facing header.
    if !graph_os_enabled() {
        return;
    }

    let mut subgraphs = base_subgraphs();
    subgraphs["products"]["headers"]["cache-control"] = "public, s-maxage=120".into();
    subgraphs["reviews"]["headers"]["cache-control"] = "public, s-maxage=60".into();
    let (mut router, _subgraph_request_counters) = harness(base_config(), subgraphs).await;
    // This query spans both the `products` and `reviews` subgraphs.
    let query = "{ topProducts { reviews { id } } }";

    // Router responds with `max-age` even if a subgraph used `s-maxage`.
    // The smaller value is used.
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    insta::assert_snapshot!(&headers["cache-control"], @"max-age=60,public");

    // Let wall-clock time pass so the cached entries' remaining TTL shrinks.
    tokio::time::sleep(std::time::Duration::from_secs(2)).await;

    // Cache hit: the reported max-age must have decreased from 60 seconds.
    let (headers, _body) =
        make_http_request::<graphql::Response>(&mut router, graphql_request(query).into()).await;
    let cache_control = &headers["cache-control"];
    let max_age = parse_max_age(cache_control);
    // Usually 60 - 2 = 58, but allow some slack in case CI CPUs are busy
    assert!(max_age > 40 && max_age < 60, "got '{cache_control}'");
}

/// Extracts the seconds count from a header value shaped exactly like
/// `max-age={seconds},public`.
///
/// # Panics
///
/// Panics when the header does not match that shape, reporting the raw value.
fn parse_max_age(cache_control: &str) -> u32 {
    let Some(inner) = cache_control
        .strip_prefix("max-age=")
        .and_then(|s| s.strip_suffix(",public"))
    else {
        panic!("expected 'max-age={{seconds}},public', got '{cache_control}'")
    };
    inner
        .parse()
        .unwrap_or_else(|_| panic!("expected 'max-age={{seconds}},public', got '{cache_control}'"))
}