Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions apollo-federation/cli/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,6 @@ struct QueryPlannerArgs {
/// Set the `debug.paths_limit` option.
#[arg(long)]
paths_limit: Option<u32>,
/// If the supergraph only represents a single subgraph, pass through queries directly without
/// planning.
#[arg(long, default_value_t = false)]
single_subgraph_passthrough: bool,
}

/// CLI arguments. See <https://docs.rs/clap/latest/clap/_derive/index.html>
Expand Down Expand Up @@ -112,7 +108,6 @@ impl QueryPlannerArgs {
config.debug.max_evaluated_plans = max_evaluated_plans;
}
config.debug.paths_limit = self.paths_limit;
config.debug.bypass_planner_for_single_subgraph = self.single_subgraph_passthrough;
}
}

Expand Down
9 changes: 0 additions & 9 deletions apollo-federation/src/query_plan/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -266,12 +266,3 @@ pub enum QueryPathElement {
#[serde(serialize_with = "crate::utils::serde_bridge::serialize_exe_inline_fragment")]
InlineFragment(executable::InlineFragment),
}

impl QueryPlan {
fn new(node: impl Into<TopLevelPlanNode>, statistics: QueryPlanningStatistics) -> Self {
Self {
node: Some(node.into()),
statistics,
}
}
}
105 changes: 0 additions & 105 deletions apollo-federation/src/query_plan/query_planner.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ use crate::query_plan::fetch_dependency_graph_processor::FetchDependencyGraphToQ
use crate::query_plan::query_planning_traversal::BestQueryPlanInfo;
use crate::query_plan::query_planning_traversal::QueryPlanningParameters;
use crate::query_plan::query_planning_traversal::QueryPlanningTraversal;
use crate::query_plan::FetchNode;
use crate::query_plan::PlanNode;
use crate::query_plan::QueryPlan;
use crate::query_plan::SequenceNode;
Expand Down Expand Up @@ -120,11 +119,6 @@ pub struct QueryPlanIncrementalDeliveryConfig {

#[derive(Debug, Clone, Hash, Serialize)]
pub struct QueryPlannerDebugConfig {
/// If used and the supergraph is built from a single subgraph, then user queries do not go
/// through the normal query planning and instead a fetch to the one subgraph is built directly
/// from the input query.
pub bypass_planner_for_single_subgraph: bool,

/// Query planning is an exploratory process. Depending on the specificities and features used by
/// subgraphs, there could exist many different theoretically valid (if not always efficient) plans
/// for a given query, and at a high level, the query planner generates those possible choices,
Expand Down Expand Up @@ -162,7 +156,6 @@ pub struct QueryPlannerDebugConfig {
impl Default for QueryPlannerDebugConfig {
fn default() -> Self {
Self {
bypass_planner_for_single_subgraph: false,
max_evaluated_plans: NonZeroU32::new(10_000).unwrap(),
paths_limit: None,
}
Expand All @@ -175,15 +168,6 @@ pub struct QueryPlanningStatistics {
pub evaluated_plan_count: Cell<usize>,
}

impl QueryPlannerConfig {
/// Panics if options are used together in unsupported ways.
fn assert_valid(&self) {
if self.incremental_delivery.enable_defer {
assert!(!self.debug.bypass_planner_for_single_subgraph, "Cannot use the `debug.bypass_planner_for_single_subgraph` query planner option when @defer support is enabled");
}
}
}

#[derive(Debug, Default, Clone)]
pub struct QueryPlanOptions {
/// A set of labels which will be used _during query planning_ to
Expand Down Expand Up @@ -230,8 +214,6 @@ impl QueryPlanner {
supergraph: &Supergraph,
config: QueryPlannerConfig,
) -> Result<Self, FederationError> {
config.assert_valid();

let supergraph_schema = supergraph.schema.clone();
let api_schema = supergraph.to_api_schema(ApiSchemaOptions {
include_defer: config.incremental_delivery.enable_defer,
Expand Down Expand Up @@ -356,32 +338,6 @@ impl QueryPlanner {

let statistics = QueryPlanningStatistics::default();

if self.config.debug.bypass_planner_for_single_subgraph {
let mut subgraphs = self.federated_query_graph.subgraphs();
if let (Some((subgraph_name, _subgraph_schema)), None) =
(subgraphs.next(), subgraphs.next())
{
let node = FetchNode {
subgraph_name: subgraph_name.clone(),
operation_document: document.clone(),
operation_name: operation.name.clone(),
operation_kind: operation.operation_type,
id: None,
variable_usages: operation
.variables
.iter()
.map(|var| var.name.clone())
.collect(),
requires: Default::default(),
input_rewrites: Default::default(),
output_rewrites: Default::default(),
context_rewrites: Default::default(),
};

return Ok(QueryPlan::new(node, statistics));
}
}

let normalized_operation = normalize_operation(
operation,
NamedFragments::new(&document.fragments, &self.api_schema),
Expand Down Expand Up @@ -1199,67 +1155,6 @@ type User
"###);
}

#[test]
fn bypass_planner_for_single_subgraph() {
let a = Subgraph::parse_and_expand(
"A",
"https://A",
r#"
type Query {
a: A
}
type A {
b: B
}
type B {
x: Int
y: String
}
"#,
)
.unwrap();
let subgraphs = vec![&a];
let supergraph = Supergraph::compose(subgraphs).unwrap();
let api_schema = supergraph.to_api_schema(Default::default()).unwrap();

let document = ExecutableDocument::parse_and_validate(
api_schema.schema(),
r#"
{
a {
b {
x
y
}
}
}
"#,
"",
)
.unwrap();

let mut config = QueryPlannerConfig::default();
config.debug.bypass_planner_for_single_subgraph = true;
let planner = QueryPlanner::new(&supergraph, config).unwrap();
let plan = planner
.build_query_plan(&document, None, Default::default())
.unwrap();
insta::assert_snapshot!(plan, @r###"
QueryPlan {
Fetch(service: "A") {
{
a {
b {
x
y
}
}
}
},
}
"###);
}

#[test]
fn test_optimize_no_fragments_generated() {
let supergraph = Supergraph::new(TEST_SUPERGRAPH).unwrap();
Expand Down
10 changes: 5 additions & 5 deletions apollo-router/tests/integration/redis.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ async fn query_planner_cache() -> Result<(), BoxError> {
}
// If this test fails and the cache key format changed you'll need to update the key here.
// Look at the top of the file for instructions on getting the new cache key.
let known_cache_key = "plan:cache:1:federation:v2.9.3:8c0b4bfb4630635c2b5748c260d686ddb301d164e5818c63d6d9d77e13631676:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:8f8ce6ad09f15c3d567a05f1c3d7230ab71b3366fcaebc9cc3bbfa356d55ac12";
let known_cache_key = "plan:cache:1:federation:v2.9.3:8c0b4bfb4630635c2b5748c260d686ddb301d164e5818c63d6d9d77e13631676:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:81b9c296d8ebc8adf4575b83a8d296621fd76de6d12d8c91f4552eda02d1dd9c";

let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap();
let client = RedisClient::new(config, None, None, None);
Expand Down Expand Up @@ -988,7 +988,7 @@ async fn query_planner_redis_update_query_fragments() {
test_redis_query_plan_config_update(
// This configuration turns the fragment generation option *off*.
include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"),
"plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:b030b297e8cc0fb51de5b683162be9a4a5a0023844597253e580f99672bdf2b4",
"plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:a55ce3338ce6d5b78566be89cc6a7ad3fe8a7eeb38229d14ddf647edef84e545",
)
.await;
}
Expand Down Expand Up @@ -1018,7 +1018,7 @@ async fn query_planner_redis_update_defer() {
// test just passes locally.
test_redis_query_plan_config_update(
include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"),
"plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:ab8143af84859ddbed87fc3ac3b1f9c1e2271ffc8e58b58a666619ffc90bfc29",
"plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:0497501d3d01d05ad142938e7b8d8e7ea13e648aabbbedb47f6291ca8b3e536d",
)
.await;
}
Expand All @@ -1040,7 +1040,7 @@ async fn query_planner_redis_update_type_conditional_fetching() {
include_str!(
"fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml"
),
"plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:285740e3d6ca7533144f54f8395204d7c19c44ed16e48f22a3ea41195d60180b",
"plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:662f5041882b3f621aeb7bad8e18818173eb077dc4343e16f3a34d2b6b6e4e59",
)
.await;
}
Expand Down Expand Up @@ -1088,7 +1088,7 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key
router.clear_redis_cache().await;

// If the tests above are failing, this is the key that needs to be changed first.
let starting_key = "plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:8f8ce6ad09f15c3d567a05f1c3d7230ab71b3366fcaebc9cc3bbfa356d55ac12";
let starting_key = "plan:cache:1:federation:v2.9.3:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:81b9c296d8ebc8adf4575b83a8d296621fd76de6d12d8c91f4552eda02d1dd9c";
assert_ne!(starting_key, new_cache_key, "starting_key (cache key for the initial config) and new_cache_key (cache key with the updated config) should not be equal. This either means that the cache key is not being generated correctly, or that the test is not actually checking the updated key.");

router.execute_default_query().await;
Expand Down