chore: improve explain insert into with select
xudong963 committed Nov 30, 2023
1 parent b9b1b1c commit 2cbdde0
Showing 4 changed files with 56 additions and 10 deletions.
49 changes: 40 additions & 9 deletions src/query/service/src/interpreters/interpreter_explain.rs
@@ -26,6 +26,8 @@ use common_profile::QueryProfileManager;
 use common_profile::SharedProcessorProfiles;
 use common_sql::executor::ProfileHelper;
 use common_sql::optimizer::ColumnSet;
+use common_sql::BindContext;
+use common_sql::InsertInputSource;
 use common_sql::MetadataRef;
 use common_storages_result_cache::gen_result_cache_key;
 use common_storages_result_cache::ResultCacheReader;
@@ -71,17 +73,28 @@ impl Interpreter for ExplainInterpreter {
                     formatted_ast,
                     ..
                 } => {
-                    let ctx = self.ctx.clone();
-                    // If `formatted_ast` is Some, it means we may use query result cache.
-                    // If we use result cache for this query,
-                    // we should not use `dry_run` mode to build the physical plan.
-                    // It's because we need to get the same partitions as the original selecting plan.
-                    let mut builder =
-                        PhysicalPlanBuilder::new(metadata.clone(), ctx, formatted_ast.is_none());
-                    let plan = builder.build(s_expr, bind_context.column_set()).await?;
-                    self.explain_physical_plan(&plan, metadata, formatted_ast)
+                    self.explain_query(s_expr, metadata, bind_context, formatted_ast)
                         .await?
                 }
+                Plan::Insert(insert_plan) => {
+                    let mut res = self.explain_plan(&self.plan)?;
+                    if let InsertInputSource::SelectPlan(plan) = &insert_plan.source {
+                        if let Plan::Query {
+                            s_expr,
+                            metadata,
+                            bind_context,
+                            formatted_ast,
+                            ..
+                        } = &**plan
+                        {
+                            let query = self
+                                .explain_query(s_expr, metadata, bind_context, formatted_ast)
+                                .await?;
+                            res.extend(query);
+                        }
+                    }
+                    res
+                }
                 _ => self.explain_plan(&self.plan)?,
             },

@@ -340,4 +353,22 @@ impl ExplainInterpreter {
         let formatted_plan = StringType::from_data(line_split_result);
         Ok(vec![DataBlock::new_from_columns(vec![formatted_plan])])
     }
+
+    async fn explain_query(
+        &self,
+        s_expr: &SExpr,
+        metadata: &MetadataRef,
+        bind_context: &BindContext,
+        formatted_ast: &Option<String>,
+    ) -> Result<Vec<DataBlock>> {
+        let ctx = self.ctx.clone();
+        // If `formatted_ast` is Some, it means we may use query result cache.
+        // If we use result cache for this query,
+        // we should not use `dry_run` mode to build the physical plan.
+        // It's because we need to get the same partitions as the original selecting plan.
+        let mut builder = PhysicalPlanBuilder::new(metadata.clone(), ctx, formatted_ast.is_none());
+        let plan = builder.build(s_expr, bind_context.column_set()).await?;
+        self.explain_physical_plan(&plan, metadata, formatted_ast)
+            .await
+    }
 }
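
For readers skimming the diff, the sketch below condenses the new control flow into a small, self-contained Rust program. Every type and helper in it (ToyPlan, ToyInsert, ToyInputSource, explain_plan, explain_query) is a hypothetical stand-in, not Databend's real Plan, Insert, InsertInputSource, or ExplainInterpreter API; it only illustrates the pattern this commit introduces: when an INSERT's source is a SELECT plan, the EXPLAIN result combines the insert summary with the inner query's plan, appended via res.extend(query). The exact text and ordering of the real output come from Databend's own formatters (see the sqllogictest below).

// Hypothetical stand-ins for Databend's Plan / Insert / InsertInputSource types.
#[derive(Debug)]
enum ToyPlan {
    Query { text: String },
    Insert(ToyInsert),
}

#[derive(Debug)]
struct ToyInsert {
    table: String,
    source: ToyInputSource,
}

#[derive(Debug)]
enum ToyInputSource {
    SelectPlan(Box<ToyPlan>),
    Values(Vec<i32>),
}

// Stand-in for `explain_plan`: a one-line summary of the plan node itself.
fn explain_plan(plan: &ToyPlan) -> Vec<String> {
    vec![format!("{plan:?}")]
}

// Stand-in for `explain_query`: the formatted physical plan of a query.
fn explain_query(text: &str) -> Vec<String> {
    vec![format!("<physical plan for `{text}`>")]
}

// Mirrors the new match arm: an INSERT whose source is a SELECT gets the
// inner query's plan appended to the insert summary.
fn explain(plan: &ToyPlan) -> Vec<String> {
    match plan {
        ToyPlan::Query { text } => explain_query(text),
        ToyPlan::Insert(insert) => {
            let mut res = explain_plan(plan);
            if let ToyInputSource::SelectPlan(inner) = &insert.source {
                if let ToyPlan::Query { text } = &**inner {
                    res.extend(explain_query(text));
                }
            }
            res
        }
    }
}

fn main() {
    // INSERT ... SELECT: both the insert summary and the query plan are emitted.
    let insert_select = ToyPlan::Insert(ToyInsert {
        table: "t2".to_string(),
        source: ToyInputSource::SelectPlan(Box::new(ToyPlan::Query {
            text: "select * from t1".to_string(),
        })),
    });
    for line in explain(&insert_select) {
        println!("{line}");
    }

    // INSERT ... VALUES: only the one-line insert summary is emitted.
    let insert_values = ToyPlan::Insert(ToyInsert {
        table: "t2".to_string(),
        source: ToyInputSource::Values(vec![1, 2, 3]),
    });
    for line in explain(&insert_values) {
        println!("{line}");
    }
}
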
1 change: 1 addition & 0 deletions src/query/sql/src/planner/mod.rs
@@ -41,6 +41,7 @@ pub use format::format_scalar;
 pub use metadata::*;
 pub use planner::PlanExtras;
 pub use planner::Planner;
+pub use plans::insert::InsertInputSource;
 pub use plans::ScalarExpr;
 pub use semantic::*;
 pub use stream_column::*;
1 change: 0 additions & 1 deletion src/query/sql/src/planner/plans/insert.rs
@@ -87,7 +87,6 @@ impl std::fmt::Debug for Insert {
             .field("catalog", &self.catalog)
             .field("database", &self.database)
             .field("table", &self.table)
-            .field("table_id", &self.table_id)
             .field("schema", &self.schema)
             .field("overwrite", &self.overwrite)
             .finish()
15 changes: 15 additions & 0 deletions tests/sqllogictests/suites/mode/standalone/explain/explain.test
@@ -1709,6 +1709,21 @@ Filter
 ├── push downs: [filters: [], limit: NONE]
 └── estimated rows: 0.00
 
+query T
+explain insert into t2 select * from t1;
+----
+TableScan
+├── table: default.default.t1
+├── output columns: [a (#0), b (#1), c (#2)]
+├── read rows: 0
+├── read bytes: 0
+├── partitions total: 0
+├── partitions scanned: 0
+├── push downs: [filters: [], limit: NONE]
+└── estimated rows: 0.00
+Insert { catalog: "default", database: "default", table: "t2", schema: TableSchema { fields: [TableField { name: "a", default_expr: None, data_type: Nullable(Number(Int32)), column_id: 0, computed_expr: None }, TableField { name: "b", default_expr: None, data_type: Nullable(Number(Int32)), column_id: 1, computed_expr: None }, TableField { name: "c", default_expr: None, data_type: Nullable(String), column_id: 2, computed_expr: None }], metadata: {}, next_column_id: 3 }, overwrite: false }
+
+
 statement ok
 drop table t1;
 
