-
Notifications
You must be signed in to change notification settings - Fork 29k
[SPARK-15647] [SQL] Fix Boundary Cases in OptimizeCodegen Rule #13392
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 7 commits
40b70f3
aaf5f2e
f351c10
ecc4318
db67f8c
414e116
4306c4f
b2849e8
9830e31
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -18,6 +18,7 @@ | |
| package org.apache.spark.sql.internal | ||
|
|
||
| import org.apache.spark.sql.{QueryTest, Row, SparkSession, SQLContext} | ||
| import org.apache.spark.sql.execution.WholeStageCodegenExec | ||
| import org.apache.spark.sql.test.{SharedSQLContext, TestSQLContext} | ||
|
|
||
| class SQLConfSuite extends QueryTest with SharedSQLContext { | ||
|
|
@@ -219,4 +220,41 @@ class SQLConfSuite extends QueryTest with SharedSQLContext { | |
| } | ||
| } | ||
|
|
||
test("MAX_CASES_BRANCHES") {
  import testImplicits._

  // Remember the current threshold so it can be restored after the test.
  val saved = spark.conf.get(SQLConf.MAX_CASES_BRANCHES)
  try {
    withTable("tab1") {
      // A small four-column table for the CASE WHEN queries to scan.
      spark
        .range(10)
        .select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd)
        .write
        .saveAsTable("tab1")

      val sql_one_branch_caseWhen = "SELECT CASE WHEN a = 1 THEN 1 END FROM tab1"
      val sql_two_branch_caseWhen = "SELECT CASE WHEN a = 1 THEN 1 ELSE 0 END FROM tab1"

      // True when the query's executed plan is whole-stage code-generated.
      def codegenEnabled(query: String): Boolean =
        sql(query).queryExecution.executedPlan.isInstanceOf[WholeStageCodegenExec]

      // For each MAX_CASES_BRANCHES threshold, a CASE WHEN should be
      // code-generated only when its branch count does not exceed it.
      Seq(
        ("0", false, false),
        ("1", true, false),
        ("2", true, true)
      ).foreach { case (threshold, oneBranchExpected, twoBranchExpected) =>
        spark.conf.set(SQLConf.MAX_CASES_BRANCHES.key, threshold)
        assert(codegenEnabled(sql_one_branch_caseWhen) == oneBranchExpected)
        assert(codegenEnabled(sql_two_branch_caseWhen) == twoBranchExpected)
      }
    }
  } finally {
    spark.conf.set(SQLConf.MAX_CASES_BRANCHES.key, s"$saved")
  }
}
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sorry for nitpicking, but could you use
`canCodegen` instead (to follow the name of the method to call)? Thanks!

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Sure, let me fix it. Thanks!