@@ -343,8 +343,10 @@ object QueryExecution {
PlanDynamicPruningFilters(sparkSession),
PlanSubqueries(sparkSession),
RemoveRedundantProjects,
RemoveRedundantSorts,
EnsureRequirements,
// `RemoveRedundantSorts` needs to be added before `EnsureRequirements` to guarantee the same
Member:
before -> after ?

Member:
Ah, I missed it. @allisonwang-db could you fix it?

Contributor Author:
Thanks for catching it! Will create a fix.

// number of partitions when instantiating PartitioningCollection.
RemoveRedundantSorts,
Member:
Could you leave some comments here about why we need to put this rule after EnsureRequirements?

Member:
+1 for @maropu's comment.

DisableUnnecessaryBucketedScan,
ApplyColumnarRulesAndInsertTransitions(sparkSession.sessionState.columnarRules),
CollapseCodegenStages(),
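For context on the constraint the new comment cites: `PartitioningCollection` rejects member partitionings whose partition counts differ, which is why `RemoveRedundantSorts` must run after `EnsureRequirements` has inserted the shuffles that equalize those counts. Below is a hedged, self-contained Scala sketch of that check; the `Partitioning` trait and `HashPartitioning` here are minimal stand-ins for illustration, not the real Catalyst definitions.

// Minimal stand-ins so this sketch compiles on its own; the real types live
// in org.apache.spark.sql.catalyst.plans.physical.
trait Partitioning { def numPartitions: Int }
case class HashPartitioning(numPartitions: Int) extends Partitioning

// Simplified form of the constructor-time check in Catalyst's
// PartitioningCollection: every member must report the same numPartitions.
case class PartitioningCollection(partitionings: Seq[Partitioning])
    extends Partitioning {
  require(
    partitionings.map(_.numPartitions).distinct.length == 1,
    "PartitioningCollection requires all of its partitionings have the same numPartitions.")
  override val numPartitions: Int = partitionings.head.numPartitions
}

object PartitioningCollectionDemo extends App {
  // Before EnsureRequirements inserts shuffles, a join's two sides can have
  // different partition counts; constructing the collection would then throw:
  // PartitioningCollection(Seq(HashPartitioning(2), HashPartitioning(200)))
  // After EnsureRequirements both sides match, so this succeeds:
  println(PartitioningCollection(
    Seq(HashPartitioning(200), HashPartitioning(200))).numPartitions) // 200
}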
@@ -135,7 +135,12 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializable
def longMetric(name: String): SQLMetric = metrics(name)

// TODO: Move to `DistributedPlan`
/** Specifies how data is partitioned across different nodes in the cluster. */
/**
* Specifies how data is partitioned across different nodes in the cluster.
* Note this method may fail if it is invoked before `EnsureRequirements` is applied
* since `PartitioningCollection` requires all its partitionings to have
* the same number of partitions.
*/
def outputPartitioning: Partitioning = UnknownPartitioning(0) // TODO: WRONG WIDTH!

/**
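The new Scaladoc caveat is easiest to see through the two plan accessors on `QueryExecution`: `sparkPlan` is the physical plan before the preparation rules (including `EnsureRequirements`) run, while `executedPlan` is the plan afterwards. A minimal hedged sketch, assuming a local `SparkSession`; the object name and the join data are made up for the example.

import org.apache.spark.sql.SparkSession

object OutputPartitioningDemo extends App {
  val spark = SparkSession.builder().master("local[2]").getOrCreate()
  import spark.implicits._

  val left = spark.range(0, 100, 1, 2).toDF("key")
  val right = Seq(1, 2, 3).toDF("key")
  val joined = left.join(right, "key")

  // sparkPlan: the physical plan *before* EnsureRequirements; per the new
  // Scaladoc, outputPartitioning may not be safe to rely on at this point.
  println(joined.queryExecution.sparkPlan.outputPartitioning)

  // executedPlan: the plan *after* the preparation rules, where the members
  // of any PartitioningCollection are guaranteed to agree on numPartitions.
  println(joined.queryExecution.executedPlan.outputPartitioning)

  spark.stop()
}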
@@ -88,8 +88,8 @@ case class AdaptiveSparkPlanExec(
// Exchange nodes) after running these rules.
private def queryStagePreparationRules: Seq[Rule[SparkPlan]] = Seq(
RemoveRedundantProjects,
RemoveRedundantSorts,
EnsureRequirements,
RemoveRedundantSorts,
DisableUnnecessaryBucketedScan
) ++ context.session.sessionState.queryStagePrepRules

@@ -18,7 +18,9 @@
package org.apache.spark.sql.execution

import org.apache.spark.sql.{DataFrame, QueryTest}
import org.apache.spark.sql.catalyst.plans.physical.{RangePartitioning, UnknownPartitioning}
import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite}
import org.apache.spark.sql.execution.joins.ShuffledJoin
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession

@@ -135,6 +137,32 @@ abstract class RemoveRedundantSortsSuiteBase
}
}
}

test("SPARK-33472: shuffled join with different left and right side partition numbers") {
withTempView("t1", "t2") {
spark.range(0, 100, 1, 2).select('id as "key").createOrReplaceTempView("t1")
(0 to 100).toDF("key").createOrReplaceTempView("t2")

val queryTemplate = """
|SELECT /*+ %s(t1) */ t1.key
|FROM t1 JOIN t2 ON t1.key = t2.key
|WHERE t1.key > 10 AND t2.key < 50
|ORDER BY t1.key ASC
""".stripMargin

Seq(("MERGE", 3), ("SHUFFLE_HASH", 1)).foreach { case (hint, count) =>
val query = queryTemplate.format(hint)
val df = sql(query)
val sparkPlan = df.queryExecution.sparkPlan
val join = sparkPlan.collect { case j: ShuffledJoin => j }.head
val leftPartitioning = join.left.outputPartitioning
assert(leftPartitioning.isInstanceOf[RangePartitioning])
assert(leftPartitioning.numPartitions == 2)
assert(join.right.outputPartitioning == UnknownPartitioning(0))
checkSorts(query, count, count)
}
}
}
}

class RemoveRedundantSortsSuite extends RemoveRedundantSortsSuiteBase