Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1115,6 +1115,8 @@ class Analyzer(
g.copy(join = true, child = addMissingAttr(g.child, missing))
case d: Distinct =>
throw new AnalysisException(s"Can't add $missingAttrs to $d")
case u: Union =>
u.withNewChildren(u.children.map(addMissingAttr(_, missingAttrs)))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is not the only issue in Union, and I think binary operators have the same issue, e.g.:

scala> df3.join(df4).filter("grouping_id()=0").show()
org.apache.spark.sql.AnalysisException: cannot resolve '`spark_grouping_id`' given input columns: [a, sum(b), a, sum(b)];;
'Filter ('spark_grouping_id = 0)
+- Join Inner
   :- Aggregate [a#27, spark_grouping_id#25], [a#27, sum(cast(b#6 as bigint)) AS sum(b)#24L]
   :  +- Expand [List(a#5, b#6, a#26, 0), List(a#5, b#6, null, 1)], [a#5, b#6, a#27, spark_grouping_id#25]
   :     +- Project [a#5, b#6, a#5 AS a#26]
   :        +- Project [_1#0 AS a#5, _2#1 AS b#6]
   :           +- LocalRelation [_1#0, _2#1]
   +- Aggregate [a#38, spark_grouping_id#36], [a#38, sum(cast(b#16 as bigint)) AS sum(b)#35L]
      +- Expand [List(a#15, b#16, a#37, 0), List(a#15, b#16, null, 1)], [a#15, b#16, a#38, spark_grouping_id#36]
         +- Project [a#15, b#16, a#15 AS a#37]
            +- Project [_1#10 AS a#15, _2#11 AS b#16]
               +- LocalRelation [_1#10, _2#11]

So, I think we need a more general solution for this case.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, I agree with you. The current implementation only checks UnaryNode;
it is necessary to take all node types into consideration.
Thanks for the suggestion — I will work on a general solution.

case u: UnaryNode =>
u.withNewChildren(addMissingAttr(u.child, missingAttrs) :: Nil)
case other =>
Expand All @@ -1133,6 +1135,8 @@ class Analyzer(
resolved
} else {
plan match {
case u: Union if !u.children.head.isInstanceOf[SubqueryAlias] =>
resolveExpressionRecursively(resolved, u.children.head)
case u: UnaryNode if !u.isInstanceOf[SubqueryAlias] =>
resolveExpressionRecursively(resolved, u.child)
case other => resolved
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2039,4 +2039,13 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
testData2.select(lit(7), 'a, 'b).orderBy(lit(1), lit(2), lit(3)),
Seq(Row(7, 1, 1), Row(7, 1, 2), Row(7, 2, 1), Row(7, 2, 2), Row(7, 3, 1), Row(7, 3, 2)))
}

test("SPARK-21966: ResolveMissingReference rule should not ignore Union") {
  // Regression test: filtering a union of cube() aggregates on grouping_id()
  // used to fail because the missing attribute (the internal grouping-id
  // column) was not propagated through the Union node by the analyzer.
  val left = Seq((1, 1), (2, 1), (2, 2)).toDF("a", "b")
  val right = Seq((1, 1), (1, 2), (2, 3)).toDF("a", "b")
  val leftAgg = left.cube("a").sum("b")
  val rightAgg = right.cube("a").sum("b")
  val unioned = leftAgg.union(rightAgg).filter("grouping_id() = 0")
  // Expected: per-"a" sums from each side — (1,1),(2,3) from left and
  // (1,3),(2,3) from right — with the grand-total (null) rows filtered out.
  checkAnswer(unioned, Seq(Row(1, 1), Row(2, 3), Row(1, 3), Row(2, 3)))
}
}