2 changes: 2 additions & 0 deletions docs/sql-migration-guide.md
@@ -96,6 +96,8 @@ license: |
- In Spark 3.2, `FloatType` is mapped to `FLOAT` in MySQL. Prior to this, it used to be mapped to `REAL`, which is by default a synonym to `DOUBLE PRECISION` in MySQL.

- In Spark 3.2, the query executions triggered by `DataFrameWriter` are always named `command` when being sent to `QueryExecutionListener`. In Spark 3.1 and earlier, the name is one of `save`, `insertInto`, `saveAsTable`.

- In Spark 3.2, `Dataset.unionByName` with `allowMissingColumns` set to true will add missing nested fields to the end of structs. In Spark 3.1, nested struct fields are sorted alphabetically.
Contributor:

union of top-level columns is also "left dominant", so this makes sense.

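A minimal spark-shell sketch of the documented change; the DataFrames and field names below are hypothetical, and only the resulting nested-field order is the point:

```scala
import org.apache.spark.sql.functions.{lit, struct}

// Struct columns with the same name but different nested fields.
val df1 = spark.range(1).select(struct(lit(1).as("c"), lit(2).as("a")).as("info"))
val df2 = spark.range(1).select(struct(lit(3).as("c"), lit(4).as("b")).as("info"))

val unioned = df1.unionByName(df2, allowMissingColumns = true)
unioned.printSchema()
// Spark 3.2: the left field order is kept and the missing field is appended,
// so `info` is struct<c:int, a:int, b:int>.
// Spark 3.1: nested fields were sorted alphabetically, giving struct<a:int, b:int, c:int>.
```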

## Upgrading from Spark SQL 3.0 to 3.1

@@ -20,136 +20,63 @@ package org.apache.spark.sql.catalyst.analysis
import scala.collection.mutable

import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.optimizer.{CombineUnions, OptimizeUpdateFields}
import org.apache.spark.sql.catalyst.optimizer.{CombineUnions}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, Union}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreePattern.UNION
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.unsafe.types.UTF8String

/**
* Resolves different children of Union to a common set of columns.
*/
object ResolveUnion extends Rule[LogicalPlan] {
/**
* This method sorts columns recursively in a struct expression based on column names.
* Adds missing fields recursively into given `col` expression, based on the expected struct
* fields from merging the two schemas. This is called by `compareAndAddFields` when we find two
* struct columns with same name but different nested fields. This method will recursively
* return a new struct with all of the expected fields, adding null values when `col` doesn't
* already contain them. Currently we don't support merging structs nested inside of arrays
* or maps.
*/
private def sortStructFields(expr: Expression): Expression = {
val existingExprs = expr.dataType.asInstanceOf[StructType].fieldNames.zipWithIndex.map {
case (name, i) =>
val fieldExpr = GetStructField(KnownNotNull(expr), i)
if (fieldExpr.dataType.isInstanceOf[StructType]) {
(name, sortStructFields(fieldExpr))
} else {
(name, fieldExpr)
}
}.sortBy(_._1).flatMap(pair => Seq(Literal(pair._1), pair._2))
private def addFields(col: Expression, targetType: StructType): Expression = {
assert(col.dataType.isInstanceOf[StructType], "Only support StructType.")

val newExpr = CreateNamedStruct(existingExprs)
if (expr.nullable) {
If(IsNull(expr), Literal(null, newExpr.dataType), newExpr)
} else {
newExpr
}
}
val resolver = conf.resolver
val colType = col.dataType.asInstanceOf[StructType]

/**
* Assumes input expressions are field expression of `CreateNamedStruct`. This method
* sorts the expressions based on field names.
*/
private def sortFieldExprs(fieldExprs: Seq[Expression]): Seq[Expression] = {
fieldExprs.grouped(2).map { e =>
Seq(e.head, e.last)
}.toSeq.sortBy { pair =>
assert(pair.head.isInstanceOf[Literal])
pair.head.eval().asInstanceOf[UTF8String].toString
}.flatten
}
val newStructFields = mutable.ArrayBuffer.empty[Expression]

/**
* This helper method sorts fields in a `UpdateFields` expression by field name.
*/
private def sortStructFieldsInWithFields(expr: Expression): Expression = expr transformUp {
case u: UpdateFields if u.resolved =>
u.evalExpr match {
case i @ If(IsNull(_), _, CreateNamedStruct(fieldExprs)) =>
val sorted = sortFieldExprs(fieldExprs)
val newStruct = CreateNamedStruct(sorted)
i.copy(trueValue = Literal(null, newStruct.dataType), falseValue = newStruct)
case CreateNamedStruct(fieldExprs) =>
val sorted = sortFieldExprs(fieldExprs)
val newStruct = CreateNamedStruct(sorted)
newStruct
case other =>
throw new IllegalStateException(s"`UpdateFields` has incorrect expression: $other. " +
"Please file a bug report with this error message, stack trace, and the query.")
}
}
targetType.fields.foreach { expectedField =>
val currentField = colType.fields.find(f => resolver(f.name, expectedField.name))

/**
* Adds missing fields recursively into given `col` expression, based on the target `StructType`.
* This is called by `compareAndAddFields` when we find two struct columns with same name but
* different nested fields. This method will find out the missing nested fields from `col` to
* `target` struct and add these missing nested fields. Currently we don't support finding out
* missing nested fields of struct nested in array or struct nested in map.
*/
private def addFields(col: NamedExpression, target: StructType): Expression = {
assert(col.dataType.isInstanceOf[StructType], "Only support StructType.")
val newExpression = (currentField, expectedField.dataType) match {
case (Some(cf), expectedType: StructType) if cf.dataType.isInstanceOf[StructType] =>
val extractedValue = ExtractValue(col, Literal(cf.name), resolver)
addFields(extractedValue, expectedType)
case (Some(cf), _) =>
ExtractValue(col, Literal(cf.name), resolver)
case (None, expectedType) =>
Literal(null, expectedType)
}
newStructFields ++= Literal(expectedField.name) :: newExpression :: Nil
}

val resolver = conf.resolver
val missingFieldsOpt =
StructType.findMissingFields(col.dataType.asInstanceOf[StructType], target, resolver)
colType.fields
.filter(f => targetType.fields.find(tf => resolver(f.name, tf.name)).isEmpty)
.foreach { f =>
newStructFields ++= Literal(f.name) :: ExtractValue(col, Literal(f.name), resolver) :: Nil
Member:

Is this to add fields only in the left side at the end of the struct? Doesn't it match the original field order?

Contributor Author:

When the left is projected this should match the original, but when the right is projected this will contain things on the right that aren't in the left. Basically it's

rightChild = left ++ (right - left)
leftChild = rightChild ++ (left - rightChild) = rightChild

Member (@viirya, Jun 23, 2021):

Where is the project? Do you mean

val rightChild = Project(rightProjectList ++ notFoundAttrs, right)

?

It is top level column projection. I mean the nested column field order.

newStructFields contains the (nested) struct fields both in left and right column in right order.

Then here it adds (nested) struct fields only in left back to newStructFields, before creating the new struct (CreateNamedStruct).

Do we reorder the fields later?

Contributor Author:

rightProjectList contains the nested structs mapped in the order of left fields then remaining right fields recursively, so that's where all the reordering happens

Contributor Author:

And then leftChild is created from the fields in rightChild, which already has all the fields at that point, which is the left fields and then the right fields

Member:

rightProjectList contains the nested structs mapped in the order of left fields then remaining right fields recursively, so that's where all the reordering happens

The projection projects original right attributes to rightProjectList. If you have different nested column order, it will be projected to new order.

Member (@viirya, Jun 23, 2021):

The projection is not for reordering the nested column.

I looked at the code in more detail.

targetType is actually left side type in first call. So here we align a right struct column to left struct column.

So it makes sense to add left nested columns first (newStructFields), then add nested columns only in right struct.

Contributor Author:

Yeah I kept most of the naming which gets a little weird with how left/right/source/target are constructed and depends on where in the codepath you are
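
A tiny standalone sketch of the ordering rule discussed in this thread, using plain Scala collections and made-up field names rather than the Catalyst expressions the rule actually builds — the right child is projected to the left's fields followed by the right-only fields, after which the left child has nothing further to append:

```scala
object FieldOrderSketch extends App {
  // Hypothetical nested field names of the struct column on each side of the union.
  val leftFields  = Seq("a", "b")
  val rightFields = Seq("b", "c")

  // rightChild = left ++ (right - left): keep the left order, then append right-only fields.
  val rightChild = leftFields ++ rightFields.filterNot(leftFields.contains)

  // leftChild = rightChild ++ (left - rightChild): nothing is missing, so both sides
  // end up with the same field order.
  val leftChild = rightChild ++ leftFields.filterNot(rightChild.contains)

  println(rightChild) // List(a, b, c)
  println(leftChild)  // List(a, b, c)
}
```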

}

// We need to sort columns in result, because we might add another column in other side.
// E.g., we want to union two structs "a int, b long" and "a int, c string".
// If we don't sort, we will have "a int, b long, c string" and
// "a int, c string, b long", which are not compatible.
if (missingFieldsOpt.isEmpty) {
sortStructFields(col)
val newStruct = CreateNamedStruct(newStructFields.toSeq)
if (col.nullable) {
If(IsNull(col), Literal(null, newStruct.dataType), newStruct)
} else {
missingFieldsOpt.map { s =>
val struct = addFieldsInto(col, s.fields)
// Combines `WithFields`s to reduce expression tree.
val reducedStruct = struct.transformUp(OptimizeUpdateFields.optimizeUpdateFields)
val sorted = sortStructFieldsInWithFields(reducedStruct)
sorted
}.get
newStruct
}
}

/**
* Adds missing fields recursively into given `col` expression. The missing fields are given
* in `fields`. For example, given `col` as "z struct<z:int, y:int>, x int", and `fields` is
* "z struct<w:long>, w string". This method will add a nested `z.w` field and a top-level
* `w` field to `col` and fill null values for them. Note that because we might also add missing
* fields at other side of Union, we must make sure corresponding attributes at two sides have
* same field order in structs, so when we adding missing fields, we will sort the fields based on
* field names. So the data type of returned expression will be
* "w string, x int, z struct<w:long, y:int, z:int>".
*/
private def addFieldsInto(
col: Expression,
fields: Seq[StructField]): Expression = {
fields.foldLeft(col) { case (currCol, field) =>
field.dataType match {
case st: StructType =>
val resolver = conf.resolver
val colField = currCol.dataType.asInstanceOf[StructType]
.find(f => resolver(f.name, field.name))
if (colField.isEmpty) {
// The whole struct is missing. Add a null.
UpdateFields(currCol, field.name, Literal(null, st))
} else {
UpdateFields(currCol, field.name,
addFieldsInto(ExtractValue(currCol, Literal(field.name), resolver), st.fields))
}
case dt =>
UpdateFields(currCol, field.name, Literal(null, dt))
}
}
}

/**
* This method will compare right to left plan's outputs. If there is one struct attribute
@@ -175,11 +102,9 @@ object ResolveUnion extends Rule[LogicalPlan] {
(foundDt, lattr.dataType) match {
case (source: StructType, target: StructType)
if allowMissingCol && !source.sameType(target) =>
// Having an output with same name, but different struct type.
// We need to add missing fields. Note that if there are deeply nested structs such as
// nested struct of array in struct, we don't support to add missing deeply nested field
// like that. We will sort columns in the struct expression to make sure two sides of
// union have consistent schema.
// We have two structs with different types, so make sure the two structs have their
// fields in the same order by using `target`'s fields and then inluding any remaining
Contributor:

nit: inluding -> including

Contributor Author:

🤦

// in `foundAttr`.
aliased += foundAttr
Alias(addFields(foundAttr, target), foundAttr.name)()
case _ =>
@@ -208,13 +133,11 @@ object ResolveUnion extends Rule[LogicalPlan] {
left: LogicalPlan,
right: LogicalPlan,
allowMissingCol: Boolean): LogicalPlan = {
val rightOutputAttrs = right.output

// Builds a project list for `right` based on `left` output names
val (rightProjectList, aliased) = compareAndAddFields(left, right, allowMissingCol)

// Delegates failure checks to `CheckAnalysis`
val notFoundAttrs = rightOutputAttrs.diff(rightProjectList ++ aliased)
val notFoundAttrs = right.output.diff(rightProjectList ++ aliased)
val rightChild = Project(rightProjectList ++ notFoundAttrs, right)

// Builds a project for `logicalPlan` based on `right` output names, if allowing
@@ -230,6 +153,7 @@
} else {
left
}

Union(leftChild, rightChild)
}

Expand Down
6 changes: 2 additions & 4 deletions sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -2081,10 +2081,8 @@ class Dataset[T] private[sql](
* }}}
*
* Note that `allowMissingColumns` supports nested column in struct types. Missing nested columns
* of struct columns with same name will also be filled with null values. This currently does not
* support nested columns in array and map types. Note that if there is any missing nested columns
* to be filled, in order to make consistent schema between two sides of union, the nested fields
* of structs will be sorted after merging schema.
* of struct columns with the same name will also be filled with null values and added to the end
* of struct. This currently does not support nested columns in array and map types.
*
* @group typedrel
* @since 3.1.0