Closed
65 commits
36d07bb
spelling: accumulated
jsoref Nov 11, 2020
e703975
spelling: anonymous
jsoref Nov 11, 2020
45c00c8
spelling: arguments
jsoref Nov 11, 2020
0d65584
spelling: attribute
jsoref Nov 11, 2020
5a9413f
spelling: backslash
jsoref Nov 11, 2020
1c78144
spelling: beginning
jsoref Nov 11, 2020
7c29148
spelling: behavior
jsoref Nov 11, 2020
5799028
spelling: calendar
jsoref Nov 11, 2020
9737889
spelling: cannot
jsoref Nov 11, 2020
eb35f3c
spelling: canonicalize
jsoref Nov 11, 2020
42129f6
spelling: carry
jsoref Nov 11, 2020
5752ab9
spelling: classes
jsoref Nov 11, 2020
6fce734
spelling: column
jsoref Nov 11, 2020
15be5a2
spelling: coming
jsoref Nov 11, 2020
4cf463c
spelling: condition
jsoref Nov 11, 2020
138e548
spelling: continuous
jsoref Nov 11, 2020
59084f9
spelling: correlated
jsoref Nov 11, 2020
78d9bdb
spelling: corresponding
jsoref Nov 11, 2020
d11b6e3
spelling: doesn't
jsoref Nov 11, 2020
1b4c6cf
spelling: don't
jsoref Nov 11, 2020
a57b890
spelling: e.g.
jsoref Nov 11, 2020
3bfde6a
spelling: expr
jsoref Nov 11, 2020
8a4227e
spelling: expression
jsoref Nov 11, 2020
4f0832a
spelling: fails
jsoref Nov 11, 2020
2e13fdc
spelling: falling back
jsoref Nov 11, 2020
77e56c3
spelling: falls back
jsoref Nov 11, 2020
a1a0bab
spelling: frequency
jsoref Nov 11, 2020
75fd210
spelling: grouping
jsoref Nov 11, 2020
bf814c7
spelling: i.e.
jsoref Nov 11, 2020
a704d00
spelling: interpret
jsoref Nov 11, 2020
d02aafa
spelling: invocation
jsoref Nov 11, 2020
8621fd2
spelling: join
jsoref Nov 11, 2020
994517f
spelling: left
jsoref Nov 11, 2020
e2a2379
spelling: location
jsoref Nov 11, 2020
f5f4457
spelling: message
jsoref Nov 11, 2020
d8d2165
spelling: metastore
jsoref Nov 11, 2020
5fd1b5c
spelling: micros
jsoref Nov 11, 2020
7f456e9
spelling: milliseconds
jsoref Nov 11, 2020
749ad96
spelling: multi
jsoref Nov 11, 2020
685727f
spelling: multiple
jsoref Nov 11, 2020
d3c59e0
spelling: multiplier
jsoref Nov 11, 2020
c2b988a
spelling: nonexistent
jsoref Nov 11, 2020
790c5cd
spelling: nullability
jsoref Nov 11, 2020
851564b
spelling: occurred
jsoref Nov 11, 2020
4d1b4cf
spelling: outermost
jsoref Nov 11, 2020
8ba417d
spelling: parameters
jsoref Nov 11, 2020
926f8f6
spelling: params
jsoref Nov 11, 2020
ae8adfb
spelling: partitioning
jsoref Nov 11, 2020
6ae6938
spelling: partitions
jsoref Nov 11, 2020
2c6ed01
spelling: pattern
jsoref Nov 11, 2020
a09e19c
spelling: processing
jsoref Nov 11, 2020
1039e70
spelling: ranges
jsoref Nov 11, 2020
f921f41
spelling: receive
jsoref Nov 11, 2020
1d9423c
spelling: serializes
jsoref Nov 11, 2020
6b5f422
spelling: simplifying
jsoref Nov 11, 2020
c0cb9b3
spelling: subexpression
jsoref Nov 11, 2020
d75554c
spelling: thrift
jsoref Nov 11, 2020
53c6710
spelling: trimmed
jsoref Nov 11, 2020
8d8621f
spelling: unexpected
jsoref Nov 11, 2020
2eb4c49
spelling: unknown
jsoref Nov 11, 2020
21ff23c
spelling: unmodified
jsoref Nov 11, 2020
cc78112
spelling: unnecessary
jsoref Nov 11, 2020
e9f2242
spelling: unrecognized
jsoref Nov 11, 2020
2ee44e2
spelling: upper
jsoref Nov 11, 2020
1f9f1bb
spelling: when
jsoref Nov 11, 2020
6 changes: 3 additions & 3 deletions sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
@@ -351,7 +351,7 @@ trait Row extends Serializable {
/**
* Returns the value at position i.
* For primitive types if value is null it returns 'zero value' specific for primitive
- * ie. 0 for Int - use isNullAt to ensure that value is not null
+ * i.e. 0 for Int - use isNullAt to ensure that value is not null
*
* @throws ClassCastException when data type does not match.
*/
@@ -360,7 +360,7 @@ trait Row extends Serializable {
/**
* Returns the value of a given fieldName.
* For primitive types if value is null it returns 'zero value' specific for primitive
- * ie. 0 for Int - use isNullAt to ensure that value is not null
+ * i.e. 0 for Int - use isNullAt to ensure that value is not null
*
* @throws UnsupportedOperationException when schema is not defined.
* @throws IllegalArgumentException when fieldName do not exist.
@@ -381,7 +381,7 @@ trait Row extends Serializable {
/**
* Returns a Map consisting of names and values for the requested fieldNames
* For primitive types if value is null it returns 'zero value' specific for primitive
- * ie. 0 for Int - use isNullAt to ensure that value is not null
+ * i.e. 0 for Int - use isNullAt to ensure that value is not null
*
* @throws UnsupportedOperationException when schema is not defined.
* @throws IllegalArgumentException when fieldName do not exist.
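
The zero-value caveat documented above is easy to trip over. Below is a minimal sketch of the recommended isNullAt check; the row contents and column layout are made up for illustration.

```scala
import org.apache.spark.sql.Row

// Hypothetical row with a nullable Int column at position 0.
val row = Row(null, "ok")

// Calling row.getInt(0) directly would either throw or yield a misleading
// "zero value"; check isNullAt first, as the scaladoc above advises.
val age: Option[Int] = if (row.isNullAt(0)) None else Some(row.getInt(0))
```
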
@@ -51,7 +51,7 @@ abstract class StructFilters(pushedFilters: Seq[sources.Filter], schema: StructT

/**
* Resets states of pushed down filters. The method must be called before
- * precessing any new row otherwise `skipRow()` may return wrong result.
+ * processing any new row otherwise `skipRow()` may return wrong result.
*/
def reset(): Unit

@@ -1473,7 +1473,7 @@ class Analyzer(override val catalogManager: CatalogManager)
val rightRes = rightAttributes
.map(x => resolveExpressionBottomUp(x, right).asInstanceOf[Attribute])
f.copy(leftAttributes = leftRes, rightAttributes = rightRes)
- // intersect/except will be rewritten to join at the begininng of optimizer. Here we need to
+ // intersect/except will be rewritten to join at the beginning of optimizer. Here we need to
// deduplicate the right side plan, so that we won't produce an invalid self-join later.
case i @ Intersect(left, right, _) if !i.duplicateResolved =>
i.copy(right = dedupRight(left, right))
@@ -579,7 +579,7 @@ trait CheckAnalysis extends PredicateHelper {

case showPartitions: ShowPartitions => checkShowPartitions(showPartitions)

- case _ => // Fallbacks to the following checks
+ case _ => // Falls back to the following checks
}

operator match {
@@ -55,7 +55,7 @@ object StreamingJoinHelper extends PredicateHelper with Logging {
* given the join condition and the event time watermark. This is how it works.
* - The condition is split into conjunctive predicates, and we find the predicates of the
* form `leftTime + c1 < rightTime + c2` (or <=, >, >=).
- * - We canoncalize the predicate and solve it with the event time watermark value to find the
+ * - We canonicalize the predicate and solve it with the event time watermark value to find the
* value of the state watermark.
* This function is supposed to make best-effort attempt to get the state watermark. If there is
* any error, it will return None.
@@ -94,7 +94,7 @@ object StreamingJoinHelper extends PredicateHelper with Logging {

// The generated the state watermark cleanup expression is inclusive of the state watermark.
// If state watermark is W, all state where timestamp <= W will be cleaned up.
- // Now when the canonicalized join condition solves to leftTime >= W, we dont want to clean
+ // Now when the canonicalized join condition solves to leftTime >= W, we don't want to clean
// up leftTime <= W. Rather we should clean up leftTime <= W - 1. Hence the -1 below.
val stateWatermark = predicate match {
case LessThan(l, r) => getStateWatermarkSafely(l, r)
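
A tiny worked example of the "-1" adjustment described in that comment; the numbers are illustrative and not taken from the patch.

```scala
// Suppose the canonicalized condition solves to `leftTime >= 10` for an
// event-time watermark of 10 seconds. Rows with leftTime == 10 may still
// join, so state is only safe to drop for leftTime <= 9, hence the `- 1`.
val eventTimeWatermarkSec = 10L
val stateWatermarkSec = eventTimeWatermarkSec - 1 // clean up state with leftTime <= 9
```
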
@@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
* Updates nullability of Attributes in a resolved LogicalPlan by using the nullability of
* corresponding Attributes of its children output Attributes. This step is needed because
* users can use a resolved AttributeReference in the Dataset API and outer joins
- * can change the nullability of an AttribtueReference. Without this rule, a nullable column's
+ * can change the nullability of an AttributeReference. Without this rule, a nullable column's
* nullable field can be actually set as non-nullable, which cause illegal optimization
* (e.g., NULL propagation) and wrong answers.
* See SPARK-13484 and SPARK-13801 for the concrete queries of this case.
@@ -1610,7 +1610,7 @@ class SessionCatalog(
}

/**
- * Validate the new locatoin before renaming a managed table, which should be non-existent.
+ * Validate the new location before renaming a managed table, which should be non-existent.
*/
private def validateNewLocationOfRename(
oldName: TableIdentifier,
@@ -143,7 +143,7 @@ class CSVInferSchema(val options: CSVOptions) extends Serializable {
// The conversion can fail when the `field` is not a form of number.
val bigDecimal = decimalParser(field)
// Because many other formats do not support decimal, it reduces the cases for
- // decimals by disallowing values having scale (eg. `1.1`).
+ // decimals by disallowing values having scale (e.g. `1.1`).
if (bigDecimal.scale <= 0) {
// `DecimalType` conversion can fail when
// 1. The precision is bigger than 38.
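
A standalone sketch of the scale check mentioned above; the sample values are made up, and the real code applies the check to the parsed `field`.

```scala
import java.math.BigDecimal

// Values without a fractional part have scale <= 0 and may be inferred as a
// decimal; anything with a positive scale, e.g. "1.1", is left for wider types
// such as double.
val integral = new BigDecimal("38500")   // scale == 0
val fractional = new BigDecimal("1.1")   // scale == 1
assert(integral.scale <= 0)
assert(fractional.scale > 0)
```
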
@@ -189,7 +189,7 @@ object ExpressionEncoder {
}

/**
- * Function that serializesa an object of type `T` to an [[InternalRow]]. This class is not
+ * Function that serializes an object of type `T` to an [[InternalRow]]. This class is not
* thread-safe. Note that multiple calls to `apply(..)` return the same actual [[InternalRow]]
* object. Thus, the caller should copy the result before making another call if required.
*/
@@ -64,7 +64,7 @@ trait AliasHelper {

/**
* Replace all attributes, that reference an alias, with the aliased expression,
- * but keep the name of the outmost attribute.
+ * but keep the name of the outermost attribute.
*/
protected def replaceAliasButKeepName(
expr: NamedExpression,
@@ -1145,7 +1145,7 @@ case class ScalaUDF(
val resultConverter = s"$convertersTerm[${children.length}]"
val boxedType = CodeGenerator.boxedType(dataType)

- val funcInvokation = if (isPrimitive(dataType)
+ val funcInvocation = if (isPrimitive(dataType)
// If the output is nullable, the returned value must be unwrapped from the Option
&& !nullable) {
s"$resultTerm = ($boxedType)$getFuncResult"
@@ -1156,7 +1156,7 @@ case class ScalaUDF(
s"""
|$boxedType $resultTerm = null;
|try {
- | $funcInvokation;
+ | $funcInvocation;
|} catch (Exception e) {
| throw new org.apache.spark.SparkException($errorMsgTerm, e);
|}
@@ -191,13 +191,13 @@ case class Percentile(

val sortedCounts = buffer.toSeq.sortBy(_._1)(
child.dataType.asInstanceOf[NumericType].ordering.asInstanceOf[Ordering[AnyRef]])
- val accumlatedCounts = sortedCounts.scanLeft((sortedCounts.head._1, 0L)) {
+ val accumulatedCounts = sortedCounts.scanLeft((sortedCounts.head._1, 0L)) {
case ((key1, count1), (key2, count2)) => (key2, count1 + count2)
}.tail
- val maxPosition = accumlatedCounts.last._2 - 1
+ val maxPosition = accumulatedCounts.last._2 - 1

percentages.map { percentile =>
- getPercentile(accumlatedCounts, maxPosition * percentile)
+ getPercentile(accumulatedCounts, maxPosition * percentile)
}
}

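
The renamed accumulatedCounts logic can be seen in isolation with made-up data; this is a rough sketch, not the aggregation buffer Percentile actually operates on.

```scala
// (value, count) pairs already sorted by value.
val sortedCounts = Seq((1, 2L), (5, 1L), (9, 3L))

// Running total of counts, keyed by the current value: Seq((1,2), (5,3), (9,6)).
val accumulatedCounts = sortedCounts.scanLeft((sortedCounts.head._1, 0L)) {
  case ((_, count1), (key2, count2)) => (key2, count1 + count2)
}.tail

val maxPosition = accumulatedCounts.last._2 - 1 // 5, the largest 0-based position
```
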
@@ -216,7 +216,7 @@ abstract class BinaryArithmetic extends BinaryOperator with NullIntolerant {
case DoubleType | FloatType =>
// When Double/Float overflows, there can be 2 cases:
// - precision loss: according to SQL standard, the number is truncated;
- // - returns (+/-)Infinite: same behavior also other DBs have (eg. Postgres)
+ // - returns (+/-)Infinite: same behavior also other DBs have (e.g. Postgres)
nullSafeCodeGen(ctx, ev, (eval1, eval2) => {
s"""
|${ev.value} = $eval1 $symbol $eval2;
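
Both Double behaviors called out in that comment are easy to reproduce in plain Scala; this is a quick illustration, unrelated to the generated code itself.

```scala
// Precision loss: 1e16 is beyond 2^53, so adding 1.0 is silently dropped.
val truncated = 1e16 + 1.0
assert(truncated == 1e16)

// Overflow: exceeding Double.MaxValue yields +Infinity, as in e.g. Postgres.
val overflowed = Double.MaxValue * 2.0
assert(overflowed.isPosInfinity)
```
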
@@ -175,7 +175,7 @@ class CodegenContext extends Logging {
mutable.ArrayBuffer.empty[(String, String)]

/**
- * The mapping between mutable state types and corrseponding compacted arrays.
+ * The mapping between mutable state types and corresponding compacted arrays.
* The keys are java type string. The values are [[MutableStateArrays]] which encapsulates
* the compacted arrays for the mutable states with the same java type.
*
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, ArrayData, Generic
import org.apache.spark.sql.types._

/**
- * Java can not access Projection (in package object)
+ * Java cannot access Projection (in package object)
*/
abstract class BaseProjection extends Projection {}

@@ -907,7 +907,7 @@ object HiveHashFunction extends InterpretedHashFunction {
* - year, month (stored as HiveIntervalYearMonth)
* - day, hour, minute, second, nanosecond (stored as HiveIntervalDayTime)
*
- * eg. (INTERVAL '30' YEAR + INTERVAL '-23' DAY) fails in Hive
+ * e.g. (INTERVAL '30' YEAR + INTERVAL '-23' DAY) fails in Hive
*
* This method mimics HiveIntervalDayTime.hashCode() in Hive.
*
@@ -919,7 +919,7 @@ object HiveHashFunction extends InterpretedHashFunction {
*
* - Spark's [[CalendarInterval]] has precision upto microseconds but Hive's
* HiveIntervalDayTime can store data with precision upto nanoseconds. So, any input intervals
- * with nanosecond values will lead to wrong output hashes (ie. non adherent with Hive output)
+ * with nanosecond values will lead to wrong output hashes (i.e. non adherent with Hive output)
*/
def hashCalendarInterval(calendarInterval: CalendarInterval): Long = {
val totalMicroSeconds = calendarInterval.days * MICROS_PER_DAY + calendarInterval.microseconds
@@ -128,7 +128,7 @@ trait HigherOrderFunction extends Expression with ExpectsInputTypes {
def argumentTypes: Seq[AbstractDataType]

/**
- * All arguments have been resolved. This means that the types and nullabilty of (most of) the
+ * All arguments have been resolved. This means that the types and nullability of (most of) the
* lambda function arguments is known, and that we can start binding the lambda functions.
*/
lazy val argumentsResolved: Boolean = arguments.forall(_.resolved)
@@ -808,10 +808,10 @@ case class SchemaOfJson(
}

/**
- * A function that returns the number of elements in the outmost JSON array.
+ * A function that returns the number of elements in the outermost JSON array.
*/
@ExpressionDescription(
- usage = "_FUNC_(jsonArray) - Returns the number of elements in the outmost JSON array.",
+ usage = "_FUNC_(jsonArray) - Returns the number of elements in the outermost JSON array.",
arguments = """
Arguments:
* jsonArray - A JSON array. `NULL` is returned in case of any other valid JSON string,
@@ -877,13 +877,13 @@ case class LengthOfJsonArray(child: Expression) extends UnaryExpression
}

/**
- * A function which returns all the keys of the outmost JSON object.
+ * A function which returns all the keys of the outermost JSON object.
*/
@ExpressionDescription(
- usage = "_FUNC_(json_object) - Returns all the keys of the outmost JSON object as an array.",
+ usage = "_FUNC_(json_object) - Returns all the keys of the outermost JSON object as an array.",
arguments = """
Arguments:
- * json_object - A JSON object. If a valid JSON object is given, all the keys of the outmost
+ * json_object - A JSON object. If a valid JSON object is given, all the keys of the outermost
object will be returned as an array. If it is any other valid JSON string, an invalid JSON
string or an empty string, the function returns null.
""",
@@ -921,7 +921,7 @@ case class JsonObjectKeys(child: Expression) extends UnaryExpression with Codege
if (parser.nextToken() == null || parser.currentToken() != JsonToken.START_OBJECT) {
return null
}
- // Parse the JSON string to get all the keys of outmost JSON object
+ // Parse the JSON string to get all the keys of outermost JSON object
getJsonKeys(parser, input)
}
}
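
A hedged usage sketch for the two expressions above, assuming they are exposed under the SQL names `json_array_length` and `json_object_keys` and that an active `spark` session is in scope.

```scala
// Number of elements in the outermost JSON array only.
spark.sql("SELECT json_array_length('[1, 2, [3, 4]]')").show() // 3

// Keys of the outermost JSON object; nested keys are not included.
spark.sql("""SELECT json_object_keys('{"a": 1, "b": {"c": 2}}')""").show() // [a, b]
```
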
@@ -93,7 +93,7 @@ abstract class StringRegexExpression extends BinaryExpression
Since Spark 2.0, string literals are unescaped in our SQL parser. For example, in order
to match "\abc", the pattern should be "\\abc".

- When SQL config 'spark.sql.parser.escapedStringLiterals' is enabled, it fallbacks
+ When SQL config 'spark.sql.parser.escapedStringLiterals' is enabled, it falls back
to Spark 1.6 behavior regarding string literal parsing. For example, if the config is
enabled, the pattern to match "\abc" should be "\abc".
* escape - an character added since Spark 3.0. The default escape character is the '\'.
@@ -173,7 +173,7 @@ sealed trait WindowFrame extends Expression with Unevaluable {
case object UnspecifiedFrame extends WindowFrame

/**
- * A specified Window Frame. The val lower/uppper can be either a foldable [[Expression]] or a
+ * A specified Window Frame. The val lower/upper can be either a foldable [[Expression]] or a
* [[SpecialFrameBoundary]].
*/
case class SpecifiedWindowFrame(
@@ -227,7 +227,7 @@ object NestedColumnAliasing {
}

/**
- * This prunes unnessary nested columns from `Generate` and optional `Project` on top
+ * This prunes unnecessary nested columns from `Generate` and optional `Project` on top
* of it.
*/
object GeneratorNestedColumnAliasing {
@@ -844,7 +844,7 @@ object CollapseWindow extends Rule[LogicalPlan] {
* of the child window expression, transpose them.
*/
object TransposeWindow extends Rule[LogicalPlan] {
- private def compatibleParititions(ps1 : Seq[Expression], ps2: Seq[Expression]): Boolean = {
+ private def compatiblePartitions(ps1 : Seq[Expression], ps2: Seq[Expression]): Boolean = {
ps1.length < ps2.length && ps2.take(ps1.length).permutations.exists(ps1.zip(_).forall {
case (l, r) => l.semanticEquals(r)
})
@@ -855,7 +855,7 @@ object TransposeWindow extends Rule[LogicalPlan] {
if w1.references.intersect(w2.windowOutputSet).isEmpty &&
w1.expressions.forall(_.deterministic) &&
w2.expressions.forall(_.deterministic) &&
- compatibleParititions(ps1, ps2) =>
+ compatiblePartitions(ps1, ps2) =>
Project(w1.output, Window(we2, ps2, os2, Window(we1, ps1, os1, grandChild)))
}
}
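
The renamed helper's check can be mirrored with plain strings standing in for Expressions and `==` standing in for `semanticEquals`; a simplified sketch of the intent, not the rule itself.

```scala
def compatiblePartitions(ps1: Seq[String], ps2: Seq[String]): Boolean =
  ps1.length < ps2.length && ps2.take(ps1.length).permutations.exists(ps1.zip(_).forall {
    case (l, r) => l == r
  })

compatiblePartitions(Seq("a"), Seq("a", "b"))           // true: ps1 matches a prefix of ps2
compatiblePartitions(Seq("b", "a"), Seq("a", "b", "c")) // true: the prefix match ignores order
compatiblePartitions(Seq("a"), Seq("b", "c"))           // false: "a" is not in ps2's prefix
```
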
@@ -172,7 +172,7 @@ object PushDownLeftSemiAntiJoin extends Rule[LogicalPlan] with PredicateHelper {
* TODO:
* Currently this rule can push down the left semi or left anti joins to either
* left or right leg of the child join. This matches the behaviour of `PushPredicateThroughJoin`
- when the lefi semi or left anti join is in expression form. We need to explore the possibility
+ when the left semi or left anti join is in expression form. We need to explore the possibility
* to push the left semi/anti joins to both legs of join if the join condition refers to
* both left and right legs of the child join.
*/
@@ -57,7 +57,7 @@ object ConstantFolding extends Rule[LogicalPlan] {
/**
* Substitutes [[Attribute Attributes]] which can be statically evaluated with their corresponding
* value in conjunctive [[Expression Expressions]]
- * eg.
+ * e.g.
* {{{
* SELECT * FROM table WHERE i = 5 AND j = i + 3
* ==> SELECT * FROM table WHERE i = 5 AND j = 8
@@ -63,7 +63,7 @@ object RewritePredicateSubquery extends Rule[LogicalPlan] with PredicateHelper {
// the produced join then becomes unresolved and break structural integrity. We should
// de-duplicate conflicting attributes.
// SPARK-26078: it may also happen that the subquery has conflicting attributes with the outer
- // values. In this case, the resulting join would contain trivially true conditions (eg.
+ // values. In this case, the resulting join would contain trivially true conditions (e.g.
// id#3 = id#3) which cannot be de-duplicated after. In this method, if there are conflicting
// attributes in the join condition, the subquery's conflicting attributes are changed using
// a projection which aliases them and resolves the problem.
@@ -174,7 +174,7 @@ object RewritePredicateSubquery extends Rule[LogicalPlan] with PredicateHelper {
val inConditions = values.zip(sub.output).map(EqualTo.tupled)
// To handle a null-aware predicate not-in-subquery in nested conditions
// (e.g., `v > 0 OR t1.id NOT IN (SELECT id FROM t2)`), we transform
- // `inConditon` (t1.id=t2.id) into `(inCondition) OR ISNULL(inCondition)`.
+ // `inCondition` (t1.id=t2.id) into `(inCondition) OR ISNULL(inCondition)`.
//
// For example, `SELECT * FROM t1 WHERE v > 0 OR t1.id NOT IN (SELECT id FROM t2)`
// is transformed into a plan below;
@@ -565,7 +565,7 @@ object RewriteCorrelatedScalarSubquery extends Rule[LogicalPlan] with AliasHelpe
subqueryRoot = Project(projList ++ havingInputs, subqueryRoot)
case s @ SubqueryAlias(alias, _) =>
subqueryRoot = SubqueryAlias(alias, subqueryRoot)
- case op => sys.error(s"Unexpected operator $op in corelated subquery")
+ case op => sys.error(s"Unexpected operator $op in correlated subquery")
}

// CASE WHEN alwaysTrue IS NULL THEN resultOnZeroTups
@@ -127,7 +127,7 @@ object ParserUtils {
}
}

- /** Unescape baskslash-escaped string enclosed by quotes. */
+ /** Unescape backslash-escaped string enclosed by quotes. */
def unescapeSQLString(b: String): String = {
var enclosure: Character = null
val sb = new StringBuilder(b.length())
@@ -397,7 +397,7 @@ abstract class QueryPlan[PlanType <: QueryPlan[PlanType]]

case ar: AttributeReference if allAttributes.indexOf(ar.exprId) == -1 =>
// Top level `AttributeReference` may also be used for output like `Alias`, we should
- // normalize the epxrId too.
+ // normalize the exprId too.
id += 1
ar.withExprId(ExprId(id)).canonicalized

@@ -136,7 +136,7 @@ abstract class LogicalPlan
def outputOrdering: Seq[SortOrder] = Nil

/**
- * Returns true iff `other`'s output is semantically the same, ie.:
+ * Returns true iff `other`'s output is semantically the same, i.e.:
* - it contains the same number of `Attribute`s;
* - references are the same;
* - the order is equal too.
@@ -29,7 +29,7 @@ object PlanHelper {
/**
* Check if there's any expression in this query plan operator that is
* - A WindowExpression but the plan is not Window
- * - An AggregateExpresion but the plan is not Aggregate or Window
+ * - An AggregateExpression but the plan is not Aggregate or Window
* - A Generator but the plan is not Generate
* Returns the list of invalid expressions that this operator hosts. This can happen when
* 1. The input query from users contain invalid expressions.
@@ -642,7 +642,7 @@ object Expand {
val numAttributes = attrMap.size
assert(numAttributes <= GroupingID.dataType.defaultSize * 8)
val mask = if (numAttributes != 64) (1L << numAttributes) - 1 else 0xFFFFFFFFFFFFFFFFL
- // Calculate the attrbute masks of selected grouping set. For example, if we have GroupBy
+ // Calculate the attribute masks of selected grouping set. For example, if we have GroupBy
// attributes (a, b, c, d), grouping set (a, c) will produce the following sequence:
// (15, 7, 13), whose binary form is (1111, 0111, 1101)
val masks = (mask +: groupingSetAttrs.map(attrMap).map(index =>