diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index 07578261781b7..5935017704eda 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -302,7 +302,7 @@ class AstBuilder(conf: SQLConf) extends SqlBaseBaseVisitor[AnyRef] with Logging
    */
   override def visitQuerySpecification(
       ctx: QuerySpecificationContext): LogicalPlan = withOrigin(ctx) {
-    val from = OneRowRelation.optional(ctx.fromClause) {
+    val from = OneRowRelation().optional(ctx.fromClause) {
       visitFromClause(ctx.fromClause)
     }
     withQuerySpecification(ctx, from)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
index 0bd3166352d35..303014e0b8d31 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
@@ -772,10 +772,13 @@ case class RepartitionByExpression(
 /**
  * A relation with one row. This is used in "SELECT ..." without a from clause.
  */
-case object OneRowRelation extends LeafNode {
+case class OneRowRelation() extends LeafNode {
   override def maxRows: Option[Long] = Some(1)
   override def output: Seq[Attribute] = Nil
   override def computeStats(): Statistics = Statistics(sizeInBytes = 1)
+
+  /** [[org.apache.spark.sql.catalyst.trees.TreeNode.makeCopy()]] does not support 0-arg ctor. */
+  override def makeCopy(newArgs: Array[AnyRef]): OneRowRelation = OneRowRelation()
 }
 
 /** A logical plan for `dropDuplicates`. */
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
index 9bcf4773fa903..847713a0455b9 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala
@@ -384,7 +384,7 @@ class AnalysisSuite extends AnalysisTest with ShouldMatchers {
       expression: Expression,
       expectedDataType: DataType): Unit = {
     val afterAnalyze =
-      Project(Seq(Alias(expression, "a")()), OneRowRelation).analyze.expressions.head
+      Project(Seq(Alias(expression, "a")()), OneRowRelation()).analyze.expressions.head
     if (!afterAnalyze.dataType.equals(expectedDataType)) {
       fail(
         s"""
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
index b6399edb68dd6..0496d611ec3c7 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
@@ -187,7 +187,7 @@ trait ExpressionEvalHelper extends GeneratorDrivenPropertyChecks {
       expression: Expression,
       expected: Any,
       inputRow: InternalRow = EmptyRow): Unit = {
-    val plan = Project(Alias(expression, s"Optimized($expression)")() :: Nil, OneRowRelation)
+    val plan = Project(Alias(expression, s"Optimized($expression)")() :: Nil, OneRowRelation())
     val optimizedPlan = SimpleTestOptimizer.execute(plan)
     checkEvaluationWithoutCodegen(optimizedPlan.expressions.head, expected, inputRow)
   }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MathExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MathExpressionsSuite.scala
index 9ee777529aeda..39e0060d41dd4 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MathExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/MathExpressionsSuite.scala
@@ -151,7 +151,7 @@ class MathExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
   private def checkNaNWithOptimization(
       expression: Expression,
       inputRow: InternalRow = EmptyRow): Unit = {
-    val plan = Project(Alias(expression, s"Optimized($expression)")() :: Nil, OneRowRelation)
+    val plan = Project(Alias(expression, s"Optimized($expression)")() :: Nil, OneRowRelation())
     val optimizedPlan = SimpleTestOptimizer.execute(plan)
     checkNaNWithoutCodegen(optimizedPlan.expressions.head, inputRow)
   }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala
index 08e58d47e0e25..77e4eff26c69b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/ColumnPruningSuite.scala
@@ -164,7 +164,7 @@ class ColumnPruningSuite extends PlanTest {
   }
 
   test("Eliminate the Project with an empty projectList") {
-    val input = OneRowRelation
+    val input = OneRowRelation()
     val expected = Project(Literal(1).as("1") :: Nil, input).analyze
 
     val query1 =
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombineConcatsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombineConcatsSuite.scala
index 7aa9fbba9a10a..412e199dfaae3 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombineConcatsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/CombineConcatsSuite.scala
@@ -32,8 +32,8 @@ class CombineConcatsSuite extends PlanTest {
   }
 
   protected def assertEquivalent(e1: Expression, e2: Expression): Unit = {
-    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation).analyze
-    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation).analyze)
+    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation()).analyze
+    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation()).analyze)
     comparePlans(actual, correctAnswer)
   }
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala
index 9b6d68aee803a..4df1a145a271b 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/EliminateSubqueryAliasesSuite.scala
@@ -35,8 +35,8 @@ class EliminateSubqueryAliasesSuite extends PlanTest with PredicateHelper {
   }
 
   private def assertEquivalent(e1: Expression, e2: Expression): Unit = {
-    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation).analyze
-    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation).analyze)
+    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation()).analyze
+    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation()).analyze)
     comparePlans(actual, correctAnswer)
   }
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FoldablePropagationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FoldablePropagationSuite.scala
index d128315b68869..dccb32f0379a8 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FoldablePropagationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FoldablePropagationSuite.scala
@@ -34,12 +34,12 @@ class FoldablePropagationSuite extends PlanTest {
   val testRelation = LocalRelation('a.int, 'b.int)
 
   test("Propagate from subquery") {
-    val query = OneRowRelation
+    val query = OneRowRelation()
       .select(Literal(1).as('a), Literal(2).as('b))
       .subquery('T)
       .select('a, 'b)
     val optimized = Optimize.execute(query.analyze)
-    val correctAnswer = OneRowRelation
+    val correctAnswer = OneRowRelation()
       .select(Literal(1).as('a), Literal(2).as('b))
       .subquery('T)
       .select(Literal(1).as('a), Literal(2).as('b)).analyze
@@ -152,7 +152,7 @@ class FoldablePropagationSuite extends PlanTest {
     val expand = Expand(
       Seq(Seq(Literal(null), 'b), Seq('a, Literal(null))),
       Seq(a1, a2),
-      OneRowRelation.select(c1, c2))
+      OneRowRelation().select(c1, c2))
     val query = expand.where(a1.isNotNull).select(a1, a2).analyze
     val optimized = Optimize.execute(query)
     val correctExpand = expand.copy(projections = Seq(
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizeCodegenSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizeCodegenSuite.scala
index 2abf9fe6aa490..b1157f3e3edd2 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizeCodegenSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/OptimizeCodegenSuite.scala
@@ -32,8 +32,8 @@ class OptimizeCodegenSuite extends PlanTest {
   }
 
   protected def assertEquivalent(e1: Expression, e2: Expression): Unit = {
-    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation).analyze
-    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation).analyze)
+    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation()).analyze
+    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation()).analyze)
     comparePlans(actual, correctAnswer)
   }
 
@@ -58,13 +58,13 @@ class OptimizeCodegenSuite extends PlanTest {
   }
 
   test("Multiple CaseWhen in one operator.") {
-    val plan = OneRowRelation
+    val plan = OneRowRelation()
       .select(
         CaseWhen(Seq((TrueLiteral, Literal(1))), Literal(2)),
         CaseWhen(Seq((FalseLiteral, Literal(3))), Literal(4)),
         CaseWhen(List.fill(20)((TrueLiteral, Literal(0))), Literal(0)),
         CaseWhen(Seq((TrueLiteral, Literal(5))), Literal(6))).analyze
-    val correctAnswer = OneRowRelation
+    val correctAnswer = OneRowRelation()
       .select(
         CaseWhen(Seq((TrueLiteral, Literal(1))), Literal(2)).toCodegen(),
         CaseWhen(Seq((FalseLiteral, Literal(3))), Literal(4)).toCodegen(),
@@ -75,7 +75,7 @@ class OptimizeCodegenSuite extends PlanTest {
   }
 
   test("Multiple CaseWhen in different operators") {
-    val plan = OneRowRelation
+    val plan = OneRowRelation()
       .select(
         CaseWhen(Seq((TrueLiteral, Literal(1))), Literal(2)),
         CaseWhen(Seq((FalseLiteral, Literal(3))), Literal(4)),
@@ -85,7 +85,7 @@ class OptimizeCodegenSuite extends PlanTest {
         CaseWhen(Seq((TrueLiteral, Literal(5))), Literal(6)),
         CaseWhen(List.fill(20)((TrueLiteral, Literal(0))), Literal(0)))
       ).analyze
-    val correctAnswer = OneRowRelation
+    val correctAnswer = OneRowRelation()
       .select(
         CaseWhen(Seq((TrueLiteral, Literal(1))), Literal(2)).toCodegen(),
         CaseWhen(Seq((FalseLiteral, Literal(3))), Literal(4)).toCodegen(),
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala
index 21b7f49e14bd5..aa8841109329c 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SetOperationSuite.scala
@@ -79,11 +79,11 @@ class SetOperationSuite extends PlanTest {
   }
 
   test("Remove unnecessary distincts in multiple unions") {
-    val query1 = OneRowRelation
+    val query1 = OneRowRelation()
       .select(Literal(1).as('a))
-    val query2 = OneRowRelation
+    val query2 = OneRowRelation()
       .select(Literal(2).as('b))
-    val query3 = OneRowRelation
+    val query3 = OneRowRelation()
       .select(Literal(3).as('c))
 
     // D - U - D - U - query1
@@ -111,13 +111,13 @@ class SetOperationSuite extends PlanTest {
   }
 
   test("Keep necessary distincts in multiple unions") {
-    val query1 = OneRowRelation
+    val query1 = OneRowRelation()
       .select(Literal(1).as('a))
-    val query2 = OneRowRelation
+    val query2 = OneRowRelation()
       .select(Literal(2).as('b))
-    val query3 = OneRowRelation
+    val query3 = OneRowRelation()
       .select(Literal(3).as('c))
-    val query4 = OneRowRelation
+    val query4 = OneRowRelation()
       .select(Literal(4).as('d))
 
     // U - D - U - query1
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyConditionalSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyConditionalSuite.scala
index adb3e8fc8a564..b597c8e162c83 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyConditionalSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyConditionalSuite.scala
@@ -33,8 +33,8 @@ class SimplifyConditionalSuite extends PlanTest with PredicateHelper {
   }
 
   protected def assertEquivalent(e1: Expression, e2: Expression): Unit = {
-    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation).analyze
-    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation).analyze)
+    val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation()).analyze
+    val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation()).analyze)
     comparePlans(actual, correctAnswer)
   }
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala
index c7f39ae18162e..5fa72e1e92660 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala
@@ -88,11 +88,11 @@ class PlanParserSuite extends AnalysisTest {
       cte(table("cte1").select(star()), "cte1" -> table("a").select(star())))
     assertEqual(
       "with cte1 (select 1) select * from cte1",
-      cte(table("cte1").select(star()), "cte1" -> OneRowRelation.select(1)))
+      cte(table("cte1").select(star()), "cte1" -> OneRowRelation().select(1)))
     assertEqual(
       "with cte1 (select 1), cte2 as (select * from cte1) select * from cte2",
       cte(table("cte2").select(star()),
-        "cte1" -> OneRowRelation.select(1),
+        "cte1" -> OneRowRelation().select(1),
         "cte2" -> table("cte1").select(star())))
     intercept(
       "with cte1 (select 1), cte1 as (select 1 from cte1) select * from cte1",
@@ -100,8 +100,8 @@ class PlanParserSuite extends AnalysisTest {
   }
 
   test("simple select query") {
-    assertEqual("select 1", OneRowRelation.select(1))
-    assertEqual("select a, b", OneRowRelation.select('a, 'b))
+    assertEqual("select 1", OneRowRelation().select(1))
+    assertEqual("select a, b", OneRowRelation().select('a, 'b))
     assertEqual("select a, b from db.c", table("db", "c").select('a, 'b))
     assertEqual("select a, b from db.c where x < 1", table("db", "c").where('x < 1).select('a, 'b))
     assertEqual(
@@ -109,7 +109,7 @@ class PlanParserSuite extends AnalysisTest {
       table("db", "c").select('a, 'b).where('x < 1))
     assertEqual("select distinct a, b from db.c", Distinct(table("db", "c").select('a, 'b)))
     assertEqual("select all a, b from db.c", table("db", "c").select('a, 'b))
-    assertEqual("select from tbl", OneRowRelation.select('from.as("tbl")))
+    assertEqual("select from tbl", OneRowRelation().select('from.as("tbl")))
   }
 
   test("reverse select query") {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala
index 5389bf3389da4..10bdfafd6f933 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/plans/PlanTest.scala
@@ -116,7 +116,7 @@ trait PlanTest extends SparkFunSuite with PredicateHelper {
 
   /** Fails the test if the two expressions do not match */
   protected def compareExpressions(e1: Expression, e2: Expression): Unit = {
-    comparePlans(Filter(e1, OneRowRelation), Filter(e2, OneRowRelation), checkAnalysis = false)
+    comparePlans(Filter(e1, OneRowRelation()), Filter(e2, OneRowRelation()), checkAnalysis = false)
   }
 
   /** Fails the test if the join order in the two plans do not match */
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 2f8e416e7df1b..d4414b6f78ca2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -304,7 +304,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder(conf) {
         codegen = ctx.CODEGEN != null,
         cost = ctx.COST != null)
     } else {
-      ExplainCommand(OneRowRelation)
+      ExplainCommand(OneRowRelation())
     }
   }
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
index a57d5abb90c0e..691f71a7d4ac2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
@@ -422,7 +422,7 @@ abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
         execution.GenerateExec(
           generator, join = join, outer = outer, g.qualifiedGeneratorOutput,
           planLater(child)) :: Nil
-      case logical.OneRowRelation =>
+      case _: logical.OneRowRelation =>
         execution.RDDScanExec(Nil, singleRowRdd, "OneRowRelation") :: Nil
       case r: logical.Range =>
         execution.RangeExec(r) :: Nil
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryRelation.scala
index 39cf8fcac5116..bc98d8d9d6d61 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryRelation.scala
@@ -62,7 +62,6 @@ case class InMemoryRelation(
     @transient var _cachedColumnBuffers: RDD[CachedBatch] = null,
     val batchStats: LongAccumulator = child.sqlContext.sparkContext.longAccumulator)
   extends logical.LeafNode with MultiInstanceRelation {
-
   override def innerChildren: Seq[SparkPlan] = Seq(child)
 
   override def producedAttributes: AttributeSet = outputSet
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala
index b97fa54446e0c..cb8dc1e041a9b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala
@@ -479,7 +479,7 @@ object PreWriteCheck extends (LogicalPlan => Unit) {
       case InsertIntoTable(t, _, _, _, _)
         if !t.isInstanceOf[LeafNode] ||
           t.isInstanceOf[Range] ||
-          t == OneRowRelation ||
+          t.isInstanceOf[OneRowRelation] ||
           t.isInstanceOf[LocalRelation] =>
         failAnalysis(s"Inserting into an RDD-based table is not allowed.")
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 7c500728bdec9..b41ff3f921363 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -1333,7 +1333,7 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
     assert(e2.getMessage.contains("Inserting into an RDD-based table is not allowed."))
 
     // error case: insert into an OneRowRelation
-    Dataset.ofRows(spark, OneRowRelation).createOrReplaceTempView("one_row")
+    Dataset.ofRows(spark, OneRowRelation()).createOrReplaceTempView("one_row")
     val e3 = intercept[AnalysisException] {
       insertion.write.insertInto("one_row")
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala
index afccbe5cc6d19..964440346deb0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala
@@ -27,7 +27,7 @@ class QueryExecutionSuite extends SharedSQLContext {
       override def apply(plan: LogicalPlan): Seq[SparkPlan] = Nil
     })
 
-    def qe: QueryExecution = new QueryExecution(spark, OneRowRelation)
+    def qe: QueryExecution = new QueryExecution(spark, OneRowRelation())
 
     // Nothing!
    assert(qe.toString.contains("OneRowRelation"))
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 801f9b9923641..3a5c0c397b15e 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -463,7 +463,7 @@ private[hive] class TestHiveSparkSession(
     // has already set the execution id.
     if (sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY) == null) {
       // We don't actually have a `QueryExecution` here, use a fake one instead.
-      SQLExecution.withNewExecutionId(this, new QueryExecution(this, OneRowRelation)) {
+      SQLExecution.withNewExecutionId(this, new QueryExecution(this, OneRowRelation())) {
        createCmds.foreach(_())
       }
     } else {
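
Note on the core change: `OneRowRelation` moves from a `case object` (a JVM singleton matched by value) to a zero-argument `case class` (distinct but structurally equal instances matched by type). That is why `case logical.OneRowRelation =>` becomes `case _: logical.OneRowRelation =>` in SparkStrategies, why `t == OneRowRelation` becomes `t.isInstanceOf[OneRowRelation]` in PreWriteCheck, and why the `makeCopy` override is added: per the new doc comment, `TreeNode.makeCopy()` resolves constructors reflectively and does not support a 0-arg constructor. The following standalone Scala sketch illustrates the difference; it is not Spark code, and `Plan` and `OneRowRelationDemo` are made-up names used only for this example.

// Sketch: why `case object` -> `case class X()` forces type patterns.
sealed trait Plan
case class OneRowRelation() extends Plan {
  // Mirrors the override in the patch: build the copy directly instead of
  // relying on reflective constructor lookup, which has no argument list to
  // match against for a 0-arg constructor.
  def makeCopy(newArgs: Array[AnyRef]): OneRowRelation = OneRowRelation()
}

object OneRowRelationDemo extends App {
  val a = OneRowRelation()
  val b = OneRowRelation()

  assert(a == b)    // case-class structural equality still holds
  assert(!(a eq b)) // but each call site now builds a distinct instance

  def name(p: Plan): String = p match {
    case _: OneRowRelation => "OneRowRelation" // type pattern, not value match
  }
  println(name(a))
}

Because equality stays structural, plan comparisons such as `comparePlans` in the test changes continue to pass unchanged; only value-based pattern matches, identity comparisons, and reflective copying needed updating.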