
Commit a73fda4

Fix the method calls to QueryTest.checkAnswer in tests
1 parent 81996f9 commit a73fda4

File tree

5 files changed, +26 -13 lines changed


sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala

Lines changed: 20 additions & 7 deletions
@@ -21,6 +21,8 @@ import java.util.{Locale, TimeZone}
 
 import scala.collection.JavaConverters._
 
+import org.scalatest.Assertions
+
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.util._
 import org.apache.spark.sql.execution.SQLExecution
@@ -150,10 +152,7 @@ abstract class QueryTest extends PlanTest {
 
     assertEmptyMissingInput(analyzedDF)
 
-    QueryTest.checkAnswer(analyzedDF, expectedAnswer) match {
-      case Some(errorMessage) => fail(errorMessage)
-      case None =>
-    }
+    QueryTest.checkAnswer(analyzedDF, expectedAnswer)
   }
 
   protected def checkAnswer(df: => DataFrame, expectedAnswer: Row): Unit = {
@@ -235,7 +234,21 @@ abstract class QueryTest extends PlanTest {
   }
 }
 
-object QueryTest {
+object QueryTest extends Assertions {
+  /**
+   * Runs the plan and makes sure the answer matches the expected result.
+   *
+   * @param df the [[DataFrame]] to be executed
+   * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
+   * @param checkToRDD whether to verify deserialization to an RDD. This runs the query twice.
+   */
+  def checkAnswer(df: DataFrame, expectedAnswer: Seq[Row], checkToRDD: Boolean = true): Unit = {
+    getErrorMessageInCheckAnswer(df, expectedAnswer, checkToRDD) match {
+      case Some(errorMessage) => fail(errorMessage)
+      case None =>
+    }
+  }
+
   /**
    * Runs the plan and makes sure the answer matches the expected result.
    * If there was exception during the execution or the contents of the DataFrame does not
@@ -246,7 +259,7 @@ object QueryTest {
    * @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
    * @param checkToRDD whether to verify deserialization to an RDD. This runs the query twice.
    */
-  def checkAnswer(
+  def getErrorMessageInCheckAnswer(
       df: DataFrame,
       expectedAnswer: Seq[Row],
       checkToRDD: Boolean = true): Option[String] = {
@@ -409,7 +422,7 @@ object QueryTest {
   }
 
   def checkAnswer(df: DataFrame, expectedAnswer: java.util.List[Row]): String = {
-    checkAnswer(df, expectedAnswer.asScala) match {
+    getErrorMessageInCheckAnswer(df, expectedAnswer.asScala) match {
       case Some(errorMessage) => errorMessage
       case None => null
     }
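
Net effect of the QueryTest.scala changes: the object now exposes two entry points. The new assertion-style checkAnswer fails the enclosing test directly (possible because QueryTest now extends scalatest's Assertions, which brings fail into scope), while the old Option-returning variant survives under the name getErrorMessageInCheckAnswer for callers that want the message itself. A minimal usage sketch, assuming a SparkSession is available; CheckAnswerUsage and demo are illustrative names only:

import org.apache.spark.sql.{QueryTest, Row, SparkSession}

object CheckAnswerUsage {
  def demo(spark: SparkSession): Unit = {
    // Assertion style: fails the test on any mismatch, no pattern matching needed.
    QueryTest.checkAnswer(spark.range(3).toDF("id"), Seq(Row(0L), Row(1L), Row(2L)))

    // Message style: inspect the mismatch instead of failing immediately.
    QueryTest.getErrorMessageInCheckAnswer(spark.range(2).toDF("id"), Seq(Row(0L))) match {
      case Some(errorMessage) => println(s"mismatch: $errorMessage")
      case None => // answers matched
    }
  }
}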

sql/core/src/test/scala/org/apache/spark/sql/execution/ReduceNumShufflePartitionsSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -259,7 +259,7 @@ class ReduceNumShufflePartitionsSuite extends SparkFunSuite with BeforeAndAfterA
   val numInputPartitions: Int = 10
 
   def checkAnswer(actual: => DataFrame, expectedAnswer: Seq[Row]): Unit = {
-    QueryTest.checkAnswer(actual, expectedAnswer) match {
+    QueryTest.getErrorMessageInCheckAnswer(actual, expectedAnswer) match {
       case Some(errorMessage) => fail(errorMessage)
       case None =>
     }

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala

Lines changed: 3 additions & 3 deletions
@@ -352,15 +352,15 @@ class BinaryFileFormatSuite extends QueryTest with SharedSparkSession {
         .select(CONTENT)
     }
     val expected = Seq(Row(content))
-    QueryTest.checkAnswer(readContent(), expected)
+    checkAnswer(readContent, expected)
     withSQLConf(SOURCES_BINARY_FILE_MAX_LENGTH.key -> content.length.toString) {
-      QueryTest.checkAnswer(readContent(), expected)
+      checkAnswer(readContent, expected)
     }
     // Disable read. If the implementation attempts to read, the exception would be different.
     file.setReadable(false)
     val caught = intercept[SparkException] {
       withSQLConf(SOURCES_BINARY_FILE_MAX_LENGTH.key -> (content.length - 1).toString) {
-        QueryTest.checkAnswer(readContent(), expected)
+        checkAnswer(readContent, expected)
       }
     }
     assert(caught.getMessage.contains("exceeds the max length allowed"))
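
A note on the calling convention above: the inherited instance checkAnswer takes the DataFrame by name (the Row overload visible in the QueryTest.scala diff is declared as df: => DataFrame), so readContent is passed unevaluated and only runs inside the helper. That is what lets the third call pick up the tightened SOURCES_BINARY_FILE_MAX_LENGTH set by the surrounding withSQLConf. A minimal sketch of the by-name behavior; ByNameSketch and checkLazily are illustrative names, not part of QueryTest:

import org.apache.spark.sql.DataFrame

object ByNameSketch {
  // The argument expression is evaluated here, inside the method body, not at
  // the call site, so configuration wrapped around the call is still in effect
  // when the query actually runs.
  def checkLazily(df: => DataFrame): Unit = {
    val rows = df.collect() // readContent would be invoked at this point
    assert(rows != null)
  }
}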

sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala

Lines changed: 1 addition & 1 deletion
@@ -755,7 +755,7 @@ class StreamSuite extends StreamTest {
       inputData.addData(9)
       streamingQuery.processAllAvailable()
 
-      QueryTest.checkAnswer(spark.table("counts").toDF(),
+      checkAnswer(spark.table("counts").toDF(),
         Row("1", 1) :: Row("2", 1) :: Row("3", 2) :: Row("4", 2) ::
         Row("5", 2) :: Row("6", 2) :: Row("7", 1) :: Row("8", 1) :: Row("9", 1) :: Nil)
     } finally {

sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala

Lines changed: 1 addition & 1 deletion
@@ -1063,7 +1063,7 @@ class HashAggregationQueryWithControlledFallbackSuite extends AggregationQuerySu
         // todo: remove it?
         val newActual = Dataset.ofRows(spark, actual.logicalPlan)
 
-        QueryTest.checkAnswer(newActual, expectedAnswer) match {
+        QueryTest.getErrorMessageInCheckAnswer(newActual, expectedAnswer) match {
           case Some(errorMessage) =>
             val newErrorMessage =
               s"""
