diff --git a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
index 89e927e5784d..24e596e1ecda 100644
--- a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
+++ b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala
@@ -30,7 +30,7 @@ abstract class BenchmarkBase {
    * Implementations of this method are supposed to use the wrapper method `runBenchmark`
    * for each benchmark scenario.
    */
-  def runBenchmarkSuite(): Unit
+  def runBenchmarkSuite(mainArgs: Array[String]): Unit
 
   final def runBenchmark(benchmarkName: String)(func: => Any): Unit = {
     val separator = "=" * 96
@@ -51,7 +51,7 @@ abstract class BenchmarkBase {
       output = Some(new FileOutputStream(file))
     }
 
-    runBenchmarkSuite()
+    runBenchmarkSuite(args)
 
     output.foreach { o =>
       if (o != null) {
diff --git a/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala b/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
index 8a52c131af84..d7730f23da10 100644
--- a/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
+++ b/core/src/test/scala/org/apache/spark/serializer/KryoBenchmark.scala
@@ -39,7 +39,7 @@ import org.apache.spark.serializer.KryoTest._
 object KryoBenchmark extends BenchmarkBase {
   val N = 1000000
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     val name = "Benchmark Kryo Unsafe vs safe Serialization"
     runBenchmark(name) {
       val benchmark = new Benchmark(name, N, 10, output = output)
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala b/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
index 6c1d58089867..5f19e466ecad 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/linalg/UDTSerializationBenchmark.scala
@@ -32,7 +32,7 @@ import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
  */
 object UDTSerializationBenchmark extends BenchmarkBase {
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("VectorUDT de/serialization") {
       val iters = 1e2.toInt
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
index 4226ab3773fe..3b4b80daf084 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashBenchmark.scala
@@ -102,7 +102,7 @@ object HashBenchmark extends BenchmarkBase {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     val singleInt = new StructType().add("i", IntegerType)
     test("single ints", singleInt, 1 << 15, 1 << 14)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala
index 7dc865d85af0..dbfa7bb18aa6 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/HashByteArrayBenchmark.scala
@@ -83,7 +83,7 @@ object HashByteArrayBenchmark extends BenchmarkBase {
     benchmark.run()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Benchmark for MurMurHash 3 and xxHash64") {
       test(8, 42L, 1 << 10, 1 << 11)
       test(16, 42L, 1 << 10, 1 << 11)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
index e7a99485cdf0..42a4cfc91f82 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/UnsafeProjectionBenchmark.scala
@@ -41,7 +41,7 @@ object UnsafeProjectionBenchmark extends BenchmarkBase {
     (1 to numRows).map(_ => encoder.toRow(generator().asInstanceOf[Row]).copy()).toArray
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("unsafe projection") {
       val iters = 1024 * 16
       val numRows = 1024 * 16
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
index e3df449b41f0..dba906f63aed 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetBenchmark.scala
@@ -256,7 +256,7 @@ object DatasetBenchmark extends SqlBasedBenchmark {
       .getOrCreate()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     val numRows = 100000000
     val numChains = 10
     runBenchmark("Dataset Benchmark") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
index 86e0df2fea35..b7d28988274b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
@@ -44,7 +44,7 @@ import org.apache.spark.unsafe.map.BytesToBytesMap
  */
 object AggregateBenchmark extends SqlBasedBenchmark {
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("aggregate without grouping") {
       val N = 500L << 22
       codegenBenchmark("agg w/o group", N) {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
index 2f3caca849cd..f727ebcf3fd1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/BloomFilterBenchmark.scala
@@ -80,7 +80,7 @@ object BloomFilterBenchmark extends SqlBasedBenchmark {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     writeBenchmark()
     readBenchmark()
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
index a1e7f9e36f4b..a1f51f8e5480 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
@@ -585,7 +585,7 @@ object DataSourceReadBenchmark extends BenchmarkBase with SQLHelper {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("SQL Single Numeric Column Scan") {
       Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType).foreach { dataType =>
         numericScanBenchmark(1024 * 1024 * 15, dataType)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
index cf05ca336171..017b74aabff7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
@@ -198,7 +198,7 @@ object FilterPushdownBenchmark extends BenchmarkBase with SQLHelper {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Pushdown for many distinct value case") {
       withTempPath { dir =>
         withTempTable("orcTable", "parquetTable") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
index 7bad4cb927b4..ad81711a1394 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/JoinBenchmark.scala
@@ -164,7 +164,7 @@ object JoinBenchmark extends SqlBasedBenchmark {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Join Benchmark") {
       broadcastHashJoinLongKey()
       broadcastHashJoinLongKeyWithDuplicates()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
index 43380869fefe..c4662c8999e4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/MiscBenchmark.scala
@@ -148,7 +148,7 @@ object MiscBenchmark extends SqlBasedBenchmark {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     filterAndAggregateWithoutGroup(500L << 22)
     limitAndAggregateWithoutGroup(500L << 20)
     sample(500 << 18)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
index 83edf73abfae..8b1c422e63a3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/PrimitiveArrayBenchmark.scala
@@ -36,7 +36,7 @@ object PrimitiveArrayBenchmark extends BenchmarkBase {
     .config("spark.sql.autoBroadcastJoinThreshold", 1)
     .getOrCreate()
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Write primitive arrays in dataset") {
       writeDatasetArray(4)
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
index a844e02dcba3..a9f873f9094b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/RangeBenchmark.scala
@@ -32,7 +32,7 @@ import org.apache.spark.benchmark.Benchmark
  */
 object RangeBenchmark extends SqlBasedBenchmark {
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     import spark.implicits._
 
     runBenchmark("range") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
index 9a54e2320b80..784438cd43eb 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SortBenchmark.scala
@@ -119,7 +119,7 @@ object SortBenchmark extends BenchmarkBase {
     benchmark.run()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("radix sort") {
       sortBenchmark()
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
index 79eaeab9c399..f582d844cdc4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/UnsafeArrayDataBenchmark.scala
@@ -194,7 +194,7 @@ object UnsafeArrayDataBenchmark extends BenchmarkBase {
     benchmark.run
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Benchmark UnsafeArrayData") {
       readUnsafeArray(10)
       writeUnsafeArray(10)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
index 124661986ca0..f4642e7d353e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/WideSchemaBenchmark.scala
@@ -208,7 +208,7 @@ object WideSchemaBenchmark extends SqlBasedBenchmark {
     deleteTmpFiles()
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmarkWithDeleteTmpFiles("parsing large select expressions") {
       parsingLargeSelectExpressions()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
index 0f9079744a22..8ea20f28a37b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
@@ -233,7 +233,7 @@ object CompressionSchemeBenchmark extends BenchmarkBase with AllCompressionSchemes {
     runDecodeBenchmark("STRING Decode", iters, count, STRING, testData)
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Compression Scheme Benchmark") {
       bitEncodingBenchmark(1024)
       shortEncodingBenchmark(1024)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
index f311465e582a..953b3a67d976 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchBenchmark.scala
@@ -443,7 +443,7 @@ object ColumnarBatchBenchmark extends BenchmarkBase {
     benchmark.run
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Int Read/Write") {
       intAccess(1024 * 40)
     }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
index 50ee09678e2c..3226e3a5f318 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/ObjectHashAggregateExecBenchmark.scala
@@ -212,7 +212,7 @@ object ObjectHashAggregateExecBenchmark extends BenchmarkBase with SQLHelper {
     Column(approxPercentile.toAggregateExpression(isDistinct))
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("Hive UDAF vs Spark AF") {
       hiveUDAFvsSparkAF(2 << 15)
     }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
index 870ad4818eb2..ec13288f759a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala
@@ -336,7 +336,7 @@ object OrcReadBenchmark extends BenchmarkBase with SQLHelper {
     }
   }
 
-  override def runBenchmarkSuite(): Unit = {
+  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
     runBenchmark("SQL Single Numeric Column Scan") {
       Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType).foreach { dataType =>
         numericScanBenchmark(1024 * 1024 * 15, dataType)
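With this change, the main entry point in BenchmarkBase forwards its command-line arguments to runBenchmarkSuite (the runBenchmarkSuite(args) call above), so a suite can select or parameterize its scenarios instead of ignoring the args. A minimal sketch of a suite consuming mainArgs; the object name ExampleBenchmark and the filtering logic are illustrative, not part of this diff:

// Hypothetical benchmark suite showing one way to use the forwarded args.
object ExampleBenchmark extends BenchmarkBase {
  override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
    val allScenarios = Seq("scenario A", "scenario B")
    // Run only the scenarios named on the command line; run all when none are given.
    val selected =
      if (mainArgs.isEmpty) allScenarios else allScenarios.filter(mainArgs.contains)
    selected.foreach { name =>
      runBenchmark(name) {
        // benchmark body goes here
      }
    }
  }
}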