@@ -20,14 +20,10 @@ package org.apache.spark.sql.parquet
 
 import java.io.File
 
-import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.scalatest.BeforeAndAfterAll
 
-import scala.reflect.ClassTag
-
-import org.apache.spark.sql.{SQLConf, QueryTest}
-import org.apache.spark.sql.execution.{BroadcastHashJoin, ShuffledHashJoin}
-import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.QueryTest
+import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.apache.spark.sql.hive.test.TestHive._
 
 case class ParquetData(intField: Int, stringField: String)
@@ -36,27 +32,19 @@ case class ParquetData(intField: Int, stringField: String)
  * Tests for our SerDe -> Native parquet scan conversion.
  */
 class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
-
   override def beforeAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "true")
-  }
-
-  override def afterAll(): Unit = {
-    setConf("spark.sql.hive.convertMetastoreParquet", "false")
-  }
-
-  val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
-  partitionedTableDir.delete()
-  partitionedTableDir.mkdir()
-
-  (1 to 10).foreach { p =>
-    val partDir = new File(partitionedTableDir, s"p=$p")
-    sparkContext.makeRDD(1 to 10)
-      .map(i => ParquetData(i, s"part-$p"))
-      .saveAsParquetFile(partDir.getCanonicalPath)
-  }
-
-  sql(s"""
+    val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
+    partitionedTableDir.delete()
+    partitionedTableDir.mkdir()
+
+    (1 to 10).foreach { p =>
+      val partDir = new File(partitionedTableDir, s"p=$p")
+      sparkContext.makeRDD(1 to 10)
+        .map(i => ParquetData(i, s"part-$p"))
+        .saveAsParquetFile(partDir.getCanonicalPath)
+    }
+
+    sql(s"""
     create external table partitioned_parquet
     (
       intField INT,
@@ -70,7 +58,7 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
     location '${partitionedTableDir.getCanonicalPath}'
     """)
 
-  sql(s"""
+    sql(s"""
     create external table normal_parquet
     (
       intField INT,
@@ -83,8 +71,15 @@ class ParquetMetastoreSuite extends QueryTest with BeforeAndAfterAll {
     location '${new File(partitionedTableDir, "p=1").getCanonicalPath}'
     """)
 
-  (1 to 10).foreach { p =>
-    sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+    (1 to 10).foreach { p =>
+      sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
+    }
+
+    setConf("spark.sql.hive.convertMetastoreParquet", "true")
+  }
+
+  override def afterAll(): Unit = {
+    setConf("spark.sql.hive.convertMetastoreParquet", "false")
   }
 
   test("project the partitioning column") {
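Taken together, the hunks above move the table setup out of the class constructor into beforeAll, and flip spark.sql.hive.convertMetastoreParquet to true only after the data, tables, and partitions exist. A sketch of the resulting methods, assembled from the added lines (SQL bodies elided, indentation approximate):

  override def beforeAll(): Unit = {
    // Write ten Parquet partitions under a temp directory.
    val partitionedTableDir = File.createTempFile("parquettests", "sparksql")
    partitionedTableDir.delete()
    partitionedTableDir.mkdir()

    (1 to 10).foreach { p =>
      val partDir = new File(partitionedTableDir, s"p=$p")
      sparkContext.makeRDD(1 to 10)
        .map(i => ParquetData(i, s"part-$p"))
        .saveAsParquetFile(partDir.getCanonicalPath)
    }

    sql(s"""create external table partitioned_parquet ...""")  // full DDL in the hunks above
    sql(s"""create external table normal_parquet ...""")       // full DDL in the hunks above

    (1 to 10).foreach { p =>
      sql(s"ALTER TABLE partitioned_parquet ADD PARTITION (p=$p)")
    }

    // Enable the SerDe -> native Parquet scan conversion only once setup is done.
    setConf("spark.sql.hive.convertMetastoreParquet", "true")
  }

  override def afterAll(): Unit = {
    setConf("spark.sql.hive.convertMetastoreParquet", "false")
  }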