@@ -52,23 +52,24 @@ class InMemoryCatalogedDDLSuite extends DDLSuite with SharedSQLContext with Befo
   protected override def generateTable(
       catalog: SessionCatalog,
       name: TableIdentifier,
-      isDataSource: Boolean = true): CatalogTable = {
+      isDataSource: Boolean = true,
+      partitionCols: Seq[String] = Seq("a", "b")): CatalogTable = {
     val storage =
       CatalogStorageFormat.empty.copy(locationUri = Some(catalog.defaultTablePath(name)))
     val metadata = new MetadataBuilder()
       .putString("key", "value")
       .build()
+    val schema = new StructType()
+      .add("col1", "int", nullable = true, metadata = metadata)
+      .add("col2", "string")
     CatalogTable(
       identifier = name,
       tableType = CatalogTableType.EXTERNAL,
       storage = storage,
-      schema = new StructType()
-        .add("col1", "int", nullable = true, metadata = metadata)
-        .add("col2", "string")
-        .add("a", "int")
-        .add("b", "int"),
+      schema = schema.copy(
+        fields = schema.fields ++ partitionCols.map(StructField(_, IntegerType))),
       provider = Some("parquet"),
-      partitionColumnNames = Seq("a", "b"),
+      partitionColumnNames = partitionCols,
       createTime = 0L,
       createVersion = org.apache.spark.SPARK_VERSION,
       tracksPartitionsInCatalog = true)
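For reference, a minimal standalone sketch of the schema-building pattern the new generateTable uses: data columns first, then one IntegerType field appended per partition column. The imports are real Spark types; the column names and values here are illustrative only.

    import org.apache.spark.sql.types.{IntegerType, StructField, StructType}

    // Data columns, as in generateTable above.
    val dataSchema = new StructType()
      .add("col1", "int", nullable = true)
      .add("col2", "string")

    // One int field per partition column; StructField defaults to nullable = true.
    val partitionCols = Seq("a", "b")
    val fullSchema = dataSchema.copy(
      fields = dataSchema.fields ++ partitionCols.map(StructField(_, IntegerType)))

    // fullSchema.fieldNames => Array(col1, col2, a, b)

Appending the partition columns last keeps them at the end of the schema, which is the layout that `partitionColumnNames = partitionCols` relies on.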
@@ -176,7 +177,8 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
   protected def generateTable(
       catalog: SessionCatalog,
       name: TableIdentifier,
-      isDataSource: Boolean = true): CatalogTable
+      isDataSource: Boolean = true,
+      partitionCols: Seq[String] = Seq("a", "b")): CatalogTable

   private val escapedIdentifier = "`(.+)`".r

@@ -228,8 +230,10 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
   private def createTable(
       catalog: SessionCatalog,
       name: TableIdentifier,
-      isDataSource: Boolean = true): Unit = {
-    catalog.createTable(generateTable(catalog, name, isDataSource), ignoreIfExists = false)
+      isDataSource: Boolean = true,
+      partitionCols: Seq[String] = Seq("a", "b")): Unit = {
+    catalog.createTable(
+      generateTable(catalog, name, isDataSource, partitionCols), ignoreIfExists = false)
   }

   private def createTablePartition(
@@ -1131,7 +1135,7 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
   }

   test("alter table: recover partition (parallel)") {
-    withSQLConf("spark.rdd.parallelListingThreshold" -> "1") {
+    withSQLConf("spark.rdd.parallelListingThreshold" -> "0") {
       testRecoverPartitions()
     }
   }
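A note on the 1 -> 0 change: spark.rdd.parallelListingThreshold controls how many children a directory must have before listing switches to the parallel path, so lowering it to 0 forces parallel listing for every non-empty directory this test creates. A simplified, illustrative gate (an assumption for exposition, not Spark's actual code):

    // Hypothetical simplification: take the parallel branch once the number of
    // entries exceeds the threshold. With threshold = 0, any non-empty
    // directory is listed in parallel.
    def listInParallel(entries: Int, threshold: Int): Boolean = entries > threshold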
@@ -1144,23 +1148,32 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
     }

     val tableIdent = TableIdentifier("tab1")
-    createTable(catalog, tableIdent)
-    val part1 = Map("a" -> "1", "b" -> "5")
+    createTable(catalog, tableIdent, partitionCols = Seq("a", "b", "c"))
+    val part1 = Map("a" -> "1", "b" -> "5", "c" -> "19")
     createTablePartition(catalog, part1, tableIdent)
     assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1))

-    val part2 = Map("a" -> "2", "b" -> "6")
+    val part2 = Map("a" -> "2", "b" -> "6", "c" -> "31")
     val root = new Path(catalog.getTableMetadata(tableIdent).location)
     val fs = root.getFileSystem(spark.sessionState.newHadoopConf())
     // valid
-    fs.mkdirs(new Path(new Path(root, "a=1"), "b=5"))
-    fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "a.csv"))  // file
-    fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "_SUCCESS"))  // file
-    fs.mkdirs(new Path(new Path(root, "A=2"), "B=6"))
-    fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "b.csv"))  // file
-    fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "c.csv"))  // file
-    fs.createNewFile(new Path(new Path(root, "A=2/B=6"), ".hiddenFile"))  // file
-    fs.mkdirs(new Path(new Path(root, "A=2/B=6"), "_temporary"))
+    fs.mkdirs(new Path(new Path(new Path(root, "a=1"), "b=5"), "c=19"))
+    fs.createNewFile(new Path(new Path(root, "a=1/b=5/c=19"), "a.csv"))  // file
+    fs.createNewFile(new Path(new Path(root, "a=1/b=5/c=19"), "_SUCCESS"))  // file
+
+    fs.mkdirs(new Path(new Path(new Path(root, "A=2"), "B=6"), "C=31"))
+    fs.createNewFile(new Path(new Path(root, "A=2/B=6/C=31"), "b.csv"))  // file
+    fs.createNewFile(new Path(new Path(root, "A=2/B=6/C=31"), "c.csv"))  // file
+    fs.createNewFile(new Path(new Path(root, "A=2/B=6/C=31"), ".hiddenFile"))  // file
+    fs.mkdirs(new Path(new Path(root, "A=2/B=6/C=31"), "_temporary"))
+
+    val parts = (10 to 100).map { a =>
+      val part = Map("a" -> a.toString, "b" -> "5", "c" -> "42")
+      fs.mkdirs(new Path(new Path(new Path(root, s"a=$a"), "b=5"), "c=42"))
+      fs.createNewFile(new Path(new Path(root, s"a=$a/b=5/c=42"), "a.csv"))  // file
+      createTablePartition(catalog, part, tableIdent)
+      part
+    }

     // invalid
     fs.mkdirs(new Path(new Path(root, "a"), "b"))  // bad name
@@ -1174,7 +1187,7 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
     try {
       sql("ALTER TABLE tab1 RECOVER PARTITIONS")
       assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
-        Set(part1, part2))
+        Set(part1, part2) ++ parts)
       if (!isUsingHiveMetastore) {
         assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1")
         assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
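To make the expected outcome concrete, a small self-contained sketch of the set the final assertion checks (plain Scala, no Spark needed; the values mirror the test above). Note that the uppercase A=2/B=6/C=31 directories are expected to resolve to the lowercase part2 spec, since recovery matches directory names against the table's partition columns case-insensitively.

    // Hand-created partitions from the test above.
    val part1 = Map("a" -> "1", "b" -> "5", "c" -> "19")
    val part2 = Map("a" -> "2", "b" -> "6", "c" -> "31")

    // The 91 generated specs, one per a in 10..100, all with b=5 and c=42.
    val parts = (10 to 100).map(a => Map("a" -> a.toString, "b" -> "5", "c" -> "42"))

    val expected = Set(part1, part2) ++ parts
    assert(expected.size == 93)  // 2 hand-made + 91 generated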