sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala (13 additions, 8 deletions)

@@ -373,8 +373,19 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
         throw new AnalysisException(s"Table $tableIdent already exists.")

       case _ =>
-        val storage = DataSource.buildStorageFormatFromOptions(extraOptions.toMap)
-        val tableType = if (storage.locationUri.isDefined) {
+        val existingTable = if (tableExists) {

[inline review comment] @cloud-fan (Contributor), Nov 23, 2016:
shall we move this logic to CreateDataSourceTableAsSelectCommand?

[inline review comment] Contributor:
nvm, I think it's fine here

+          Some(df.sparkSession.sessionState.catalog.getTableMetadata(tableIdent))
+        } else {
+          None
+        }
+        val storage = if (tableExists) {
+          existingTable.get.storage
+        } else {
+          DataSource.buildStorageFormatFromOptions(extraOptions.toMap)
+        }
+        val tableType = if (tableExists) {
+          existingTable.get.tableType
+        } else if (storage.locationUri.isDefined) {
           CatalogTableType.EXTERNAL
         } else {
           CatalogTableType.MANAGED

@@ -391,12 +402,6 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
         )
         df.sparkSession.sessionState.executePlan(
           CreateTable(tableDesc, mode, Some(df.logicalPlan))).toRdd
-        if (tableDesc.partitionColumnNames.nonEmpty &&
-            df.sparkSession.sqlContext.conf.manageFilesourcePartitions) {
-          // Need to recover partitions into the metastore so our saved data is visible.
-          df.sparkSession.sessionState.executePlan(
-            AlterTableRecoverPartitionsCommand(tableDesc.identifier)).toRdd
-        }
     }
   }
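
For context, a minimal usage sketch (not part of the patch) of the path this hunk changes, written for spark-shell; the table name "t" and its columns are illustrative. With this change, the second saveAsTable below appends into the existing table's storage location and table type as recorded in the catalog, instead of re-deriving them from the writer's options, and the AlterTableRecoverPartitionsCommand step removed above is no longer issued from DataFrameWriter.

  // Illustrative only: create a partitioned datasource table, then append to it.
  spark.range(5).selectExpr("id", "id as partCol")
    .write.partitionBy("partCol").saveAsTable("t")

  // The append reuses the existing table's catalog metadata (storage, tableType).
  spark.range(5).selectExpr("id", "id as partCol")
    .write.partitionBy("partCol").mode("append").saveAsTable("t")

  spark.sql("select count(*) from t").show()   // expect a count of 10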

@@ -208,7 +208,8 @@ case class CreateDataSourceTableAsSelectCommand(
       className = provider,
       partitionColumns = table.partitionColumnNames,
       bucketSpec = table.bucketSpec,
-      options = table.storage.properties ++ pathOption)
+      options = table.storage.properties ++ pathOption,
+      catalogTable = Some(table))

     val result = try {
       dataSource.write(mode, df)

@@ -188,6 +188,25 @@ class PartitionProviderCompatibilitySuite
     }
   }

+  for (enabled <- Seq(true, false)) {
+    test(s"SPARK-18544 append with saveAsTable - partition management $enabled") {
+      withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> enabled.toString) {
+        withTable("test") {
+          withTempDir { dir =>
+            setupPartitionedDatasourceTable("test", dir)
+            if (enabled) {
+              spark.sql("msck repair table test")

[inline review comment] Member:
If HIVE_MANAGE_FILESOURCE_PARTITIONS is on, we have to repair the table; otherwise the table is empty to the external users. This looks weird to me. It is also inconsistent with the behavior when HIVE_MANAGE_FILESOURCE_PARTITIONS is off. I think we should repair the table after we create the table. Let me submit a PR and cc you then.

+            }
+            assert(spark.sql("select * from test").count() == 5)
+            spark.range(10).selectExpr("id as fieldOne", "id as partCol")
+              .write.partitionBy("partCol").mode("append").saveAsTable("test")
+            assert(spark.sql("select * from test").count() == 15)
+          }
+        }
+      }
+    }
+  }
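
The reviewer comment above describes why this test has to issue MSCK REPAIR TABLE when partition management is enabled: the metastore then tracks partitions, so files that already sit on disk are invisible until their partitions are registered. A minimal sketch of that behavior (illustrative only; the table name "events" is hypothetical, and the repair statement is the same one the test uses):

  // Assumes a partitioned datasource table "events" whose partition directories
  // already exist on disk, with HIVE_MANAGE_FILESOURCE_PARTITIONS enabled.
  spark.sql("select count(*) from events").show()   // reports 0 until partitions are registered
  spark.sql("msck repair table events")             // registers the on-disk partitions in the metastore
  spark.sql("select count(*) from events").show()   // now reflects the on-disk data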

   /**
    * Runs a test against a multi-level partitioned table, then validates that the custom locations
    * were respected by the output writer.