@@ -37,6 +37,8 @@ import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIdentifier}
import org.apache.spark.sql.execution.datasources.{DataSource, PartitioningUtils}
+import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
+import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.csv.CSVDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.json.JsonDataSourceV2
@@ -238,7 +240,8 @@ case class AlterTableAddColumnsCommand(
// TextFileFormat only default to one column "value"
// Hive type is already considered as hive serde table, so the logic will not
// come in here.
-case _: JsonDataSourceV2 | _: CSVDataSourceV2 | _: ParquetFileFormat | _: OrcDataSourceV2 =>
+case _: CSVFileFormat | _: JsonFileFormat | _: ParquetFileFormat =>
+case _: JsonDataSourceV2 | _: CSVDataSourceV2 | _: OrcDataSourceV2 =>
Member:
Do these V2 data sources also support ADD COLUMNS? Do we have test cases for them?

Member Author:

No, V2 doesn't support ADD COLUMN. If a command requires catalog support, Spark falls back from V2 to V1.

Currently, DataSource.lookupDataSource always resolves "csv"/"json"/"orc" to CSVDataSourceV2/JsonDataSourceV2/OrcDataSourceV2, so we need to match those classes here as well.
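
To illustrate, here is a minimal sketch (not the exact Spark source; the helper name `verifyAddColumnsSupported` is made up) of how the provider class returned by `DataSource.lookupDataSource` would be matched against both the V1 file formats and the V2 data sources:

```scala
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.csv.CSVDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.json.JsonDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.orc.OrcDataSourceV2

// Hypothetical helper: takes the class that DataSource.lookupDataSource resolved for
// the table's provider and rejects formats that cannot handle ALTER TABLE ADD COLUMNS.
def verifyAddColumnsSupported(providerClass: Class[_]): Unit = {
  providerClass.getConstructor().newInstance() match {
    // V1 file formats that support ADD COLUMNS directly.
    case _: CSVFileFormat | _: JsonFileFormat | _: ParquetFileFormat =>
    // "csv"/"json"/"orc" currently resolve to these V2 classes, but the command
    // still runs through the V1 path after Spark falls back from V2 to V1.
    case _: CSVDataSourceV2 | _: JsonDataSourceV2 | _: OrcDataSourceV2 =>
    // Both the native and the Hive ORC V1 formats share this class-name suffix.
    case s if s.getClass.getCanonicalName.endsWith("OrcFileFormat") =>
    case s =>
      throw new AnalysisException(s"ADD COLUMNS is not supported for ${s.getClass.getName}")
  }
}
```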

case s if s.getClass.getCanonicalName.endsWith("OrcFileFormat") =>
case s =>
throw new AnalysisException(
@@ -2566,7 +2566,10 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
}
}

-val supportedNativeFileFormatsForAlterTableAddColumns = Seq("parquet", "json", "csv")
+val supportedNativeFileFormatsForAlterTableAddColumns = Seq("csv", "json", "parquet",
+  "org.apache.spark.sql.execution.datasources.csv.CSVFileFormat",
+  "org.apache.spark.sql.execution.datasources.json.JsonFileFormat",
+  "org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat")

supportedNativeFileFormatsForAlterTableAddColumns.foreach { provider =>
test(s"alter datasource table add columns - $provider") {
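
For context, a hedged sketch of what each generated test might exercise, assuming the DDLSuite helpers (`sql`, `withTable`, `checkAnswer`) and an illustrative table name `t1` (the actual test body is truncated above):

```scala
withTable("t1") {
  // Create a table with the provider under test, widen its schema, then verify
  // that the new column is both written and read back.
  sql(s"CREATE TABLE t1 (c1 int) USING $provider")
  sql("ALTER TABLE t1 ADD COLUMNS (c2 int)")
  sql("INSERT INTO t1 VALUES (1, 2)")
  checkAnswer(spark.table("t1"), Row(1, 2))
}
```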