diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableAddPartitionExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AddPartitionExec.scala
similarity index 98%
rename from sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableAddPartitionExec.scala
rename to sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AddPartitionExec.scala
index 5772a2b832a21..57d74ab4e4e39 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableAddPartitionExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AddPartitionExec.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.connector.catalog.{SupportsAtomicPartitionManagement
 /**
  * Physical plan node for adding partitions of table.
  */
-case class AlterTableAddPartitionExec(
+case class AddPartitionExec(
     table: SupportsPartitionManagement,
     partSpecs: Seq[ResolvedPartitionSpec],
     ignoreIfExists: Boolean,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala
index 782e6b70e1b5b..976c7df841dd9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2Strategy.scala
@@ -354,7 +354,7 @@ class DataSourceV2Strategy(session: SparkSession) extends Strategy with Predicat
     case AlterTableAddPartition(
         r @ ResolvedTable(_, _, table: SupportsPartitionManagement, _), parts, ignoreIfExists) =>
-      AlterTableAddPartitionExec(
+      AddPartitionExec(
         table,
         parts.asResolvedPartitionSpecs,
         ignoreIfExists,
@@ -365,7 +365,7 @@ class DataSourceV2Strategy(session: SparkSession) extends Strategy with Predicat
         parts,
         ignoreIfNotExists,
         purge) =>
-      AlterTableDropPartitionExec(
+      DropPartitionExec(
         table,
         parts.asResolvedPartitionSpecs,
         ignoreIfNotExists,
@@ -374,7 +374,7 @@ class DataSourceV2Strategy(session: SparkSession) extends Strategy with Predicat
     case AlterTableRenamePartition(
         r @ ResolvedTable(_, _, table: SupportsPartitionManagement, _), from, to) =>
-      AlterTableRenamePartitionExec(
+      RenamePartitionExec(
         table,
         Seq(from).asResolvedPartitionSpecs.head,
         Seq(to).asResolvedPartitionSpecs.head,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableDropPartitionExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DropPartitionExec.scala
similarity index 98%
rename from sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableDropPartitionExec.scala
rename to sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DropPartitionExec.scala
index f3137abbd1ba6..50e14483a9afd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableDropPartitionExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DropPartitionExec.scala
@@ -25,7 +25,7 @@ import org.apache.spark.sql.connector.catalog.{SupportsAtomicPartitionManagement
 /**
  * Physical plan node for dropping partitions of table.
  */
-case class AlterTableDropPartitionExec(
+case class DropPartitionExec(
     table: SupportsPartitionManagement,
     partSpecs: Seq[ResolvedPartitionSpec],
     ignoreIfNotExists: Boolean,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableRenamePartitionExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/RenamePartitionExec.scala
similarity index 97%
rename from sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableRenamePartitionExec.scala
rename to sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/RenamePartitionExec.scala
index 0632bd75102fa..20b2dd1ab83cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/AlterTableRenamePartitionExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/RenamePartitionExec.scala
@@ -25,7 +25,7 @@ import org.apache.spark.sql.connector.catalog.SupportsPartitionManagement
 /**
  * Physical plan node for renaming a table partition.
  */
-case class AlterTableRenamePartitionExec(
+case class RenamePartitionExec(
     table: SupportsPartitionManagement,
     from: ResolvedPartitionSpec,
     to: ResolvedPartitionSpec,
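For context, the renamed physical nodes are the ones DataSourceV2Strategy plans for the ALTER TABLE ... PARTITION commands, as the hunks above show. The following is a minimal sketch of the SQL statements that exercise each node; it assumes a running SparkSession named `spark` and a hypothetical v2 table `testcat.ns.tbl` backed by a catalog that implements SupportsPartitionManagement (names are illustrative, not from the patch).

```scala
import org.apache.spark.sql.SparkSession

object PartitionCommandsExample {
  def main(args: Array[String]): Unit = {
    // Assumes a catalog `testcat` with partition management support is
    // already configured; the table and catalog names are placeholders.
    val spark = SparkSession.builder().appName("partition-exec-demo").getOrCreate()

    // Planned into AddPartitionExec
    spark.sql("ALTER TABLE testcat.ns.tbl ADD PARTITION (p = 1)")

    // Planned into RenamePartitionExec
    spark.sql("ALTER TABLE testcat.ns.tbl PARTITION (p = 1) RENAME TO PARTITION (p = 2)")

    // Planned into DropPartitionExec
    spark.sql("ALTER TABLE testcat.ns.tbl DROP PARTITION (p = 2)")

    spark.stop()
  }
}
```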