diff --git a/api/src/main/java/org/apache/iceberg/PartitionSpec.java b/api/src/main/java/org/apache/iceberg/PartitionSpec.java index e984fc69d8ce..f24c0b193b2a 100644 --- a/api/src/main/java/org/apache/iceberg/PartitionSpec.java +++ b/api/src/main/java/org/apache/iceberg/PartitionSpec.java @@ -576,17 +576,25 @@ PartitionSpec buildUnchecked() { static void checkCompatibility(PartitionSpec spec, Schema schema) { for (PartitionField field : spec.fields) { Type sourceType = schema.findType(field.sourceId()); - ValidationException.check( - sourceType != null, "Cannot find source column for partition field: %s", field); - ValidationException.check( - sourceType.isPrimitiveType(), - "Cannot partition by non-primitive source field: %s", - sourceType); - ValidationException.check( - field.transform().canTransform(sourceType), - "Invalid source type %s for transform: %s", - sourceType, - field.transform()); + Transform transform = field.transform(); + // When a Version 1 partition-spec field is deleted, + // it is replaced with a void transform, see: + // https://iceberg.apache.org/spec/#partition-transforms + // A void transform is always compatible with any source type, so we skip the source-type + // checks + if (!transform.equals(Transforms.alwaysNull())) { + ValidationException.check( + sourceType != null, "Cannot find source column for partition field: %s", field); + ValidationException.check( + sourceType.isPrimitiveType(), + "Cannot partition by non-primitive source field: %s", + sourceType); + ValidationException.check( + transform.canTransform(sourceType), + "Invalid source type %s for transform: %s", + sourceType, + transform); + } } } diff --git a/core/src/main/java/org/apache/iceberg/TableMetadataParser.java b/core/src/main/java/org/apache/iceberg/TableMetadataParser.java index ee0e8f6502fb..c744bf50a868 100644 --- a/core/src/main/java/org/apache/iceberg/TableMetadataParser.java +++ 
b/core/src/main/java/org/apache/iceberg/TableMetadataParser.java @@ -377,7 +377,12 @@ static TableMetadata fromJson(String metadataLocation, JsonNode node) { // parse the spec array ImmutableList.Builder builder = ImmutableList.builder(); for (JsonNode spec : specArray) { - builder.add(PartitionSpecParser.fromJson(schema, spec)); + UnboundPartitionSpec unboundSpec = PartitionSpecParser.fromJson(spec); + if (unboundSpec.specId() == defaultSpecId) { + builder.add(unboundSpec.bind(schema)); + } else { + builder.add(unboundSpec.bindUnchecked(schema)); + } } specs = builder.build(); diff --git a/spark/v3.2/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java b/spark/v3.2/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java index 8aee7c97752f..45fb8e0bbaee 100644 --- a/spark/v3.2/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java +++ b/spark/v3.2/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java @@ -421,6 +421,31 @@ public void testSparkTableAddDropPartitions() throws Exception { "spark table partition should be empty", 0, sparkTable().partitioning().length); } + @Test + public void testDropColumnOfOldPartitionFieldV1() { + // default table created in v1 format + sql( + "CREATE TABLE %s (id bigint NOT NULL, ts timestamp, day_of_ts date) USING iceberg PARTITIONED BY (day_of_ts)", + tableName); + + sql("ALTER TABLE %s REPLACE PARTITION FIELD day_of_ts WITH days(ts)", tableName); + + sql("ALTER TABLE %s DROP COLUMN day_of_ts", tableName); + } + + @Test + public void testDropColumnOfOldPartitionFieldV2() { + sql( + "CREATE TABLE %s (id bigint NOT NULL, ts timestamp, day_of_ts date) USING iceberg PARTITIONED BY (day_of_ts)", + tableName); + + sql("ALTER TABLE %s SET TBLPROPERTIES ('format-version' = '2');", tableName); + + sql("ALTER TABLE %s REPLACE PARTITION FIELD day_of_ts 
WITH days(ts)", tableName); + + sql("ALTER TABLE %s DROP COLUMN day_of_ts", tableName); + } + private void assertPartitioningEquals(SparkTable table, int len, String transform) { Assert.assertEquals("spark table partition should be " + len, len, table.partitioning().length); Assert.assertEquals( diff --git a/spark/v3.3/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java b/spark/v3.3/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java index 8aee7c97752f..45fb8e0bbaee 100644 --- a/spark/v3.3/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java +++ b/spark/v3.3/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java @@ -421,6 +421,31 @@ public void testSparkTableAddDropPartitions() throws Exception { "spark table partition should be empty", 0, sparkTable().partitioning().length); } + @Test + public void testDropColumnOfOldPartitionFieldV1() { + // default table created in v1 format + sql( + "CREATE TABLE %s (id bigint NOT NULL, ts timestamp, day_of_ts date) USING iceberg PARTITIONED BY (day_of_ts)", + tableName); + + sql("ALTER TABLE %s REPLACE PARTITION FIELD day_of_ts WITH days(ts)", tableName); + + sql("ALTER TABLE %s DROP COLUMN day_of_ts", tableName); + } + + @Test + public void testDropColumnOfOldPartitionFieldV2() { + sql( + "CREATE TABLE %s (id bigint NOT NULL, ts timestamp, day_of_ts date) USING iceberg PARTITIONED BY (day_of_ts)", + tableName); + + sql("ALTER TABLE %s SET TBLPROPERTIES ('format-version' = '2');", tableName); + + sql("ALTER TABLE %s REPLACE PARTITION FIELD day_of_ts WITH days(ts)", tableName); + + sql("ALTER TABLE %s DROP COLUMN day_of_ts", tableName); + } + private void assertPartitioningEquals(SparkTable table, int len, String transform) { Assert.assertEquals("spark table partition should be " + len, len, table.partitioning().length); 
Assert.assertEquals(