From e5ac1412ed7f6749afa4d9dd1ea1d138583c1e33 Mon Sep 17 00:00:00 2001 From: Tomohiro Tanaka Date: Sat, 3 Feb 2024 01:20:52 +0900 Subject: [PATCH 1/4] Migrate SparkExtensions sub-classes to JUnit5 + AssertJ --- .../TestAlterTablePartitionFields.java | 229 +++++++++--------- 1 file changed, 118 insertions(+), 111 deletions(-) diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java index 948fc462de99..a89eeb9de742 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java @@ -18,6 +18,11 @@ */ package org.apache.iceberg.spark.extensions; +import static org.assertj.core.api.Assertions.assertThat; + +import org.apache.iceberg.Parameter; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Table; import org.apache.iceberg.TableProperties; @@ -27,40 +32,52 @@ import org.apache.spark.sql.connector.catalog.CatalogManager; import org.apache.spark.sql.connector.catalog.Identifier; import org.apache.spark.sql.connector.catalog.TableCatalog; -import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; -public class TestAlterTablePartitionFields extends SparkExtensionsTestBase { +@ExtendWith(ParameterizedTestExtension.class) +public class TestAlterTablePartitionFields extends ExtensionsTestBase { - @Parameterized.Parameters(name = "catalogConfig = {0}, formatVersion = {1}") + 
@Parameters( + name = + "catalogName = {0}, implementation = {1}, config = {2}, catalogConfig = {3}, formatVersion = {4}") public static Object[][] parameters() { return new Object[][] { - {SparkCatalogConfig.HIVE, 1}, - {SparkCatalogConfig.SPARK, 2} + { + SparkCatalogConfig.HIVE.catalogName(), + SparkCatalogConfig.HIVE.implementation(), + SparkCatalogConfig.HIVE.properties(), + SparkCatalogConfig.HIVE, + 1 + }, + { + SparkCatalogConfig.SPARK.catalogName(), + SparkCatalogConfig.SPARK.implementation(), + SparkCatalogConfig.SPARK.properties(), + SparkCatalogConfig.SPARK, + 2 + } }; } - private final int formatVersion; + @Parameter(index = 3) + SparkCatalogConfig catalogConfig; - public TestAlterTablePartitionFields(SparkCatalogConfig catalogConfig, int formatVersion) { - super(catalogConfig.catalogName(), catalogConfig.implementation(), catalogConfig.properties()); - this.formatVersion = formatVersion; - } + @Parameter(index = 4) + private int formatVersion; - @After + @AfterEach public void removeTable() { sql("DROP TABLE IF EXISTS %s", tableName); } - @Test + @TestTemplate public void testAddIdentityPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD category", tableName); @@ -69,15 +86,15 @@ public void testAddIdentityPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).identity("category").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddBucketPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = 
validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD bucket(16, id)", tableName); @@ -89,15 +106,15 @@ public void testAddBucketPartition() { .bucket("id", 16, "id_bucket_16") .build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddTruncatePartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD truncate(data, 4)", tableName); @@ -109,15 +126,15 @@ public void testAddTruncatePartition() { .truncate("data", 4, "data_trunc_4") .build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddYearsPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD years(ts)", tableName); @@ -126,15 +143,15 @@ public void testAddYearsPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).year("ts").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should 
have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddMonthsPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD months(ts)", tableName); @@ -143,15 +160,15 @@ public void testAddMonthsPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).month("ts").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddDaysPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD days(ts)", tableName); @@ -160,15 +177,15 @@ public void testAddDaysPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).day("ts").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddHoursPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD 
hours(ts)", tableName); @@ -177,17 +194,15 @@ public void testAddHoursPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).hour("ts").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddYearPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assertions.assertThat(table.spec().isUnpartitioned()) - .as("Table should start unpartitioned") - .isTrue(); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD year(ts)", tableName); @@ -196,17 +211,15 @@ public void testAddYearPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).year("ts").build(); - Assertions.assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddMonthPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assertions.assertThat(table.spec().isUnpartitioned()) - .as("Table should start unpartitioned") - .isTrue(); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD month(ts)", tableName); @@ -215,17 +228,15 @@ public void testAddMonthPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).month("ts").build(); - Assertions.assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddDayPartition() 
{ createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assertions.assertThat(table.spec().isUnpartitioned()) - .as("Table should start unpartitioned") - .isTrue(); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD day(ts)", tableName); @@ -234,17 +245,15 @@ public void testAddDayPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).day("ts").build(); - Assertions.assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddHourPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assertions.assertThat(table.spec().isUnpartitioned()) - .as("Table should start unpartitioned") - .isTrue(); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD hour(ts)", tableName); @@ -253,15 +262,15 @@ public void testAddHourPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).hour("ts").build(); - Assertions.assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testAddNamedPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD bucket(16, id) AS shard", tableName); @@ -270,16 
+279,15 @@ public void testAddNamedPartition() { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).bucket("id", 16, "shard").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } - @Test + @TestTemplate public void testDropIdentityPartition() { createTable("id bigint NOT NULL, category string, data string", "category"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertEquals( - "Table should start with 1 partition field", 1, table.spec().fields().size()); + assertThat(table.spec().fields()).as("Table should start with 1 partition field").hasSize(1); sql("ALTER TABLE %s DROP PARTITION FIELD category", tableName); @@ -291,19 +299,18 @@ public void testDropIdentityPartition() { .withSpecId(1) .alwaysNull("category", "category") .build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } else { - Assert.assertTrue("New spec must be unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("New spec must be unpartitioned").isTrue(); } } - @Test + @TestTemplate public void testDropDaysPartition() { createTable("id bigint NOT NULL, ts timestamp, data string", "days(ts)"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertEquals( - "Table should start with 1 partition field", 1, table.spec().fields().size()); + assertThat(table.spec().fields()).as("Table should start with 1 partition field").hasSize(1); sql("ALTER TABLE %s DROP PARTITION FIELD days(ts)", tableName); @@ -312,19 +319,18 @@ public void testDropDaysPartition() { if (formatVersion == 1) { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).alwaysNull("ts", "ts_day").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + 
assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } else { - Assert.assertTrue("New spec must be unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("New spec must be unpartitioned").isTrue(); } } - @Test + @TestTemplate public void testDropBucketPartition() { createTable("id bigint NOT NULL, data string", "bucket(16, id)"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertEquals( - "Table should start with 1 partition field", 1, table.spec().fields().size()); + assertThat(table.spec().fields()).as("Table should start with 1 partition field").hasSize(1); sql("ALTER TABLE %s DROP PARTITION FIELD bucket(16, id)", tableName); @@ -336,24 +342,24 @@ public void testDropBucketPartition() { .withSpecId(1) .alwaysNull("id", "id_bucket") .build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } else { - Assert.assertTrue("New spec must be unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("New spec must be unpartitioned").isTrue(); } } - @Test + @TestTemplate public void testDropPartitionByName() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD bucket(16, id) AS shard", tableName); table.refresh(); - Assert.assertEquals("Table should have 1 partition field", 1, table.spec().fields().size()); + assertThat(table.spec().fields()).as("Table should have 1 partition field").hasSize(1); // Should be recognized as iceberg command even with extra white spaces sql("ALTER TABLE %s DROP PARTITION \n FIELD shard", tableName); @@ 
-363,23 +369,23 @@ public void testDropPartitionByName() { if (formatVersion == 1) { PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(2).alwaysNull("id", "shard").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); } else { - Assert.assertTrue("New spec must be unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("New spec must be unpartitioned").isTrue(); } } - @Test + @TestTemplate public void testReplacePartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD days(ts)", tableName); table.refresh(); PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).day("ts").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); sql("ALTER TABLE %s REPLACE PARTITION FIELD days(ts) WITH hours(ts)", tableName); table.refresh(); @@ -398,21 +404,22 @@ public void testReplacePartition() { .addField("hour", 3, 1001, "ts_hour") .build(); } - Assert.assertEquals( - "Should changed from daily to hourly partitioned field", expected, table.spec()); + assertThat(table.spec()) + .as("Should changed from daily to hourly partitioned field") + .isEqualTo(expected); } - @Test + @TestTemplate public void testReplacePartitionAndRename() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + 
assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD days(ts)", tableName); table.refresh(); PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).day("ts").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); sql("ALTER TABLE %s REPLACE PARTITION FIELD days(ts) WITH hours(ts) AS hour_col", tableName); table.refresh(); @@ -431,21 +438,22 @@ public void testReplacePartitionAndRename() { .addField("hour", 3, 1001, "hour_col") .build(); } - Assert.assertEquals( - "Should changed from daily to hourly partitioned field", expected, table.spec()); + assertThat(table.spec()) + .as("Should changed from daily to hourly partitioned field") + .isEqualTo(expected); } - @Test + @TestTemplate public void testReplaceNamedPartition() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD days(ts) AS day_col", tableName); table.refresh(); PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).day("ts", "day_col").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); sql("ALTER TABLE %s REPLACE PARTITION FIELD day_col WITH hours(ts)", tableName); table.refresh(); @@ -464,21 +472,22 @@ public void testReplaceNamedPartition() { .addField("hour", 3, 1001, "ts_hour") .build(); } - Assert.assertEquals( - "Should changed from daily to hourly partitioned field", expected, table.spec()); + assertThat(table.spec()) + .as("Should changed from 
daily to hourly partitioned field") + .isEqualTo(expected); } - @Test + @TestTemplate public void testReplaceNamedPartitionAndRenameDifferently() { createTable("id bigint NOT NULL, category string, ts timestamp, data string"); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unpartitioned", table.spec().isUnpartitioned()); + assertThat(table.spec().isUnpartitioned()).as("Table should start unpartitioned").isTrue(); sql("ALTER TABLE %s ADD PARTITION FIELD days(ts) AS day_col", tableName); table.refresh(); PartitionSpec expected = PartitionSpec.builderFor(table.schema()).withSpecId(1).day("ts", "day_col").build(); - Assert.assertEquals("Should have new spec field", expected, table.spec()); + assertThat(table.spec()).as("Should have new spec field").isEqualTo(expected); sql("ALTER TABLE %s REPLACE PARTITION FIELD day_col WITH hours(ts) AS hour_col", tableName); table.refresh(); @@ -497,15 +506,15 @@ public void testReplaceNamedPartitionAndRenameDifferently() { .addField("hour", 3, 1001, "hour_col") .build(); } - Assert.assertEquals( - "Should changed from daily to hourly partitioned field", expected, table.spec()); + assertThat(table.spec()) + .as("Should changed from daily to hourly partitioned field") + .isEqualTo(expected); } - @Test + @TestTemplate public void testSparkTableAddDropPartitions() throws Exception { createTable("id bigint NOT NULL, ts timestamp, data string"); - Assert.assertEquals( - "spark table partition should be empty", 0, sparkTable().partitioning().length); + assertThat(sparkTable().partitioning()).as("spark table partition should be empty").hasSize(0); sql("ALTER TABLE %s ADD PARTITION FIELD bucket(16, id) AS shard", tableName); assertPartitioningEquals(sparkTable(), 1, "bucket(16, id)"); @@ -524,11 +533,10 @@ public void testSparkTableAddDropPartitions() throws Exception { sql("ALTER TABLE %s DROP PARTITION FIELD shard", tableName); sql("DESCRIBE %s", tableName); - Assert.assertEquals( - "spark 
table partition should be empty", 0, sparkTable().partitioning().length); + assertThat(sparkTable().partitioning()).as("spark table partition should be empty").hasSize(0); } - @Test + @TestTemplate public void testDropColumnOfOldPartitionFieldV1() { // default table created in v1 format sql( @@ -540,7 +548,7 @@ public void testDropColumnOfOldPartitionFieldV1() { sql("ALTER TABLE %s DROP COLUMN day_of_ts", tableName); } - @Test + @TestTemplate public void testDropColumnOfOldPartitionFieldV2() { sql( "CREATE TABLE %s (id bigint NOT NULL, ts timestamp, day_of_ts date) USING iceberg PARTITIONED BY (day_of_ts) TBLPROPERTIES('format-version' = '2')", @@ -552,11 +560,10 @@ public void testDropColumnOfOldPartitionFieldV2() { } private void assertPartitioningEquals(SparkTable table, int len, String transform) { - Assert.assertEquals("spark table partition should be " + len, len, table.partitioning().length); - Assert.assertEquals( - "latest spark table partition transform should match", - transform, - table.partitioning()[len - 1].toString()); + assertThat(table.partitioning()).as("spark table partition should be " + len).hasSize(len); + assertThat(table.partitioning()[len - 1].toString()) + .as("latest spark table partition transform should match") + .isEqualTo(transform); } private SparkTable sparkTable() throws Exception { From 1bdcf2cf3b4c2b427985331f97cf5e05d06bea07 Mon Sep 17 00:00:00 2001 From: Tomohiro Tanaka Date: Mon, 5 Feb 2024 19:58:25 +0900 Subject: [PATCH 2/4] Fix parameter initialization and add other tests for DDLs --- .../TestAlterTablePartitionFields.java | 9 +- .../extensions/TestAlterTableSchema.java | 103 +++++------ .../spark/extensions/TestBranchDDL.java | 173 +++++++++--------- 3 files changed, 140 insertions(+), 145 deletions(-) diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java 
b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java index a89eeb9de742..38e5c942c9ff 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTablePartitionFields.java @@ -39,32 +39,25 @@ @ExtendWith(ParameterizedTestExtension.class) public class TestAlterTablePartitionFields extends ExtensionsTestBase { - @Parameters( - name = - "catalogName = {0}, implementation = {1}, config = {2}, catalogConfig = {3}, formatVersion = {4}") + @Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}, formatVersion = {3}") public static Object[][] parameters() { return new Object[][] { { SparkCatalogConfig.HIVE.catalogName(), SparkCatalogConfig.HIVE.implementation(), SparkCatalogConfig.HIVE.properties(), - SparkCatalogConfig.HIVE, 1 }, { SparkCatalogConfig.SPARK.catalogName(), SparkCatalogConfig.SPARK.implementation(), SparkCatalogConfig.SPARK.properties(), - SparkCatalogConfig.SPARK, 2 } }; } @Parameter(index = 3) - SparkCatalogConfig catalogConfig; - - @Parameter(index = 4) private int formatVersion; @AfterEach diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTableSchema.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTableSchema.java index 7a6eb9aa387f..91126776528a 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTableSchema.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestAlterTableSchema.java @@ -18,60 +18,59 @@ */ package org.apache.iceberg.spark.extensions; -import java.util.Map; +import static org.assertj.core.api.Assertions.assertThat; + +import org.apache.iceberg.ParameterizedTestExtension; import org.apache.iceberg.Table; import 
org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.assertj.core.api.Assertions; -import org.junit.After; import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; -public class TestAlterTableSchema extends SparkExtensionsTestBase { - public TestAlterTableSchema( - String catalogName, String implementation, Map config) { - super(catalogName, implementation, config); - } +@ExtendWith(ParameterizedTestExtension.class) +public class TestAlterTableSchema extends ExtensionsTestBase { - @After + @AfterEach public void removeTable() { sql("DROP TABLE IF EXISTS %s", tableName); } - @Test + @TestTemplate public void testSetIdentifierFields() { sql( "CREATE TABLE %s (id bigint NOT NULL, " + "location struct NOT NULL) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue( - "Table should start without identifier", table.schema().identifierFieldIds().isEmpty()); + assertThat(table.schema().identifierFieldIds()) + .as("Table should start without identifier") + .isEmpty(); sql("ALTER TABLE %s SET IDENTIFIER FIELDS id", tableName); table.refresh(); - Assert.assertEquals( - "Should have new identifier field", - Sets.newHashSet(table.schema().findField("id").fieldId()), - table.schema().identifierFieldIds()); + assertThat(table.schema().identifierFieldIds()) + .as("Should have new identifier field") + .isEqualTo(Sets.newHashSet(table.schema().findField("id").fieldId())); sql("ALTER TABLE %s SET IDENTIFIER FIELDS id, location.lon", tableName); table.refresh(); - Assert.assertEquals( - "Should have new identifier field", - Sets.newHashSet( - table.schema().findField("id").fieldId(), - table.schema().findField("location.lon").fieldId()), - table.schema().identifierFieldIds()); + assertThat(table.schema().identifierFieldIds()) + .as("Should have new identifier field") + .isEqualTo( + 
Sets.newHashSet( + table.schema().findField("id").fieldId(), + table.schema().findField("location.lon").fieldId())); sql("ALTER TABLE %s SET IDENTIFIER FIELDS location.lon", tableName); table.refresh(); - Assert.assertEquals( - "Should have new identifier field", - Sets.newHashSet(table.schema().findField("location.lon").fieldId()), - table.schema().identifierFieldIds()); + assertThat(table.schema().identifierFieldIds()) + .as("Should have new identifier field") + .isEqualTo(Sets.newHashSet(table.schema().findField("location.lon").fieldId())); } - @Test + @TestTemplate public void testSetInvalidIdentifierFields() { sql("CREATE TABLE %s (id bigint NOT NULL, id2 bigint) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); @@ -87,56 +86,58 @@ public void testSetInvalidIdentifierFields() { .hasMessageEndingWith("not a required field"); } - @Test + @TestTemplate public void testDropIdentifierFields() { sql( "CREATE TABLE %s (id bigint NOT NULL, " + "location struct NOT NULL) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue( - "Table should start without identifier", table.schema().identifierFieldIds().isEmpty()); + assertThat(table.schema().identifierFieldIds()) + .as("Table should start without identifier") + .isEmpty(); sql("ALTER TABLE %s SET IDENTIFIER FIELDS id, location.lon", tableName); table.refresh(); - Assert.assertEquals( - "Should have new identifier fields", - Sets.newHashSet( - table.schema().findField("id").fieldId(), - table.schema().findField("location.lon").fieldId()), - table.schema().identifierFieldIds()); + assertThat(table.schema().identifierFieldIds()) + .as("Should have new identifier fields") + .isEqualTo( + Sets.newHashSet( + table.schema().findField("id").fieldId(), + table.schema().findField("location.lon").fieldId())); sql("ALTER TABLE %s DROP IDENTIFIER FIELDS id", tableName); table.refresh(); - Assert.assertEquals( - "Should removed identifier field", - 
Sets.newHashSet(table.schema().findField("location.lon").fieldId()), - table.schema().identifierFieldIds()); + assertThat(table.schema().identifierFieldIds()) + .as("Should removed identifier field") + .isEqualTo(Sets.newHashSet(table.schema().findField("location.lon").fieldId())); sql("ALTER TABLE %s SET IDENTIFIER FIELDS id, location.lon", tableName); table.refresh(); - Assert.assertEquals( - "Should have new identifier fields", - Sets.newHashSet( - table.schema().findField("id").fieldId(), - table.schema().findField("location.lon").fieldId()), - table.schema().identifierFieldIds()); + assertThat(table.schema().identifierFieldIds()) + .as("Should have new identifier fields") + .isEqualTo( + Sets.newHashSet( + table.schema().findField("id").fieldId(), + table.schema().findField("location.lon").fieldId())); sql("ALTER TABLE %s DROP IDENTIFIER FIELDS id, location.lon", tableName); table.refresh(); - Assert.assertEquals( - "Should have no identifier field", Sets.newHashSet(), table.schema().identifierFieldIds()); + assertThat(table.schema().identifierFieldIds()) + .as("Should have no identifier field") + .isEqualTo(Sets.newHashSet()); } - @Test + @TestTemplate public void testDropInvalidIdentifierFields() { sql( "CREATE TABLE %s (id bigint NOT NULL, data string NOT NULL, " + "location struct NOT NULL) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue( - "Table should start without identifier", table.schema().identifierFieldIds().isEmpty()); + assertThat(table.schema().identifierFieldIds()) + .as("Table should start without identifier") + .isEmpty(); Assertions.assertThatThrownBy( () -> sql("ALTER TABLE %s DROP IDENTIFIER FIELDS unknown", tableName)) .isInstanceOf(IllegalArgumentException.class) diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestBranchDDL.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestBranchDDL.java index 
a6bf194b3df5..022edecc31e0 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestBranchDDL.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestBranchDDL.java @@ -21,8 +21,9 @@ import static org.assertj.core.api.Assertions.assertThat; import java.util.List; -import java.util.Map; import java.util.concurrent.TimeUnit; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.Snapshot; import org.apache.iceberg.SnapshotRef; import org.apache.iceberg.Table; @@ -34,25 +35,25 @@ import org.apache.spark.sql.catalyst.analysis.NoSuchTableException; import org.apache.spark.sql.catalyst.parser.extensions.IcebergParseException; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; -public class TestBranchDDL extends SparkExtensionsTestBase { +@ExtendWith(ParameterizedTestExtension.class) +public class TestBranchDDL extends ExtensionsTestBase { - @Before - public void before() { + @BeforeEach + public void createTable() { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); } - @After + @AfterEach public void removeTable() { sql("DROP TABLE IF EXISTS %s", tableName); } - @Parameterized.Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}") + @Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}") public static Object[][] parameters() { return new Object[][] { { @@ -63,11 +64,7 @@ public static Object[][] parameters() { }; } - public TestBranchDDL(String catalog, String implementation, Map properties) { - super(catalog, implementation, properties); - } - - @Test + @TestTemplate 
public void testCreateBranch() throws NoSuchTableException { Table table = insertRows(); long snapshotId = table.currentSnapshot().snapshotId(); @@ -80,10 +77,11 @@ public void testCreateBranch() throws NoSuchTableException { tableName, branchName, snapshotId, maxRefAge, minSnapshotsToKeep, maxSnapshotAge); table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); - Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); - Assert.assertEquals(TimeUnit.DAYS.toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); - Assert.assertEquals(TimeUnit.DAYS.toMillis(maxRefAge), ref.maxRefAgeMs().longValue()); + assertThat(ref.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId()); + assertThat(ref.minSnapshotsToKeep()).isEqualTo(minSnapshotsToKeep); + assertThat(ref.maxSnapshotAgeMs().longValue()) + .isEqualTo(TimeUnit.DAYS.toMillis(maxSnapshotAge)); + assertThat(ref.maxRefAgeMs().longValue()).isEqualTo(TimeUnit.DAYS.toMillis(maxRefAge)); Assertions.assertThatThrownBy( () -> sql("ALTER TABLE %s CREATE BRANCH %s", tableName, branchName)) @@ -91,43 +89,43 @@ public void testCreateBranch() throws NoSuchTableException { .hasMessage("Ref b1 already exists"); } - @Test + @TestTemplate public void testCreateBranchOnEmptyTable() { String branchName = "b1"; sql("ALTER TABLE %s CREATE BRANCH %s", tableName, "b1"); Table table = validationCatalog.loadTable(tableIdent); SnapshotRef mainRef = table.refs().get(SnapshotRef.MAIN_BRANCH); - Assertions.assertThat(mainRef).isNull(); + assertThat(mainRef).isNull(); SnapshotRef ref = table.refs().get(branchName); - Assertions.assertThat(ref).isNotNull(); - Assertions.assertThat(ref.minSnapshotsToKeep()).isNull(); - Assertions.assertThat(ref.maxSnapshotAgeMs()).isNull(); - Assertions.assertThat(ref.maxRefAgeMs()).isNull(); + assertThat(ref).isNotNull(); + assertThat(ref.minSnapshotsToKeep()).isNull(); + 
assertThat(ref.maxSnapshotAgeMs()).isNull(); + assertThat(ref.maxRefAgeMs()).isNull(); Snapshot snapshot = table.snapshot(ref.snapshotId()); - Assertions.assertThat(snapshot.parentId()).isNull(); - Assertions.assertThat(snapshot.addedDataFiles(table.io())).isEmpty(); - Assertions.assertThat(snapshot.removedDataFiles(table.io())).isEmpty(); - Assertions.assertThat(snapshot.addedDeleteFiles(table.io())).isEmpty(); - Assertions.assertThat(snapshot.removedDeleteFiles(table.io())).isEmpty(); + assertThat(snapshot.parentId()).isNull(); + assertThat(snapshot.addedDataFiles(table.io())).isEmpty(); + assertThat(snapshot.removedDataFiles(table.io())).isEmpty(); + assertThat(snapshot.addedDeleteFiles(table.io())).isEmpty(); + assertThat(snapshot.removedDeleteFiles(table.io())).isEmpty(); } - @Test + @TestTemplate public void testCreateBranchUseDefaultConfig() throws NoSuchTableException { Table table = insertRows(); String branchName = "b1"; sql("ALTER TABLE %s CREATE BRANCH %s", tableName, branchName); table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); - Assert.assertNull(ref.minSnapshotsToKeep()); - Assert.assertNull(ref.maxSnapshotAgeMs()); - Assert.assertNull(ref.maxRefAgeMs()); + assertThat(ref.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId()); + assertThat(ref.minSnapshotsToKeep()).isNull(); + assertThat(ref.maxSnapshotAgeMs()).isNull(); + assertThat(ref.maxRefAgeMs()).isNull(); } - @Test + @TestTemplate public void testCreateBranchUseCustomMinSnapshotsToKeep() throws NoSuchTableException { Integer minSnapshotsToKeep = 2; Table table = insertRows(); @@ -137,13 +135,13 @@ public void testCreateBranchUseCustomMinSnapshotsToKeep() throws NoSuchTableExce tableName, branchName, minSnapshotsToKeep); table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); - 
Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); - Assert.assertNull(ref.maxSnapshotAgeMs()); - Assert.assertNull(ref.maxRefAgeMs()); + assertThat(ref.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId()); + assertThat(ref.minSnapshotsToKeep()).isEqualTo(minSnapshotsToKeep); + assertThat(ref.maxSnapshotAgeMs()).isNull(); + assertThat(ref.maxRefAgeMs()).isNull(); } - @Test + @TestTemplate public void testCreateBranchUseCustomMaxSnapshotAge() throws NoSuchTableException { long maxSnapshotAge = 2L; Table table = insertRows(); @@ -153,13 +151,14 @@ public void testCreateBranchUseCustomMaxSnapshotAge() throws NoSuchTableExceptio tableName, branchName, maxSnapshotAge); table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertNotNull(ref); - Assert.assertNull(ref.minSnapshotsToKeep()); - Assert.assertEquals(TimeUnit.DAYS.toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); - Assert.assertNull(ref.maxRefAgeMs()); + assertThat(ref).isNotNull(); + assertThat(ref.minSnapshotsToKeep()).isNull(); + assertThat(ref.maxSnapshotAgeMs().longValue()) + .isEqualTo(TimeUnit.DAYS.toMillis(maxSnapshotAge)); + assertThat(ref.maxRefAgeMs()).isNull(); } - @Test + @TestTemplate public void testCreateBranchIfNotExists() throws NoSuchTableException { long maxSnapshotAge = 2L; Table table = insertRows(); @@ -171,13 +170,14 @@ public void testCreateBranchIfNotExists() throws NoSuchTableException { table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); - Assert.assertNull(ref.minSnapshotsToKeep()); - Assert.assertEquals(TimeUnit.DAYS.toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); - Assert.assertNull(ref.maxRefAgeMs()); + assertThat(ref.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId()); + assertThat(ref.minSnapshotsToKeep()).isNull(); + assertThat(ref.maxSnapshotAgeMs().longValue()) + 
.isEqualTo(TimeUnit.DAYS.toMillis(maxSnapshotAge)); + assertThat(ref.maxRefAgeMs()).isNull(); } - @Test + @TestTemplate public void testCreateBranchUseCustomMinSnapshotsToKeepAndMaxSnapshotAge() throws NoSuchTableException { Integer minSnapshotsToKeep = 2; @@ -189,10 +189,11 @@ public void testCreateBranchUseCustomMinSnapshotsToKeepAndMaxSnapshotAge() tableName, branchName, minSnapshotsToKeep, maxSnapshotAge); table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); - Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); - Assert.assertEquals(TimeUnit.DAYS.toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); - Assert.assertNull(ref.maxRefAgeMs()); + assertThat(ref.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId()); + assertThat(ref.minSnapshotsToKeep()).isEqualTo(minSnapshotsToKeep); + assertThat(ref.maxSnapshotAgeMs().longValue()) + .isEqualTo(TimeUnit.DAYS.toMillis(maxSnapshotAge)); + assertThat(ref.maxRefAgeMs()).isNull(); Assertions.assertThatThrownBy( () -> @@ -203,7 +204,7 @@ public void testCreateBranchUseCustomMinSnapshotsToKeepAndMaxSnapshotAge() .hasMessageContaining("no viable alternative at input 'WITH SNAPSHOT RETENTION'"); } - @Test + @TestTemplate public void testCreateBranchUseCustomMaxRefAge() throws NoSuchTableException { long maxRefAge = 10L; Table table = insertRows(); @@ -211,10 +212,10 @@ public void testCreateBranchUseCustomMaxRefAge() throws NoSuchTableException { sql("ALTER TABLE %s CREATE BRANCH %s RETAIN %d DAYS", tableName, branchName, maxRefAge); table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); - Assert.assertNull(ref.minSnapshotsToKeep()); - Assert.assertNull(ref.maxSnapshotAgeMs()); - Assert.assertEquals(TimeUnit.DAYS.toMillis(maxRefAge), ref.maxRefAgeMs().longValue()); + 
assertThat(ref.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId()); + assertThat(ref.minSnapshotsToKeep()).isNull(); + assertThat(ref.maxSnapshotAgeMs()).isNull(); + assertThat(ref.maxRefAgeMs().longValue()).isEqualTo(TimeUnit.DAYS.toMillis(maxRefAge)); Assertions.assertThatThrownBy( () -> sql("ALTER TABLE %s CREATE BRANCH %s RETAIN", tableName, branchName)) @@ -236,7 +237,7 @@ public void testCreateBranchUseCustomMaxRefAge() throws NoSuchTableException { .hasMessageContaining("mismatched input 'SECONDS' expecting {'DAYS', 'HOURS', 'MINUTES'}"); } - @Test + @TestTemplate public void testDropBranch() throws NoSuchTableException { insertRows(); @@ -244,16 +245,16 @@ public void testDropBranch() throws NoSuchTableException { String branchName = "b1"; table.manageSnapshots().createBranch(branchName, table.currentSnapshot().snapshotId()).commit(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertEquals(table.currentSnapshot().snapshotId(), ref.snapshotId()); + assertThat(ref.snapshotId()).isEqualTo(table.currentSnapshot().snapshotId()); sql("ALTER TABLE %s DROP BRANCH %s", tableName, branchName); table.refresh(); ref = table.refs().get(branchName); - Assert.assertNull(ref); + assertThat(ref).isNull(); } - @Test + @TestTemplate public void testDropBranchDoesNotExist() { Assertions.assertThatThrownBy( () -> sql("ALTER TABLE %s DROP BRANCH %s", tableName, "nonExistingBranch")) @@ -261,7 +262,7 @@ public void testDropBranchDoesNotExist() { .hasMessage("Branch does not exist: nonExistingBranch"); } - @Test + @TestTemplate public void testDropBranchFailsForTag() throws NoSuchTableException { String tagName = "b1"; Table table = insertRows(); @@ -272,31 +273,31 @@ public void testDropBranchFailsForTag() throws NoSuchTableException { .hasMessage("Ref b1 is a tag not a branch"); } - @Test + @TestTemplate public void testDropBranchNonConformingName() { Assertions.assertThatThrownBy(() -> sql("ALTER TABLE %s DROP BRANCH %s", tableName, "123")) 
.isInstanceOf(IcebergParseException.class) .hasMessageContaining("mismatched input '123'"); } - @Test + @TestTemplate public void testDropMainBranchFails() { Assertions.assertThatThrownBy(() -> sql("ALTER TABLE %s DROP BRANCH main", tableName)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot remove main branch"); } - @Test + @TestTemplate public void testDropBranchIfExists() { String branchName = "nonExistingBranch"; Table table = validationCatalog.loadTable(tableIdent); - Assert.assertNull(table.refs().get(branchName)); + assertThat(table.refs().get(branchName)).isNull(); sql("ALTER TABLE %s DROP BRANCH IF EXISTS %s", tableName, branchName); table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertNull(ref); + assertThat(ref).isNull(); } private Table insertRows() throws NoSuchTableException { @@ -307,7 +308,7 @@ private Table insertRows() throws NoSuchTableException { return validationCatalog.loadTable(tableIdent); } - @Test + @TestTemplate public void createOrReplace() throws NoSuchTableException { Table table = insertRows(); long first = table.currentSnapshot().snapshotId(); @@ -323,30 +324,30 @@ public void createOrReplace() throws NoSuchTableException { assertThat(table.refs().get(branchName).snapshotId()).isEqualTo(second); } - @Test + @TestTemplate public void testCreateOrReplaceBranchOnEmptyTable() { String branchName = "b1"; sql("ALTER TABLE %s CREATE OR REPLACE BRANCH %s", tableName, "b1"); Table table = validationCatalog.loadTable(tableIdent); SnapshotRef mainRef = table.refs().get(SnapshotRef.MAIN_BRANCH); - Assertions.assertThat(mainRef).isNull(); + assertThat(mainRef).isNull(); SnapshotRef ref = table.refs().get(branchName); - Assertions.assertThat(ref).isNotNull(); - Assertions.assertThat(ref.minSnapshotsToKeep()).isNull(); - Assertions.assertThat(ref.maxSnapshotAgeMs()).isNull(); - Assertions.assertThat(ref.maxRefAgeMs()).isNull(); + assertThat(ref).isNotNull(); + 
assertThat(ref.minSnapshotsToKeep()).isNull(); + assertThat(ref.maxSnapshotAgeMs()).isNull(); + assertThat(ref.maxRefAgeMs()).isNull(); Snapshot snapshot = table.snapshot(ref.snapshotId()); - Assertions.assertThat(snapshot.parentId()).isNull(); - Assertions.assertThat(snapshot.addedDataFiles(table.io())).isEmpty(); - Assertions.assertThat(snapshot.removedDataFiles(table.io())).isEmpty(); - Assertions.assertThat(snapshot.addedDeleteFiles(table.io())).isEmpty(); - Assertions.assertThat(snapshot.removedDeleteFiles(table.io())).isEmpty(); + assertThat(snapshot.parentId()).isNull(); + assertThat(snapshot.addedDataFiles(table.io())).isEmpty(); + assertThat(snapshot.removedDataFiles(table.io())).isEmpty(); + assertThat(snapshot.addedDeleteFiles(table.io())).isEmpty(); + assertThat(snapshot.removedDeleteFiles(table.io())).isEmpty(); } - @Test + @TestTemplate public void createOrReplaceWithNonExistingBranch() throws NoSuchTableException { Table table = insertRows(); String branchName = "b1"; @@ -360,7 +361,7 @@ public void createOrReplaceWithNonExistingBranch() throws NoSuchTableException { assertThat(table.refs().get(branchName).snapshotId()).isEqualTo(snapshotId); } - @Test + @TestTemplate public void replaceBranch() throws NoSuchTableException { Table table = insertRows(); long first = table.currentSnapshot().snapshotId(); @@ -382,7 +383,7 @@ public void replaceBranch() throws NoSuchTableException { assertThat(ref.maxRefAgeMs()).isEqualTo(expectedMaxRefAgeMs); } - @Test + @TestTemplate public void replaceBranchDoesNotExist() throws NoSuchTableException { Table table = insertRows(); From c42c235a3c5c09691d48899364f30e4245fd3bdf Mon Sep 17 00:00:00 2001 From: Tomohiro Tanaka Date: Tue, 6 Feb 2024 13:10:20 +0900 Subject: [PATCH 3/4] Add 4 more migrations for DDL extensions to JUnit 5 --- .../spark/extensions/TestReplaceBranch.java | 92 +++++------ .../TestRequiredDistributionAndOrdering.java | 39 +++-- .../TestSetWriteDistributionAndOrdering.java | 117 +++++++------- 
.../iceberg/spark/extensions/TestTagDDL.java | 152 +++++++++--------- 4 files changed, 194 insertions(+), 206 deletions(-) diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestReplaceBranch.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestReplaceBranch.java index eb167ed25be4..0899cd2870c3 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestReplaceBranch.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestReplaceBranch.java @@ -18,9 +18,12 @@ */ package org.apache.iceberg.spark.extensions; +import static org.assertj.core.api.Assertions.assertThat; + import java.util.List; -import java.util.Map; import java.util.concurrent.TimeUnit; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.SnapshotRef; import org.apache.iceberg.Table; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; @@ -30,16 +33,16 @@ import org.apache.spark.sql.Row; import org.apache.spark.sql.catalyst.analysis.NoSuchTableException; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; -public class TestReplaceBranch extends SparkExtensionsTestBase { +@ExtendWith(ParameterizedTestExtension.class) +public class TestReplaceBranch extends ExtensionsTestBase { private static final String[] TIME_UNITS = {"DAYS", "HOURS", "MINUTES"}; - @Parameterized.Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}") + @Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}") public static Object[][] parameters() { return new Object[][] { { @@ -50,16 +53,12 @@ public static Object[][] 
parameters() { }; } - public TestReplaceBranch(String catalogName, String implementation, Map config) { - super(catalogName, implementation, config); - } - - @After + @AfterEach public void removeTable() { sql("DROP TABLE IF EXISTS %s", tableName); } - @Test + @TestTemplate public void testReplaceBranchFailsForTag() throws NoSuchTableException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); String tagName = "tag1"; @@ -83,7 +82,7 @@ public void testReplaceBranchFailsForTag() throws NoSuchTableException { .hasMessage("Ref tag1 is a tag not a branch"); } - @Test + @TestTemplate public void testReplaceBranch() throws NoSuchTableException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); List records = @@ -112,14 +111,14 @@ public void testReplaceBranch() throws NoSuchTableException { table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertNotNull(ref); - Assert.assertEquals(second, ref.snapshotId()); - Assert.assertEquals(expectedMinSnapshotsToKeep, ref.minSnapshotsToKeep().intValue()); - Assert.assertEquals(expectedMaxSnapshotAgeMs, ref.maxSnapshotAgeMs().longValue()); - Assert.assertEquals(expectedMaxRefAgeMs, ref.maxRefAgeMs().longValue()); + assertThat(ref).isNotNull(); + assertThat(ref.snapshotId()).isEqualTo(second); + assertThat(ref.minSnapshotsToKeep().intValue()).isEqualTo(expectedMinSnapshotsToKeep); + assertThat(ref.maxSnapshotAgeMs().longValue()).isEqualTo(expectedMaxSnapshotAgeMs); + assertThat(ref.maxRefAgeMs().longValue()).isEqualTo(expectedMaxRefAgeMs); } - @Test + @TestTemplate public void testReplaceBranchDoesNotExist() throws NoSuchTableException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); List records = @@ -137,7 +136,7 @@ public void testReplaceBranchDoesNotExist() throws NoSuchTableException { .hasMessage("Branch does not exist: someBranch"); } - @Test + @TestTemplate public void testReplaceBranchWithRetain() throws NoSuchTableException { 
sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); List records = @@ -149,9 +148,6 @@ public void testReplaceBranchWithRetain() throws NoSuchTableException { long first = table.currentSnapshot().snapshotId(); String branchName = "b1"; table.manageSnapshots().createBranch(branchName, first).commit(); - SnapshotRef b1 = table.refs().get(branchName); - Integer minSnapshotsToKeep = b1.minSnapshotsToKeep(); - Long maxSnapshotAgeMs = b1.maxSnapshotAgeMs(); df.writeTo(tableName).append(); long second = table.currentSnapshot().snapshotId(); @@ -163,16 +159,16 @@ public void testReplaceBranchWithRetain() throws NoSuchTableException { table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertNotNull(ref); - Assert.assertEquals(second, ref.snapshotId()); - Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); - Assert.assertEquals(maxSnapshotAgeMs, ref.maxSnapshotAgeMs()); - Assert.assertEquals( - TimeUnit.valueOf(timeUnit).toMillis(maxRefAge), ref.maxRefAgeMs().longValue()); + assertThat(ref).isNotNull(); + assertThat(ref.snapshotId()).isEqualTo(second); + assertThat(ref.minSnapshotsToKeep()).isNull(); + assertThat(ref.maxSnapshotAgeMs()).isNull(); + assertThat(ref.maxRefAgeMs().longValue()) + .isEqualTo(TimeUnit.valueOf(timeUnit).toMillis(maxRefAge)); } } - @Test + @TestTemplate public void testReplaceBranchWithSnapshotRetention() throws NoSuchTableException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); List records = @@ -196,16 +192,16 @@ public void testReplaceBranchWithSnapshotRetention() throws NoSuchTableException table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertNotNull(ref); - Assert.assertEquals(second, ref.snapshotId()); - Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); - Assert.assertEquals( - TimeUnit.valueOf(timeUnit).toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); - Assert.assertEquals(maxRefAgeMs, ref.maxRefAgeMs()); 
+ assertThat(ref).isNotNull(); + assertThat(ref.snapshotId()).isEqualTo(second); + assertThat(ref.minSnapshotsToKeep()).isEqualTo(minSnapshotsToKeep); + assertThat(ref.maxSnapshotAgeMs().longValue()) + .isEqualTo(TimeUnit.valueOf(timeUnit).toMillis(maxSnapshotAge)); + assertThat(ref.maxRefAgeMs()).isEqualTo(maxRefAgeMs); } } - @Test + @TestTemplate public void testReplaceBranchWithRetainAndSnapshotRetention() throws NoSuchTableException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); List records = @@ -237,17 +233,17 @@ public void testReplaceBranchWithRetainAndSnapshotRetention() throws NoSuchTable table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertNotNull(ref); - Assert.assertEquals(second, ref.snapshotId()); - Assert.assertEquals(minSnapshotsToKeep, ref.minSnapshotsToKeep()); - Assert.assertEquals( - TimeUnit.valueOf(timeUnit).toMillis(maxSnapshotAge), ref.maxSnapshotAgeMs().longValue()); - Assert.assertEquals( - TimeUnit.valueOf(timeUnit).toMillis(maxRefAge), ref.maxRefAgeMs().longValue()); + assertThat(ref).isNotNull(); + assertThat(ref.snapshotId()).isEqualTo(second); + assertThat(ref.minSnapshotsToKeep()).isEqualTo(minSnapshotsToKeep); + assertThat(ref.maxSnapshotAgeMs().longValue()) + .isEqualTo(TimeUnit.valueOf(timeUnit).toMillis(maxSnapshotAge)); + assertThat(ref.maxRefAgeMs().longValue()) + .isEqualTo(TimeUnit.valueOf(timeUnit).toMillis(maxRefAge)); } } - @Test + @TestTemplate public void testCreateOrReplace() throws NoSuchTableException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); List records = @@ -268,7 +264,7 @@ public void testCreateOrReplace() throws NoSuchTableException { table.refresh(); SnapshotRef ref = table.refs().get(branchName); - Assert.assertNotNull(ref); - Assert.assertEquals(first, ref.snapshotId()); + assertThat(ref).isNotNull(); + assertThat(ref.snapshotId()).isEqualTo(first); } } diff --git 
a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestRequiredDistributionAndOrdering.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestRequiredDistributionAndOrdering.java index 4c678ce9b767..43368ab20e0a 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestRequiredDistributionAndOrdering.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestRequiredDistributionAndOrdering.java @@ -20,7 +20,7 @@ import java.math.BigDecimal; import java.util.List; -import java.util.Map; +import org.apache.iceberg.ParameterizedTestExtension; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.spark.SparkWriteOptions; import org.apache.iceberg.spark.source.ThreeColumnRecord; @@ -28,22 +28,19 @@ import org.apache.spark.sql.Row; import org.apache.spark.sql.catalyst.analysis.NoSuchTableException; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; -public class TestRequiredDistributionAndOrdering extends SparkExtensionsTestBase { +@ExtendWith(ParameterizedTestExtension.class) +public class TestRequiredDistributionAndOrdering extends ExtensionsTestBase { - public TestRequiredDistributionAndOrdering( - String catalogName, String implementation, Map config) { - super(catalogName, implementation, config); - } - - @After + @AfterEach public void dropTestTable() { sql("DROP TABLE IF EXISTS %s", tableName); } - @Test + @TestTemplate public void testDefaultLocalSortWithBucketTransforms() throws NoSuchTableException { sql( "CREATE TABLE %s (c1 INT, c2 STRING, c3 STRING) " @@ -72,7 +69,7 @@ public void testDefaultLocalSortWithBucketTransforms() throws NoSuchTableExcepti sql("SELECT count(*) FROM %s", tableName)); } - @Test + 
@TestTemplate public void testPartitionColumnsArePrependedForRangeDistribution() throws NoSuchTableException { sql( "CREATE TABLE %s (c1 INT, c2 STRING, c3 STRING) " @@ -103,7 +100,7 @@ public void testPartitionColumnsArePrependedForRangeDistribution() throws NoSuch sql("SELECT count(*) FROM %s", tableName)); } - @Test + @TestTemplate public void testSortOrderIncludesPartitionColumns() throws NoSuchTableException { sql( "CREATE TABLE %s (c1 INT, c2 STRING, c3 STRING) " @@ -134,7 +131,7 @@ public void testSortOrderIncludesPartitionColumns() throws NoSuchTableException sql("SELECT count(*) FROM %s", tableName)); } - @Test + @TestTemplate public void testHashDistributionOnBucketedColumn() throws NoSuchTableException { sql( "CREATE TABLE %s (c1 INT, c2 STRING, c3 STRING) " @@ -165,7 +162,7 @@ public void testHashDistributionOnBucketedColumn() throws NoSuchTableException { sql("SELECT count(*) FROM %s", tableName)); } - @Test + @TestTemplate public void testDisabledDistributionAndOrdering() { sql( "CREATE TABLE %s (c1 INT, c2 STRING, c3 STRING) " @@ -200,7 +197,7 @@ public void testDisabledDistributionAndOrdering() { + "and by partition within each spec. 
Either cluster the incoming records or switch to fanout writers."); } - @Test + @TestTemplate public void testDefaultSortOnDecimalBucketedColumn() { sql( "CREATE TABLE %s (c1 INT, c2 DECIMAL(20, 2)) " @@ -219,7 +216,7 @@ public void testDefaultSortOnDecimalBucketedColumn() { assertEquals("Rows must match", expected, sql("SELECT * FROM %s ORDER BY c1", tableName)); } - @Test + @TestTemplate public void testDefaultSortOnStringBucketedColumn() { sql( "CREATE TABLE %s (c1 INT, c2 STRING) " @@ -234,7 +231,7 @@ public void testDefaultSortOnStringBucketedColumn() { assertEquals("Rows must match", expected, sql("SELECT * FROM %s ORDER BY c1", tableName)); } - @Test + @TestTemplate public void testDefaultSortOnBinaryBucketedColumn() { sql( "CREATE TABLE %s (c1 INT, c2 Binary) " @@ -251,7 +248,7 @@ public void testDefaultSortOnBinaryBucketedColumn() { assertEquals("Rows must match", expected, sql("SELECT * FROM %s ORDER BY c1", tableName)); } - @Test + @TestTemplate public void testDefaultSortOnDecimalTruncatedColumn() { sql( "CREATE TABLE %s (c1 INT, c2 DECIMAL(20, 2)) " @@ -267,7 +264,7 @@ public void testDefaultSortOnDecimalTruncatedColumn() { assertEquals("Rows must match", expected, sql("SELECT * FROM %s ORDER BY c1", tableName)); } - @Test + @TestTemplate public void testDefaultSortOnLongTruncatedColumn() { sql( "CREATE TABLE %s (c1 INT, c2 BIGINT) " @@ -282,7 +279,7 @@ public void testDefaultSortOnLongTruncatedColumn() { assertEquals("Rows must match", expected, sql("SELECT * FROM %s ORDER BY c1", tableName)); } - @Test + @TestTemplate public void testRangeDistributionWithQuotedColumnNames() throws NoSuchTableException { sql( "CREATE TABLE %s (`c.1` INT, c2 STRING, c3 STRING) " diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java index caa9752004c2..696503ce6bd4 100644 
--- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java @@ -19,31 +19,30 @@ package org.apache.iceberg.spark.extensions; import static org.apache.iceberg.expressions.Expressions.bucket; +import static org.assertj.core.api.Assertions.assertThat; -import java.util.Map; import org.apache.iceberg.NullOrder; +import org.apache.iceberg.ParameterizedTestExtension; import org.apache.iceberg.SortOrder; import org.apache.iceberg.Table; import org.apache.iceberg.TableProperties; import org.apache.iceberg.exceptions.ValidationException; import org.apache.spark.sql.internal.SQLConf; import org.assertj.core.api.Assertions; -import org.junit.After; import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; -public class TestSetWriteDistributionAndOrdering extends SparkExtensionsTestBase { - public TestSetWriteDistributionAndOrdering( - String catalogName, String implementation, Map config) { - super(catalogName, implementation, config); - } +@ExtendWith(ParameterizedTestExtension.class) +public class TestSetWriteDistributionAndOrdering extends ExtensionsTestBase { - @After + @AfterEach public void removeTable() { sql("DROP TABLE IF EXISTS %s", tableName); } - @Test + @TestTemplate public void testSetWriteOrderByColumn() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", @@ -56,7 +55,7 @@ public void testSetWriteOrderByColumn() { table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "range", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("range"); SortOrder 
expected = SortOrder.builderFor(table.schema()) @@ -64,16 +63,16 @@ public void testSetWriteOrderByColumn() { .asc("category", NullOrder.NULLS_FIRST) .asc("id", NullOrder.NULLS_FIRST) .build(); - Assert.assertEquals("Should have expected order", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Should have expected order").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteOrderWithCaseSensitiveColumnNames() { sql( "CREATE TABLE %s (Id bigint NOT NULL, Category string, ts timestamp, data string) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("SET %s=true", SQLConf.CASE_SENSITIVE().key()); Assertions.assertThatThrownBy( () -> { @@ -87,23 +86,23 @@ public void testSetWriteOrderWithCaseSensitiveColumnNames() { table = validationCatalog.loadTable(tableIdent); SortOrder expected = SortOrder.builderFor(table.schema()).withOrderId(1).asc("Category").asc("Id").build(); - Assert.assertEquals("Should have expected order", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Should have expected order").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteOrderByColumnWithDirection() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE ORDERED BY category ASC, id DESC", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "range", distributionMode); + 
assertThat(distributionMode).as("Distribution mode must match").isEqualTo("range"); SortOrder expected = SortOrder.builderFor(table.schema()) @@ -111,23 +110,23 @@ public void testSetWriteOrderByColumnWithDirection() { .asc("category", NullOrder.NULLS_FIRST) .desc("id", NullOrder.NULLS_LAST) .build(); - Assert.assertEquals("Should have expected order", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Should have expected order").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteOrderByColumnWithDirectionAndNullOrder() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE ORDERED BY category ASC NULLS LAST, id DESC NULLS FIRST", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "range", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("range"); SortOrder expected = SortOrder.builderFor(table.schema()) @@ -135,10 +134,10 @@ public void testSetWriteOrderByColumnWithDirectionAndNullOrder() { .asc("category", NullOrder.NULLS_LAST) .desc("id", NullOrder.NULLS_FIRST) .build(); - Assert.assertEquals("Should have expected order", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Should have expected order").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteOrderByTransform() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", @@ -151,7 +150,7 @@ public void testSetWriteOrderByTransform() { table.refresh(); String distributionMode = 
table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "range", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("range"); SortOrder expected = SortOrder.builderFor(table.schema()) @@ -160,50 +159,50 @@ public void testSetWriteOrderByTransform() { .asc(bucket("id", 16)) .asc("id") .build(); - Assert.assertEquals("Should have expected order", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Should have expected order").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteUnordered() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE ORDERED BY category DESC, bucket(16, id), id", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "range", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("range"); - Assert.assertNotEquals("Table must be sorted", SortOrder.unsorted(), table.sortOrder()); + assertThat(table.sortOrder()).as("Table must be sorted").isNotEqualTo(SortOrder.unsorted()); sql("ALTER TABLE %s WRITE UNORDERED", tableName); table.refresh(); String newDistributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("New distribution mode must match", "none", newDistributionMode); + assertThat(newDistributionMode).as("New distribution mode must match").isEqualTo("none"); - Assert.assertEquals("New sort order must match", SortOrder.unsorted(), table.sortOrder()); + assertThat(table.sortOrder()).as("New 
sort order must match").isEqualTo(SortOrder.unsorted()); } - @Test + @TestTemplate public void testSetWriteLocallyOrdered() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE LOCALLY ORDERED BY category DESC, bucket(16, id), id", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "none", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("none"); SortOrder expected = SortOrder.builderFor(table.schema()) @@ -212,29 +211,29 @@ public void testSetWriteLocallyOrdered() { .asc(bucket("id", 16)) .asc("id") .build(); - Assert.assertEquals("Sort order must match", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Sort order must match").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteDistributedByWithSort() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string) USING iceberg PARTITIONED BY (category)", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE DISTRIBUTED BY PARTITION ORDERED BY id", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "hash", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("hash"); SortOrder expected = 
SortOrder.builderFor(table.schema()).withOrderId(1).asc("id").build(); - Assert.assertEquals("Sort order must match", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Sort order must match").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteDistributedByWithLocalSort() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string) USING iceberg PARTITIONED BY (category)", @@ -247,82 +246,82 @@ public void testSetWriteDistributedByWithLocalSort() { table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "hash", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("hash"); SortOrder expected = SortOrder.builderFor(table.schema()).withOrderId(1).asc("id").build(); - Assert.assertEquals("Sort order must match", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Sort order must match").isEqualTo(expected); } - @Test + @TestTemplate public void testSetWriteDistributedByAndUnordered() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string) USING iceberg PARTITIONED BY (category)", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE DISTRIBUTED BY PARTITION UNORDERED", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "hash", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("hash"); - Assert.assertEquals("Sort order must match", SortOrder.unsorted(), table.sortOrder()); + assertThat(table.sortOrder()).as("Sort order must match").isEqualTo(SortOrder.unsorted()); } - @Test + @TestTemplate public 
void testSetWriteDistributedByOnly() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string) USING iceberg PARTITIONED BY (category)", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE DISTRIBUTED BY PARTITION UNORDERED", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "hash", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("hash"); - Assert.assertEquals("Sort order must match", SortOrder.unsorted(), table.sortOrder()); + assertThat(table.sortOrder()).as("Sort order must match").isEqualTo(SortOrder.unsorted()); } - @Test + @TestTemplate public void testSetWriteDistributedAndUnorderedInverted() { sql( "CREATE TABLE %s (id bigint NOT NULL, category string) USING iceberg PARTITIONED BY (category)", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE UNORDERED DISTRIBUTED BY PARTITION", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "hash", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("hash"); - Assert.assertEquals("Sort order must match", SortOrder.unsorted(), table.sortOrder()); + assertThat(table.sortOrder()).as("Sort order must match").isEqualTo(SortOrder.unsorted()); } - @Test + @TestTemplate public void testSetWriteDistributedAndLocallyOrderedInverted() { sql( "CREATE TABLE %s (id bigint NOT 
NULL, category string) USING iceberg PARTITIONED BY (category)", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE ORDERED BY id DISTRIBUTED BY PARTITION", tableName); table.refresh(); String distributionMode = table.properties().get(TableProperties.WRITE_DISTRIBUTION_MODE); - Assert.assertEquals("Distribution mode must match", "hash", distributionMode); + assertThat(distributionMode).as("Distribution mode must match").isEqualTo("hash"); SortOrder expected = SortOrder.builderFor(table.schema()).withOrderId(1).asc("id").build(); - Assert.assertEquals("Sort order must match", expected, table.sortOrder()); + assertThat(table.sortOrder()).as("Sort order must match").isEqualTo(expected); } } diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestTagDDL.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestTagDDL.java index 52b9134089fb..7f33d302fbf8 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestTagDDL.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestTagDDL.java @@ -22,8 +22,9 @@ import java.util.List; import java.util.Locale; -import java.util.Map; import java.util.concurrent.TimeUnit; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.SnapshotRef; import org.apache.iceberg.Table; import org.apache.iceberg.exceptions.ValidationException; @@ -35,16 +36,16 @@ import org.apache.spark.sql.catalyst.analysis.NoSuchTableException; import org.apache.spark.sql.catalyst.parser.extensions.IcebergParseException; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import 
org.junit.Test; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; -public class TestTagDDL extends SparkExtensionsTestBase { +@ExtendWith(ParameterizedTestExtension.class) +public class TestTagDDL extends ExtensionsTestBase { private static final String[] TIME_UNITS = {"DAYS", "HOURS", "MINUTES"}; - @Parameterized.Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}") + @Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}") public static Object[][] parameters() { return new Object[][] { { @@ -55,21 +56,17 @@ public static Object[][] parameters() { }; } - public TestTagDDL(String catalogName, String implementation, Map config) { - super(catalogName, implementation, config); - } - - @Before - public void before() { + @BeforeEach + public void createTable() { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); } - @After + @AfterEach public void removeTable() { sql("DROP TABLE IF EXISTS %s", tableName); } - @Test + @TestTemplate public void testCreateTagWithRetain() throws NoSuchTableException { Table table = insertRows(); long firstSnapshotId = table.currentSnapshot().snapshotId(); @@ -87,12 +84,12 @@ public void testCreateTagWithRetain() throws NoSuchTableException { tableName, tagName, firstSnapshotId, maxRefAge, timeUnit); table.refresh(); SnapshotRef ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs to point to a specific snapshot id.", firstSnapshotId, ref.snapshotId()); - Assert.assertEquals( - "The tag needs to have the correct max ref age.", - TimeUnit.valueOf(timeUnit.toUpperCase(Locale.ENGLISH)).toMillis(maxRefAge), - ref.maxRefAgeMs().longValue()); + assertThat(ref.snapshotId()) + .as("The tag needs to point to a specific snapshot id.") + .isEqualTo(firstSnapshotId); + 
assertThat(ref.maxRefAgeMs().longValue()) + .as("The tag needs to have the correct max ref age.") + .isEqualTo(TimeUnit.valueOf(timeUnit.toUpperCase(Locale.ENGLISH)).toMillis(maxRefAge)); } String tagName = "t1"; @@ -118,7 +115,7 @@ public void testCreateTagWithRetain() throws NoSuchTableException { .hasMessageContaining("mismatched input 'SECONDS' expecting {'DAYS', 'HOURS', 'MINUTES'}"); } - @Test + @TestTemplate public void testCreateTagOnEmptyTable() { Assertions.assertThatThrownBy(() -> sql("ALTER TABLE %s CREATE TAG %s", tableName, "abc")) .isInstanceOf(IllegalArgumentException.class) @@ -127,7 +124,7 @@ public void testCreateTagOnEmptyTable() { tableName); } - @Test + @TestTemplate public void testCreateTagUseDefaultConfig() throws NoSuchTableException { Table table = insertRows(); long snapshotId = table.currentSnapshot().snapshotId(); @@ -141,10 +138,12 @@ public void testCreateTagUseDefaultConfig() throws NoSuchTableException { sql("ALTER TABLE %s CREATE TAG %s", tableName, tagName); table.refresh(); SnapshotRef ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs to point to a specific snapshot id.", snapshotId, ref.snapshotId()); - Assert.assertNull( - "The tag needs to have the default max ref age, which is null.", ref.maxRefAgeMs()); + assertThat(ref.snapshotId()) + .as("The tag needs to point to a specific snapshot id.") + .isEqualTo(snapshotId); + assertThat(ref.maxRefAgeMs()) + .as("The tag needs to have the default max ref age, which is null.") + .isNull(); Assertions.assertThatThrownBy(() -> sql("ALTER TABLE %s CREATE TAG %s", tableName, tagName)) .isInstanceOf(IllegalArgumentException.class) @@ -163,13 +162,15 @@ public void testCreateTagUseDefaultConfig() throws NoSuchTableException { sql("ALTER TABLE %s CREATE TAG %s AS OF VERSION %d", tableName, tagName, snapshotId); table.refresh(); ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs to point to a specific snapshot id.", snapshotId, ref.snapshotId()); 
- Assert.assertNull( - "The tag needs to have the default max ref age, which is null.", ref.maxRefAgeMs()); + assertThat(ref.snapshotId()) + .as("The tag needs to point to a specific snapshot id.") + .isEqualTo(snapshotId); + assertThat(ref.maxRefAgeMs()) + .as("The tag needs to have the default max ref age, which is null.") + .isNull(); } - @Test + @TestTemplate public void testCreateTagIfNotExists() throws NoSuchTableException { long maxSnapshotAge = 2L; Table table = insertRows(); @@ -179,17 +180,15 @@ public void testCreateTagIfNotExists() throws NoSuchTableException { table.refresh(); SnapshotRef ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs to point to a specific snapshot id.", - table.currentSnapshot().snapshotId(), - ref.snapshotId()); - Assert.assertEquals( - "The tag needs to have the correct max ref age.", - TimeUnit.DAYS.toMillis(maxSnapshotAge), - ref.maxRefAgeMs().longValue()); + assertThat(ref.snapshotId()) + .as("The tag needs to point to a specific snapshot id.") + .isEqualTo(table.currentSnapshot().snapshotId()); + assertThat(ref.maxRefAgeMs().longValue()) + .as("The tag needs to have the correct max ref age.") + .isEqualTo(TimeUnit.DAYS.toMillis(maxSnapshotAge)); } - @Test + @TestTemplate public void testReplaceTagFailsForBranch() throws NoSuchTableException { String branchName = "branch1"; Table table = insertRows(); @@ -204,7 +203,7 @@ public void testReplaceTagFailsForBranch() throws NoSuchTableException { .hasMessageContaining("Ref branch1 is a branch not a tag"); } - @Test + @TestTemplate public void testReplaceTag() throws NoSuchTableException { Table table = insertRows(); long first = table.currentSnapshot().snapshotId(); @@ -222,15 +221,15 @@ public void testReplaceTag() throws NoSuchTableException { sql("ALTER TABLE %s REPLACE Tag %s AS OF VERSION %d", tableName, tagName, second); table.refresh(); SnapshotRef ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs to point to a specific 
snapshot id.", second, ref.snapshotId()); - Assert.assertEquals( - "The tag needs to have the correct max ref age.", - expectedMaxRefAgeMs, - ref.maxRefAgeMs().longValue()); + assertThat(ref.snapshotId()) + .as("The tag needs to point to a specific snapshot id.") + .isEqualTo(second); + assertThat(ref.maxRefAgeMs().longValue()) + .as("The tag needs to have the correct max ref age.") + .isEqualTo(expectedMaxRefAgeMs); } - @Test + @TestTemplate public void testReplaceTagDoesNotExist() throws NoSuchTableException { Table table = insertRows(); @@ -243,7 +242,7 @@ public void testReplaceTagDoesNotExist() throws NoSuchTableException { .hasMessageContaining("Tag does not exist"); } - @Test + @TestTemplate public void testReplaceTagWithRetain() throws NoSuchTableException { Table table = insertRows(); long first = table.currentSnapshot().snapshotId(); @@ -260,16 +259,16 @@ public void testReplaceTagWithRetain() throws NoSuchTableException { table.refresh(); SnapshotRef ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs to point to a specific snapshot id.", second, ref.snapshotId()); - Assert.assertEquals( - "The tag needs to have the correct max ref age.", - TimeUnit.valueOf(timeUnit).toMillis(maxRefAge), - ref.maxRefAgeMs().longValue()); + assertThat(ref.snapshotId()) + .as("The tag needs to point to a specific snapshot id.") + .isEqualTo(second); + assertThat(ref.maxRefAgeMs().longValue()) + .as("The tag needs to have the correct max ref age.") + .isEqualTo(TimeUnit.valueOf(timeUnit).toMillis(maxRefAge)); } } - @Test + @TestTemplate public void testCreateOrReplace() throws NoSuchTableException { Table table = insertRows(); long first = table.currentSnapshot().snapshotId(); @@ -281,36 +280,34 @@ public void testCreateOrReplace() throws NoSuchTableException { sql("ALTER TABLE %s CREATE OR REPLACE TAG %s AS OF VERSION %d", tableName, tagName, first); table.refresh(); SnapshotRef ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs 
to point to a specific snapshot id.", first, ref.snapshotId()); + assertThat(ref.snapshotId()) + .as("The tag needs to point to a specific snapshot id.") + .isEqualTo(first); } - @Test + @TestTemplate public void testDropTag() throws NoSuchTableException { insertRows(); Table table = validationCatalog.loadTable(tableIdent); String tagName = "t1"; table.manageSnapshots().createTag(tagName, table.currentSnapshot().snapshotId()).commit(); SnapshotRef ref = table.refs().get(tagName); - Assert.assertEquals( - "The tag needs to point to a specific snapshot id.", - table.currentSnapshot().snapshotId(), - ref.snapshotId()); + assertThat(ref.snapshotId()).as("The tag needs to point to a specific snapshot id.").isEqualTo(table.currentSnapshot().snapshotId()); sql("ALTER TABLE %s DROP TAG %s", tableName, tagName); table.refresh(); ref = table.refs().get(tagName); - Assert.assertNull("The tag needs to be dropped.", ref); + assertThat(ref).as("The tag needs to be dropped.").isNull(); } - @Test + @TestTemplate public void testDropTagNonConformingName() { Assertions.assertThatThrownBy(() -> sql("ALTER TABLE %s DROP TAG %s", tableName, "123")) .isInstanceOf(IcebergParseException.class) .hasMessageContaining("mismatched input '123'"); } - @Test + @TestTemplate public void testDropTagDoesNotExist() { Assertions.assertThatThrownBy( () -> sql("ALTER TABLE %s DROP TAG %s", tableName, "nonExistingTag")) @@ -318,7 +315,7 @@ public void testDropTagDoesNotExist() { .hasMessageContaining("Tag does not exist: nonExistingTag"); } - @Test + @TestTemplate public void testDropTagFailesForBranch() throws NoSuchTableException { String branchName = "b1"; Table table = insertRows(); @@ -329,28 +326,27 @@ public void testDropTagFailesForBranch() throws NoSuchTableException { .hasMessageContaining("Ref b1 is a branch not a tag"); } - @Test + @TestTemplate public void testDropTagIfExists() throws NoSuchTableException { String tagName = "nonExistingTag"; Table table = insertRows(); - Assert.assertNull("The tag does not exists.", 
table.refs().get(tagName)); + assertThat(table.refs().get(tagName)).as("The tag does not exists.").isNull(); sql("ALTER TABLE %s DROP TAG IF EXISTS %s", tableName, tagName); table.refresh(); - Assert.assertNull("The tag still does not exist.", table.refs().get(tagName)); + assertThat(table.refs().get(tagName)).as("The tag still does not exist.").isNull(); table.manageSnapshots().createTag(tagName, table.currentSnapshot().snapshotId()).commit(); - Assert.assertEquals( - "The tag has been created successfully.", - table.currentSnapshot().snapshotId(), - table.refs().get(tagName).snapshotId()); + assertThat(table.refs().get(tagName).snapshotId()) + .as("The tag has been created successfully.") + .isEqualTo(table.currentSnapshot().snapshotId()); sql("ALTER TABLE %s DROP TAG IF EXISTS %s", tableName, tagName); table.refresh(); - Assert.assertNull("The tag needs to be dropped.", table.refs().get(tagName)); + assertThat(table.refs().get(tagName)).as("The tag needs to be dropped.").isNull(); } - @Test + @TestTemplate public void createOrReplaceWithNonExistingTag() throws NoSuchTableException { Table table = insertRows(); String tagName = "t1"; From 5e735a60d2d0deac69cbd3ce383cf0effe19077e Mon Sep 17 00:00:00 2001 From: Tomohiro Tanaka Date: Tue, 6 Feb 2024 18:50:23 +0900 Subject: [PATCH 4/4] Update assert and relevant tests to assertJ --- .../extensions/TestSetWriteDistributionAndOrdering.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java index 696503ce6bd4..ce49a1630201 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java +++ 
b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestSetWriteDistributionAndOrdering.java @@ -29,7 +29,6 @@ import org.apache.iceberg.exceptions.ValidationException; import org.apache.spark.sql.internal.SQLConf; import org.assertj.core.api.Assertions; -import org.junit.Assert; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.TestTemplate; import org.junit.jupiter.api.extension.ExtendWith; @@ -48,7 +47,7 @@ public void testSetWriteOrderByColumn() { "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE ORDERED BY category, id", tableName); @@ -143,7 +142,7 @@ public void testSetWriteOrderByTransform() { "CREATE TABLE %s (id bigint NOT NULL, category string, ts timestamp, data string) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).isTrue(); sql("ALTER TABLE %s WRITE ORDERED BY category DESC, bucket(16, id), id", tableName); @@ -239,7 +238,7 @@ public void testSetWriteDistributedByWithLocalSort() { "CREATE TABLE %s (id bigint NOT NULL, category string) USING iceberg PARTITIONED BY (category)", tableName); Table table = validationCatalog.loadTable(tableIdent); - Assert.assertTrue("Table should start unsorted", table.sortOrder().isUnsorted()); + assertThat(table.sortOrder().isUnsorted()).as("Table should start unsorted").isTrue(); sql("ALTER TABLE %s WRITE DISTRIBUTED BY PARTITION LOCALLY ORDERED BY id", tableName);