diff --git a/build.gradle b/build.gradle
index 1a0803394d50..8b858755b283 100644
--- a/build.gradle
+++ b/build.gradle
@@ -731,6 +731,9 @@ project(':iceberg-orc') {
 }
 
 project(':iceberg-parquet') {
+  test {
+    useJUnitPlatform()
+  }
   dependencies {
     implementation project(path: ':iceberg-bundled-guava', configuration: 'shadow')
     api project(':iceberg-api')
diff --git a/parquet/src/test/java/org/apache/iceberg/TestHelpers.java b/parquet/src/test/java/org/apache/iceberg/TestHelpers.java
index be6ebe93d59e..0e7627cab1f5 100644
--- a/parquet/src/test/java/org/apache/iceberg/TestHelpers.java
+++ b/parquet/src/test/java/org/apache/iceberg/TestHelpers.java
@@ -18,11 +18,12 @@
  */
 package org.apache.iceberg;
 
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
 import java.util.concurrent.Callable;
 import org.apache.avro.AvroRuntimeException;
 import org.apache.avro.generic.GenericRecord;
 import org.assertj.core.api.AbstractThrowableAssert;
-import org.assertj.core.api.Assertions;
 
 public class TestHelpers {
@@ -42,7 +43,7 @@ public static void assertThrows(
       String containedInMessage,
       Callable callable) {
     AbstractThrowableAssert check =
-        Assertions.assertThatThrownBy(callable::call).as(message).isInstanceOf(expected);
+        assertThatThrownBy(callable::call).as(message).isInstanceOf(expected);
     if (null != containedInMessage) {
       check.hasMessageContaining(containedInMessage);
     }
@@ -62,7 +63,7 @@ public static void assertThrows(
       String containedInMessage,
       Runnable runnable) {
     AbstractThrowableAssert check =
-        Assertions.assertThatThrownBy(runnable::run).as(message).isInstanceOf(expected);
+        assertThatThrownBy(runnable::run).as(message).isInstanceOf(expected);
     if (null != containedInMessage) {
       check.hasMessageContaining(containedInMessage);
     }
diff --git a/parquet/src/test/java/org/apache/iceberg/avro/TestParquetReadProjection.java b/parquet/src/test/java/org/apache/iceberg/avro/TestParquetReadProjection.java
index 13bf1a37a119..2df806b1fb9a 100644
--- a/parquet/src/test/java/org/apache/iceberg/avro/TestParquetReadProjection.java
+++ b/parquet/src/test/java/org/apache/iceberg/avro/TestParquetReadProjection.java
@@ -32,7 +32,7 @@ public class TestParquetReadProjection extends TestReadProjection {
   protected GenericData.Record writeAndRead(
       String desc, Schema writeSchema, Schema readSchema, GenericData.Record record)
       throws IOException {
-    File file = temp.newFile(desc + ".parquet");
+    File file = temp.resolve(desc + ".parquet").toFile();
     file.delete();
 
     try (FileAppender appender =
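The files that follow all apply the same migration: JUnit 4's `TemporaryFolder` rule becomes JUnit 5's `@TempDir` injection, and the `useJUnitPlatform()` block added to build.gradle above is what makes Gradle discover and run the Jupiter tests in the first place. A minimal sketch of the pattern, with illustrative class and file names that are not part of this patch:

```java
import java.io.File;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirSketch {
  // JUnit 5 injects a fresh temporary directory; this field replaces the
  // JUnit 4 `@Rule public TemporaryFolder temp = new TemporaryFolder();`.
  @TempDir Path temp;

  @Test
  void writesIntoTempDir() {
    // JUnit 4's temp.newFile("out.parquet") becomes Path#resolve + toFile;
    // resolve() only builds the path and does not create the file on disk,
    // which is why the callers above delete() defensively before writing.
    File file = temp.resolve("out.parquet").toFile();
  }
}
```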
diff --git a/parquet/src/test/java/org/apache/iceberg/avro/TestReadProjection.java b/parquet/src/test/java/org/apache/iceberg/avro/TestReadProjection.java
index 851cf5f7eef9..93c6ad05379e 100644
--- a/parquet/src/test/java/org/apache/iceberg/avro/TestReadProjection.java
+++ b/parquet/src/test/java/org/apache/iceberg/avro/TestReadProjection.java
@@ -18,7 +18,12 @@
  */
 package org.apache.iceberg.avro;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.assertj.core.api.Assertions.within;
+
 import java.io.IOException;
+import java.nio.file.Path;
 import java.util.List;
 import java.util.Map;
 import org.apache.avro.generic.GenericData.Record;
@@ -27,20 +32,15 @@
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
 import org.apache.iceberg.relocated.com.google.common.collect.Maps;
-import org.apache.iceberg.relocated.com.google.common.collect.Sets;
-import org.apache.iceberg.types.Comparators;
 import org.apache.iceberg.types.Types;
-import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public abstract class TestReadProjection {
   protected abstract Record writeAndRead(
       String desc, Schema writeSchema, Schema readSchema, Record record) throws IOException;
 
-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir protected Path temp;
 
   @Test
   public void testFullProjection() throws Exception {
@@ -55,10 +55,11 @@ public void testFullProjection() throws Exception {
 
     Record projected = writeAndRead("full_projection", schema, schema, record);
 
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
 
-    int cmp = Comparators.charSequences().compare("test", (CharSequence) projected.get("data"));
-    Assert.assertTrue("Should contain the correct data value", cmp == 0);
+    assertThat(projected.get("data").toString())
+        .as("Should contain the correct data value")
+        .isEqualTo("test");
   }
 
   @Test
@@ -79,8 +80,10 @@ public void testReorderedFullProjection() throws Exception {
 
     Record projected = writeAndRead("full_projection", schema, reordered, record);
 
-    Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString());
-    Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1));
+    assertThat(projected.get(0).toString())
+        .as("Should contain the correct 0 value")
+        .isEqualTo("test");
+    assertThat(projected.get(1)).as("Should contain the correct 1 value").isEqualTo(34L);
   }
 
   @Test
@@ -102,9 +105,11 @@ public void testReorderedProjection() throws Exception {
 
     Record projected = writeAndRead("full_projection", schema, reordered, record);
 
-    Assert.assertNull("Should contain the correct 0 value", projected.get(0));
-    Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString());
-    Assert.assertNull("Should contain the correct 2 value", projected.get(2));
+    assertThat(projected.get(0)).as("Should contain the correct 0 value").isNull();
+    assertThat(projected.get(1).toString())
+        .as("Should contain the correct 1 value")
+        .isEqualTo("test");
+    assertThat(projected.get(2)).as("Should contain the correct 2 value").isNull();
   }
 
   @Test
@@ -120,10 +125,9 @@ public void testEmptyProjection() throws Exception {
 
     Record projected = writeAndRead("empty_projection", schema, schema.select(), record);
 
-    Assert.assertNotNull("Should read a non-null record", projected);
+    assertThat(projected).as("Should read a non-null record").isNotNull();
     // this is expected because there are no values
-    Assertions.assertThatThrownBy(() -> projected.get(0))
-        .isInstanceOf(ArrayIndexOutOfBoundsException.class);
+    assertThatThrownBy(() -> projected.get(0)).isInstanceOf(ArrayIndexOutOfBoundsException.class);
   }
 
   @Test
@@ -141,15 +145,16 @@ public void testBasicProjection() throws Exception {
 
     Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record);
     TestHelpers.assertEmptyAvroField(projected, "data");
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
 
     Schema dataOnly = new Schema(Types.NestedField.optional(1, "data", Types.StringType.get()));
 
     projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record);
 
     TestHelpers.assertEmptyAvroField(projected, "id");
-    int cmp = Comparators.charSequences().compare("test", (CharSequence) projected.get("data"));
-    Assert.assertEquals("Should contain the correct data value", 0, cmp);
+    assertThat(projected.get("data").toString())
+        .as("Should contain the correct data value")
+        .isEqualTo("test");
   }
 
   @Test
@@ -170,9 +175,11 @@ public void testRename() throws Exception {
 
     Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record);
 
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
-    int cmp = Comparators.charSequences().compare("test", (CharSequence) projected.get("renamed"));
-    Assert.assertTrue("Should contain the correct data/renamed value", cmp == 0);
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
+
+    assertThat(projected.get("renamed").toString())
+        .as("Should contain the correct data/renamed value")
+        .isEqualTo("test");
   }
 
   @Test
@@ -199,7 +206,7 @@ public void testNestedStructProjection() throws Exception {
 
     Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
     TestHelpers.assertEmptyAvroField(projected, "location");
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
 
     Schema latOnly =
         new Schema(
@@ -211,10 +218,11 @@ public void testNestedStructProjection() throws Exception {
     projected = writeAndRead("latitude_only", writeSchema, latOnly, record);
     Record projectedLocation = (Record) projected.get("location");
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertNotNull("Should project location", projected.get("location"));
+    assertThat(projected.get("location")).as("Should project location").isNotNull();
     TestHelpers.assertEmptyAvroField(projectedLocation, "long");
-    Assert.assertEquals(
-        "Should project latitude", 52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
+    assertThat((float) projectedLocation.get("lat"))
+        .as("Should project latitude")
+        .isCloseTo(52.995143f, within(0.000001f));
 
     Schema longOnly =
         new Schema(
@@ -226,20 +234,24 @@ public void testNestedStructProjection() throws Exception {
     projected = writeAndRead("longitude_only", writeSchema, longOnly, record);
     projectedLocation = (Record) projected.get("location");
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertNotNull("Should project location", projected.get("location"));
+    assertThat(projected.get("location")).as("Should project location").isNotNull();
     TestHelpers.assertEmptyAvroField(projectedLocation, "lat");
-    Assert.assertEquals(
-        "Should project longitude", -1.539054f, (float) projectedLocation.get("long"), 0.000001f);
+    assertThat((float) projectedLocation.get("long"))
+        .as("Should project longitude")
+        .isCloseTo(-1.539054f, within(0.000001f));
 
     Schema locationOnly = writeSchema.select("location");
     projected = writeAndRead("location_only", writeSchema, locationOnly, record);
     projectedLocation = (Record) projected.get("location");
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertNotNull("Should project location", projected.get("location"));
-    Assert.assertEquals(
-        "Should project latitude", 52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
-    Assert.assertEquals(
-        "Should project longitude", -1.539054f, (float) projectedLocation.get("long"), 0.000001f);
+    assertThat(projected.get("location")).as("Should project location").isNotNull();
+    assertThat((float) projectedLocation.get("lat"))
+        .as("Should project latitude")
+        .isCloseTo(52.995143f, within(0.000001f));
+
+    assertThat((float) projectedLocation.get("long"))
+        .as("Should project longitude")
+        .isCloseTo(-1.539054f, within(0.000001f));
   }
 
   @Test
@@ -261,26 +273,29 @@ public void testMapProjection() throws IOException {
     Schema idOnly = new Schema(Types.NestedField.required(0, "id", Types.LongType.get()));
 
     Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
     TestHelpers.assertEmptyAvroField(projected, "properties");
 
     Schema keyOnly = writeSchema.select("properties.key");
     projected = writeAndRead("key_only", writeSchema, keyOnly, record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertEquals(
-        "Should project entire map", properties, toStringMap((Map) projected.get("properties")));
+    assertThat(toStringMap((Map) projected.get("properties")))
+        .as("Should project entire map")
+        .isEqualTo(properties);
 
     Schema valueOnly = writeSchema.select("properties.value");
     projected = writeAndRead("value_only", writeSchema, valueOnly, record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertEquals(
-        "Should project entire map", properties, toStringMap((Map) projected.get("properties")));
+    assertThat(toStringMap((Map) projected.get("properties")))
+        .as("Should project entire map")
+        .isEqualTo(properties);
 
     Schema mapOnly = writeSchema.select("properties");
     projected = writeAndRead("map_only", writeSchema, mapOnly, record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertEquals(
-        "Should project entire map", properties, toStringMap((Map) projected.get("properties")));
+    assertThat(toStringMap((Map) projected.get("properties")))
+        .as("Should project entire map")
+        .isEqualTo(properties);
   }
 
   private Map toStringMap(Map map) {
@@ -328,50 +343,54 @@ public void testMapOfStructsProjection() throws IOException {
     Schema idOnly = new Schema(Types.NestedField.required(0, "id", Types.LongType.get()));
 
     Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
     TestHelpers.assertEmptyAvroField(projected, "locations");
 
     projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertEquals(
-        "Should project locations map",
-        record.get("locations"),
-        toStringMap((Map) projected.get("locations")));
+    assertThat(toStringMap((Map) projected.get("locations")))
+        .as("Should project locations map")
+        .isEqualTo(record.get("locations"));
 
     projected = writeAndRead("lat_only", writeSchema, writeSchema.select("locations.lat"), record);
     TestHelpers.assertEmptyAvroField(projected, "id");
     Map locations = toStringMap((Map) projected.get("locations"));
-    Assert.assertNotNull("Should project locations map", locations);
-    Assert.assertEquals(
-        "Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet());
+    assertThat(locations).as("Should project locations map").isNotNull();
+    assertThat(locations.keySet()).as("Should contain L1 and L2").containsExactlyInAnyOrder("L1", "L2");
     Record projectedL1 = (Record) locations.get("L1");
-    Assert.assertNotNull("L1 should not be null", projectedL1);
-    Assert.assertEquals(
-        "L1 should contain lat", 53.992811f, (float) projectedL1.get("lat"), 0.000001);
+    assertThat(projectedL1).as("L1 should not be null").isNotNull();
+    assertThat((float) projectedL1.get("lat"))
+        .as("L1 should contain lat")
+        .isCloseTo(53.992811f, within(0.000001f));
+
     TestHelpers.assertEmptyAvroField(projectedL1, "long");
     Record projectedL2 = (Record) locations.get("L2");
-    Assert.assertNotNull("L2 should not be null", projectedL2);
-    Assert.assertEquals(
-        "L2 should contain lat", 52.995143f, (float) projectedL2.get("lat"), 0.000001);
+    assertThat(projectedL2).as("L2 should not be null").isNotNull();
+    assertThat((float) projectedL2.get("lat"))
+        .as("L2 should contain lat")
+        .isCloseTo(52.995143f, within(0.000001f));
+
    TestHelpers.assertEmptyAvroField(projectedL2, "long");
 
     projected =
         writeAndRead("long_only", writeSchema, writeSchema.select("locations.long"), record);
     TestHelpers.assertEmptyAvroField(projected, "id");
     locations = toStringMap((Map) projected.get("locations"));
-    Assert.assertNotNull("Should project locations map", locations);
-    Assert.assertEquals(
-        "Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet());
+    assertThat(locations).as("Should project locations map").isNotNull();
+    assertThat(locations.keySet()).as("Should contain L1 and L2").containsExactlyInAnyOrder("L1", "L2");
     projectedL1 = (Record) locations.get("L1");
-    Assert.assertNotNull("L1 should not be null", projectedL1);
+    assertThat(projectedL1).as("L1 should not be null").isNotNull();
     TestHelpers.assertEmptyAvroField(projectedL1, "lat");
-    Assert.assertEquals(
-        "L1 should contain long", -1.542616f, (float) projectedL1.get("long"), 0.000001);
+    assertThat((float) projectedL1.get("long"))
+        .as("L1 should contain long")
+        .isCloseTo(-1.542616f, within(0.000001f));
+
     projectedL2 = (Record) locations.get("L2");
-    Assert.assertNotNull("L2 should not be null", projectedL2);
+    assertThat(projectedL2).as("L2 should not be null").isNotNull();
     TestHelpers.assertEmptyAvroField(projectedL2, "lat");
-    Assert.assertEquals(
-        "L2 should contain long", -1.539054f, (float) projectedL2.get("long"), 0.000001);
+    assertThat((float) projectedL2.get("long"))
+        .as("L2 should contain long")
+        .isCloseTo(-1.539054f, within(0.000001f));
 
     Schema latitiudeRenamed =
         new Schema(
@@ -388,19 +407,21 @@ public void testMapOfStructsProjection() throws IOException {
     projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record);
     TestHelpers.assertEmptyAvroField(projected, "id");
     locations = toStringMap((Map) projected.get("locations"));
-    Assert.assertNotNull("Should project locations map", locations);
-    Assert.assertEquals(
-        "Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet());
+    assertThat(locations).as("Should project locations map").isNotNull();
+    assertThat(locations.keySet()).as("Should contain L1 and L2").containsExactlyInAnyOrder("L1", "L2");
     projectedL1 = (Record) locations.get("L1");
-    Assert.assertNotNull("L1 should not be null", projectedL1);
-    Assert.assertEquals(
-        "L1 should contain latitude", 53.992811f, (float) projectedL1.get("latitude"), 0.000001);
+    assertThat(projectedL1).as("L1 should not be null").isNotNull();
+    assertThat((float) projectedL1.get("latitude"))
+        .as("L1 should contain latitude")
+        .isCloseTo(53.992811f, within(0.000001f));
     TestHelpers.assertEmptyAvroField(projectedL1, "lat");
     TestHelpers.assertEmptyAvroField(projectedL1, "long");
     projectedL2 = (Record) locations.get("L2");
-    Assert.assertNotNull("L2 should not be null", projectedL2);
-    Assert.assertEquals(
-        "L2 should contain latitude", 52.995143f, (float) projectedL2.get("latitude"), 0.000001);
+    assertThat(projectedL2).as("L2 should not be null").isNotNull();
+    assertThat((float) projectedL2.get("latitude"))
+        .as("L2 should contain latitude")
+        .isCloseTo(52.995143f, within(0.000001f));
+
     TestHelpers.assertEmptyAvroField(projectedL2, "lat");
     TestHelpers.assertEmptyAvroField(projectedL2, "long");
   }
@@ -422,18 +443,18 @@ public void testListProjection() throws IOException {
     Schema idOnly = new Schema(Types.NestedField.required(0, "id", Types.LongType.get()));
 
     Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
     TestHelpers.assertEmptyAvroField(projected, "values");
 
     Schema elementOnly = writeSchema.select("values.element");
     projected = writeAndRead("element_only", writeSchema, elementOnly, record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertEquals("Should project entire list", values, projected.get("values"));
+    assertThat(projected.get("values")).as("Should project entire list").isEqualTo(values);
 
     Schema listOnly = writeSchema.select("values");
     projected = writeAndRead("list_only", writeSchema, listOnly, record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertEquals("Should project entire list", values, projected.get("values"));
+    assertThat(projected.get("values")).as("Should project entire list").isEqualTo(values);
   }
 
   @Test
@@ -468,37 +489,38 @@ public void testListOfStructsProjection() throws IOException {
     Schema idOnly = new Schema(Types.NestedField.required(0, "id", Types.LongType.get()));
 
     Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
-    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
+    assertThat((long) projected.get("id")).as("Should contain the correct id value").isEqualTo(34L);
     TestHelpers.assertEmptyAvroField(projected, "points");
 
     projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertEquals(
-        "Should project points list", record.get("points"), projected.get("points"));
+    assertThat(projected.get("points"))
+        .as("Should project points list")
+        .isEqualTo(record.get("points"));
 
     projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertNotNull("Should project points list", projected.get("points"));
+    assertThat(projected.get("points")).as("Should project points list").isNotNull();
     List points = (List) projected.get("points");
-    Assert.assertEquals("Should read 2 points", 2, points.size());
+    assertThat(points).as("Should read 2 points").hasSize(2);
     Record projectedP1 = points.get(0);
-    Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x"));
+    assertThat((int) projectedP1.get("x")).as("Should project x").isEqualTo(1);
     TestHelpers.assertEmptyAvroField(projectedP1, "y");
     Record projectedP2 = points.get(1);
-    Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x"));
+    assertThat((int) projectedP2.get("x")).as("Should project x").isEqualTo(3);
     TestHelpers.assertEmptyAvroField(projectedP2, "y");
 
     projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertNotNull("Should project points list", projected.get("points"));
+    assertThat(projected.get("points")).as("Should project points list").isNotNull();
     points = (List) projected.get("points");
-    Assert.assertEquals("Should read 2 points", 2, points.size());
+    assertThat(points).as("Should read 2 points").hasSize(2);
     projectedP1 = points.get(0);
     TestHelpers.assertEmptyAvroField(projectedP1, "x");
-    Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y"));
+    assertThat((int) projectedP1.get("y")).as("Should project y").isEqualTo(2);
     projectedP2 = points.get(1);
     TestHelpers.assertEmptyAvroField(projectedP2, "x");
-    Assert.assertNull("Should project null y", projectedP2.get("y"));
+    assertThat(projectedP2.get("y")).as("Should project null y").isNull();
 
     Schema yRenamed =
         new Schema(
@@ -512,16 +534,16 @@ public void testListOfStructsProjection() throws IOException {
 
     projected = writeAndRead("y_renamed", writeSchema, yRenamed, record);
     TestHelpers.assertEmptyAvroField(projected, "id");
-    Assert.assertNotNull("Should project points list", projected.get("points"));
+    assertThat(projected.get("points")).as("Should project points list").isNotNull();
     points = (List) projected.get("points");
-    Assert.assertEquals("Should read 2 points", 2, points.size());
+    assertThat(points).as("Should read 2 points").hasSize(2);
     projectedP1 = points.get(0);
     TestHelpers.assertEmptyAvroField(projectedP1, "x");
     TestHelpers.assertEmptyAvroField(projectedP1, "y");
-    Assert.assertEquals("Should project z", 2, (int) projectedP1.get("z"));
+    assertThat((int) projectedP1.get("z")).as("Should project z").isEqualTo(2);
     projectedP2 = points.get(1);
     TestHelpers.assertEmptyAvroField(projectedP2, "x");
     TestHelpers.assertEmptyAvroField(projectedP2, "y");
-    Assert.assertNull("Should project null z", projectedP2.get("z"));
+    assertThat(projectedP2.get("z")).as("Should project null z").isNull();
   }
 }
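A recurring change in the file above: JUnit 4's `Assert.assertEquals(message, expected, actual, delta)` overload for floats has no direct AssertJ counterpart, so the patch uses `isCloseTo` with an explicit `within(...)` offset. A minimal sketch, with the value and tolerance copied from the tests above:

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.within;

class FloatToleranceSketch {
  static void check(float lat) {
    // JUnit 4: Assert.assertEquals("Should project latitude", 52.995143f, lat, 0.000001f);
    // AssertJ keeps the tolerance explicit as an Offset<Float>:
    assertThat(lat)
        .as("Should project latitude")
        .isCloseTo(52.995143f, within(0.000001f));
  }
}
```

As a side effect, `within(0.000001f)` also tightens up a subtlety in the old code, where several deltas were passed as double literals (`0.000001`) against float expectations.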
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java b/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java
index 58463bbb1edc..09673e603338 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java
@@ -23,6 +23,7 @@
 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Path;
 import java.util.Collections;
 import java.util.Map;
 import java.util.UUID;
@@ -33,29 +34,25 @@
 import org.apache.iceberg.io.FileAppender;
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.parquet.schema.MessageType;
-import org.junit.rules.TemporaryFolder;
 
 /** Utilities for tests that need to write Parquet files. */
 class ParquetWritingTestUtils {
 
   private ParquetWritingTestUtils() {}
 
-  static File writeRecords(TemporaryFolder temp, Schema schema, GenericData.Record... records)
+  static File writeRecords(Path temp, Schema schema, GenericData.Record... records)
       throws IOException {
     return writeRecords(temp, schema, Collections.emptyMap(), null, records);
   }
 
   static File writeRecords(
-      TemporaryFolder temp,
-      Schema schema,
-      Map<String, String> properties,
-      GenericData.Record... records)
+      Path temp, Schema schema, Map<String, String> properties, GenericData.Record... records)
       throws IOException {
     return writeRecords(temp, schema, properties, null, records);
   }
 
   static File writeRecords(
-      TemporaryFolder temp,
+      Path temp,
       Schema schema,
       Map<String, String> properties,
       Function<MessageType, ParquetValueWriter<?>> createWriterFunc,
@@ -97,8 +94,8 @@ static long write(
     return len;
   }
 
-  static File createTempFile(TemporaryFolder temp) throws IOException {
-    File tmpFolder = temp.newFolder("parquet");
+  static File createTempFile(Path temp) throws IOException {
+    File tmpFolder = temp.resolve("parquet").toFile();
     String filename = UUID.randomUUID().toString();
     return new File(tmpFolder, FileFormat.PARQUET.addExtension(filename));
   }
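The next file uses a different `@TempDir` shape: the annotation can also target a `java.io.File` field, and JUnit still injects a directory. The test wants a single output file path instead, so it deletes the empty injected directory up front and lets the Parquet writer create a file at that path. A sketch of just that setup, with an assumed class name:

```java
import static org.assertj.core.api.Assertions.assertThat;

import java.io.File;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.io.TempDir;

class TempDirAsOutputFileSketch {
  // @TempDir supports java.io.File as well as Path; the injected value is a directory.
  @TempDir private File temp;

  @BeforeEach
  void setUp() {
    // The test needs a path that does not yet exist: delete the empty injected
    // directory so a writer can create a regular file at the same location.
    assertThat(temp.delete()).isTrue();
  }
}
```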
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java
index 756639ab128c..8af8a512b62b 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java
@@ -38,6 +38,7 @@
 import static org.apache.iceberg.expressions.Expressions.startsWith;
 import static org.apache.iceberg.types.Types.NestedField.optional;
 import static org.apache.iceberg.types.Types.NestedField.required;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.File;
 import java.io.IOException;
@@ -74,11 +75,9 @@
 import org.apache.parquet.hadoop.metadata.BlockMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.schema.MessageType;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 public class TestBloomRowGroupFilter {
 
@@ -184,17 +183,17 @@ public class TestBloomRowGroupFilter {
   private BlockMetaData rowGroupMetadata = null;
   private BloomFilterReader bloomStore = null;
 
-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private File temp;
 
-  @Before
+  @BeforeEach
   public void createInputFile() throws IOException {
-    File parquetFile = temp.newFile();
-    Assert.assertTrue(parquetFile.delete());
+
+    assertThat(temp.delete()).isTrue();
 
     // build struct field schema
     org.apache.avro.Schema structSchema = AvroSchemaUtil.convert(_structFieldType);
 
-    OutputFile outFile = Files.localOutput(parquetFile);
+    OutputFile outFile = Files.localOutput(temp);
     try (FileAppender appender =
         Parquet.write(outFile)
             .schema(FILE_SCHEMA)
@@ -264,11 +263,11 @@ public void createInputFile() throws IOException {
     }
 
-    InputFile inFile = Files.localInput(parquetFile);
+    InputFile inFile = Files.localInput(temp);
 
     ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile));
 
-    Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size());
+    assertThat(reader.getRowGroups()).as("Should create only one row group").hasSize(1);
     rowGroupMetadata = reader.getRowGroups().get(0);
     parquetSchema = reader.getFileMetaData().getSchema();
     bloomStore = reader.getBloomFilterDataReader(rowGroupMetadata);
@@ -279,22 +278,24 @@ public void testNotNull() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNull("all_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNull("some_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNull("no_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNull("struct_not_null.int_field"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: this field is required and are always not-null", shouldRead);
+    assertThat(shouldRead)
+        .as("Should read: this field is required and is always not-null")
+        .isTrue();
   }
 
   @Test
@@ -302,22 +303,24 @@ public void testIsNull() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNull("all_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNull("some_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNull("no_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNull("struct_not_null.int_field"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: this field is required and are always not-null", shouldRead);
+    assertThat(shouldRead)
+        .as("Should skip: this field is required and is always not-null")
+        .isFalse();
   }
 
   @Test
@@ -325,12 +328,12 @@ public void testRequiredColumn() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNull("required"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: required columns are always non-null", shouldRead);
+    assertThat(shouldRead).as("Should read: required columns are always non-null").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNull("required"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: required columns are always non-null", shouldRead);
+    assertThat(shouldRead).as("Should skip: required columns are always non-null").isFalse();
   }
 
   @Test
@@ -338,17 +341,17 @@ public void testIsNaNs() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNaN("all_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNaN("some_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, isNaN("no_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
   }
 
   @Test
@@ -356,17 +359,17 @@ public void testNotNaNs() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNaN("all_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNaN("some_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notNaN("no_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
   }
 
   @Test
@@ -374,37 +377,37 @@ public void testStartsWith() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, startsWith("non_bloom", "re"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: no bloom", shouldRead);
+    assertThat(shouldRead).as("Should read: no bloom").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, startsWith("required", "re"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, startsWith("required", "req"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, startsWith("some_nulls", "so"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, startsWith("required", "reqs"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, startsWith("some_nulls", "somex"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, startsWith("no_nulls", "xxx"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
   }
 
   @Test
@@ -437,7 +440,7 @@ public void testColumnNotInFile() {
       boolean shouldRead =
           new ParquetBloomRowGroupFilter(SCHEMA, expr)
              .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-      Assert.assertTrue("Should read: bloom filter cannot be found: " + expr, shouldRead);
+      assertThat(shouldRead).as("Should read: bloom filter cannot be found: " + expr).isTrue();
     }
   }
@@ -454,7 +457,7 @@ public void testColumnNotBloomFilterEnabled() {
       boolean shouldRead =
           new ParquetBloomRowGroupFilter(SCHEMA, expr)
              .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-      Assert.assertTrue("Should read: bloom filter cannot be found: " + expr, shouldRead);
+      assertThat(shouldRead).as("Should read: bloom filter cannot be found: " + expr).isTrue();
     }
   }
@@ -463,7 +466,9 @@ public void testMissingStats() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, equal("no_stats", "a"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: stats are missing but bloom filter is present", shouldRead);
+    assertThat(shouldRead)
+        .as("Should skip: stats are missing but bloom filter is present")
+        .isFalse();
   }
 
   @Test
@@ -474,7 +479,7 @@ public void testNot() {
       boolean shouldRead =
           new ParquetBloomRowGroupFilter(SCHEMA, not(equal("id", i)))
              .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-      Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+      assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
     }
   }
@@ -486,19 +491,19 @@
         new ParquetBloomRowGroupFilter(
                 SCHEMA, and(equal("id", INT_MIN_VALUE - 25), equal("id", INT_MIN_VALUE + 30)))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: and(false, true)", shouldRead);
+    assertThat(shouldRead).as("Should skip: and(false, true)").isFalse();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(
                 SCHEMA, and(equal("id", INT_MIN_VALUE - 25), equal("id", INT_MAX_VALUE + 1)))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: and(false, false)", shouldRead);
+    assertThat(shouldRead).as("Should skip: and(false, false)").isFalse();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(
                 SCHEMA, and(equal("id", INT_MIN_VALUE + 25), equal("id", INT_MIN_VALUE)))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: and(true, true)", shouldRead);
+    assertThat(shouldRead).as("Should read: and(true, true)").isTrue();
 
     // AND filters that refer different columns ("id", "long", "binary")
     shouldRead =
@@ -509,7 +514,7 @@ SCHEMA, and(equal("id", INT_MIN_VALUE + 25), equal("id", INT_MIN_VALUE)))
                 equal("long", LONG_BASE + 30),
                 equal("binary", RANDOM_BYTES.get(30))))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: and(true, true, true)", shouldRead);
+    assertThat(shouldRead).as("Should read: and(true, true, true)").isTrue();
 
     // AND filters that refer different columns ("id", "long", "binary")
     shouldRead =
@@ -520,7 +525,7 @@ SCHEMA, and(equal("id", INT_MIN_VALUE + 25), equal("id", INT_MIN_VALUE)))
                 equal("long", LONG_BASE + 30),
                 equal("binary", RANDOM_BYTES.get(30))))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: and(false, true, true)", shouldRead);
+    assertThat(shouldRead).as("Should skip: and(false, true, true)").isFalse();
 
     // In And, one of the filter's column doesn't have bloom filter
     shouldRead =
@@ -532,7 +537,7 @@ SCHEMA, and(equal("id", INT_MIN_VALUE + 25), equal("id", INT_MIN_VALUE)))
                 equal("binary", RANDOM_BYTES.get(30)),
                 equal("non_bloom", "a")))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: and(true, true, true, true)", shouldRead);
+    assertThat(shouldRead).as("Should read: and(true, true, true, true)").isTrue();
 
     // In And, one of the filter's column doesn't have bloom filter
     shouldRead =
@@ -544,7 +549,7 @@ SCHEMA, and(equal("id", INT_MIN_VALUE + 25), equal("id", INT_MIN_VALUE)))
                 equal("binary", RANDOM_BYTES.get(30)),
                 equal("non_bloom", "a")))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: and(false, true, true, true)", shouldRead);
+    assertThat(shouldRead).as("Should skip: and(false, true, true, true)").isFalse();
 
     // In And, one of the filter's column is not in the file
     shouldRead =
@@ -556,7 +561,7 @@ SCHEMA, and(equal("id", INT_MIN_VALUE + 25), equal("id", INT_MIN_VALUE)))
                 equal("binary", RANDOM_BYTES.get(30)),
                 equal("not_in_file", 1.0f)))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: and(true, true, true, true)", shouldRead);
+    assertThat(shouldRead).as("Should read: and(true, true, true, true)").isTrue();
 
     // In And, one of the filter's column is not in the file
     shouldRead =
@@ -568,7 +573,7 @@ SCHEMA, and(equal("id", INT_MIN_VALUE + 25), equal("id", INT_MIN_VALUE)))
                 equal("binary", RANDOM_BYTES.get(30)),
                 equal("not_in_file", 1.0f)))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: and(false, true, true, true)", shouldRead);
+    assertThat(shouldRead).as("Should skip: and(false, true, true, true)").isFalse();
   }
 
   @Test
@@ -579,13 +584,13 @@ public void testOr() {
         new ParquetBloomRowGroupFilter(
                 SCHEMA, or(equal("id", INT_MIN_VALUE - 25), equal("id", INT_MAX_VALUE + 1)))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should skip: or(false, false)", shouldRead);
+    assertThat(shouldRead).as("Should skip: or(false, false)").isFalse();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(
                 SCHEMA, or(equal("id", INT_MIN_VALUE - 25), equal("id", INT_MAX_VALUE - 19)))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: or(false, true)", shouldRead);
+    assertThat(shouldRead).as("Should read: or(false, true)").isTrue();
   }
 
   @Test
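The expectations in the remaining hunks all follow from standard bloom-filter semantics: a filter can answer "definitely not present" or "maybe present", never "present", so only equality-style predicates (`equal`, `in`) can ever prove a row group skippable, while range, `notEqual`, `notIn`, and NaN predicates must always read. A conceptual sketch of that decision; the types and helper names here are hypothetical, not Iceberg's API:

```java
// Hypothetical, simplified model of the skip decision these tests exercise.
interface Bloom {
  boolean mightContain(Object value); // false => value is definitely absent
}

class BloomSkipSketch {
  static boolean shouldRead(Bloom bloom, boolean isEqualityPredicate, Object value) {
    if (!isEqualityPredicate) {
      // <, <=, >, >=, !=, notIn, isNaN, startsWith: a bloom filter cannot
      // rule out matching rows, so the row group must always be read.
      return true;
    }
    // Equality: skip only when the filter proves the value absent;
    // "maybe present" still requires reading (false positives are possible).
    return bloom.mightContain(value);
  }
}
```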
ParquetBloomRowGroupFilter(SCHEMA, equal("id", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= INT_MIN_VALUE && i <= INT_MAX_VALUE) { - Assert.assertTrue("Should read: integer within range", shouldRead); + assertThat(shouldRead).as("Should read: integer within range").isTrue(); } else { - Assert.assertFalse("Should not read: integer outside range", shouldRead); + assertThat(shouldRead).as("Should not read: integer outside range").isFalse(); } } } @@ -649,9 +654,9 @@ public void testLongEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("long", LONG_BASE + i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= INT_MIN_VALUE && i <= INT_MAX_VALUE) { - Assert.assertTrue("Should read: long within range", shouldRead); + assertThat(shouldRead).as("Should read: long within range").isTrue(); } else { - Assert.assertFalse("Should not read: long outside range", shouldRead); + assertThat(shouldRead).as("Should not read: long outside range").isFalse(); } } } @@ -662,7 +667,7 @@ public void testBytesEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("binary", RANDOM_BYTES.get(i))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: binary within range", shouldRead); + assertThat(shouldRead).as("Should read: binary within range").isTrue(); } Random rd = new Random(54321); @@ -672,7 +677,7 @@ public void testBytesEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("binary", byteArray)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: cannot match a new generated binary", shouldRead); + assertThat(shouldRead).as("Should not read: cannot match a new generated binary").isFalse(); } } @@ -683,13 +688,13 @@ public void testIntDeciamlEq() { new ParquetBloomRowGroupFilter( SCHEMA, equal("int_decimal", new BigDecimal(String.valueOf(77.77 + i)))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: decimal within range", shouldRead); + assertThat(shouldRead).as("Should read: decimal within range").isTrue(); } boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("int_decimal", new BigDecimal("1234.56"))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: decimal outside range", shouldRead); + assertThat(shouldRead).as("Should not read: decimal outside range").isFalse(); } @Test @@ -699,13 +704,13 @@ public void testLongDeciamlEq() { new ParquetBloomRowGroupFilter( SCHEMA, equal("long_decimal", new BigDecimal(String.valueOf(88.88 + i)))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: decimal within range", shouldRead); + assertThat(shouldRead).as("Should read: decimal within range").isTrue(); } boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("long_decimal", new BigDecimal("1234.56"))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: decimal outside range", shouldRead); + assertThat(shouldRead).as("Should not read: decimal outside range").isFalse(); } @Test @@ -715,13 +720,13 @@ public void testFixedDeciamlEq() { new ParquetBloomRowGroupFilter( SCHEMA, equal("fixed_decimal", new BigDecimal(String.valueOf(99.99 + i)))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: decimal within range", shouldRead); + assertThat(shouldRead).as("Should read: decimal within range").isTrue(); } boolean shouldRead = new 
ParquetBloomRowGroupFilter(SCHEMA, equal("fixed_decimal", new BigDecimal("1234.56"))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: decimal outside range", shouldRead); + assertThat(shouldRead).as("Should not read: decimal outside range").isFalse(); } @Test @@ -731,9 +736,9 @@ public void testDoubleEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("double", DOUBLE_BASE + i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= INT_MIN_VALUE && i <= INT_MAX_VALUE) { - Assert.assertTrue("Should read: double within range", shouldRead); + assertThat(shouldRead).as("Should read: double within range").isTrue(); } else { - Assert.assertFalse("Should not read: double outside range", shouldRead); + assertThat(shouldRead).as("Should not read: double outside range").isFalse(); } } } @@ -745,9 +750,9 @@ public void testFloatEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("float", FLOAT_BASE + i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= INT_MIN_VALUE && i <= INT_MAX_VALUE) { - Assert.assertTrue("Should read: float within range", shouldRead); + assertThat(shouldRead).as("Should read: float within range").isTrue(); } else { - Assert.assertFalse("Should not read: float outside range", shouldRead); + assertThat(shouldRead).as("Should not read: float outside range").isFalse(); } } } @@ -759,9 +764,9 @@ public void testStringEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("string", BINARY_PREFIX + i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= INT_MIN_VALUE && i <= INT_MAX_VALUE) { - Assert.assertTrue("Should read: string within range", shouldRead); + assertThat(shouldRead).as("Should read: string within range").isTrue(); } else { - Assert.assertFalse("Should not read: string outside range", shouldRead); + assertThat(shouldRead).as("Should not read: string outside range").isFalse(); } } } @@ -772,7 +777,7 @@ public void testRandomBinaryEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("uuid", RANDOM_UUIDS.get(i))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: uuid within range", shouldRead); + assertThat(shouldRead).as("Should read: uuid within range").isTrue(); } Random rd = new Random(1357); @@ -780,7 +785,9 @@ public void testRandomBinaryEq() { new ParquetBloomRowGroupFilter( SCHEMA, equal("uuid", new UUID(rd.nextLong(), rd.nextLong()).toString())) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: cannot match a new generated random uuid", shouldRead); + assertThat(shouldRead) + .as("Should not read: cannot match a new generated random uuid") + .isFalse(); } @Test @@ -788,12 +795,12 @@ public void testBooleanEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("boolean", true)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter is not supported for Boolean", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter is not supported for Boolean").isTrue(); shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("boolean", false)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter is not supported for Boolean", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter is not supported for Boolean").isTrue(); } @Test @@ -804,9 +811,9 @@ public void testTimeEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("time", 
ins.toEpochMilli())) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= 0 && i < INT_VALUE_COUNT) { - Assert.assertTrue("Should read: time within range", shouldRead); + assertThat(shouldRead).as("Should read: time within range").isTrue(); } else { - Assert.assertFalse("Should not read: time outside range", shouldRead); + assertThat(shouldRead).as("Should not read: time outside range").isFalse(); } } } @@ -819,9 +826,9 @@ public void testDateEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("date", ins.getEpochSecond())) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= 0 && i < INT_VALUE_COUNT) { - Assert.assertTrue("Should read: date within range", shouldRead); + assertThat(shouldRead).as("Should read: date within range").isTrue(); } else { - Assert.assertFalse("Should not read: date outside range", shouldRead); + assertThat(shouldRead).as("Should not read: date outside range").isFalse(); } } } @@ -834,9 +841,9 @@ public void testTimestampEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("timestamp", ins.toEpochMilli())) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= 0 && i < INT_VALUE_COUNT) { - Assert.assertTrue("Should read: timestamp within range", shouldRead); + assertThat(shouldRead).as("Should read: timestamp within range").isTrue(); } else { - Assert.assertFalse("Should not read: timestamp outside range", shouldRead); + assertThat(shouldRead).as("Should not read: timestamp outside range").isFalse(); } } } @@ -849,9 +856,9 @@ public void testTimestamptzEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("timestamptz", ins.toEpochMilli())) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= 0 && i < INT_VALUE_COUNT) { - Assert.assertTrue("Should read: timestamptz within range", shouldRead); + assertThat(shouldRead).as("Should read: timestamptz within range").isTrue(); } else { - Assert.assertFalse("Should not read: timestamptz outside range", shouldRead); + assertThat(shouldRead).as("Should not read: timestamptz outside range").isFalse(); } } } @@ -862,7 +869,7 @@ public void testIntegerNotEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, notEqual("id", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } } @@ -872,7 +879,7 @@ public void testIntegerNotEqRewritten() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, not(equal("id", i))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } } @@ -881,12 +888,12 @@ public void testStringNotEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, notEqual("some_nulls", "some")) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, notEqual("no_nulls", "")) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } @Test @@ -895,7 +902,7 @@ public void testStructFieldLt() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, 
lessThan("struct_not_null.int_field", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } } @@ -905,7 +912,7 @@ public void testStructFieldLtEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, lessThanOrEqual("struct_not_null.int_field", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } } @@ -915,7 +922,7 @@ public void testStructFieldGt() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, greaterThan("struct_not_null.int_field", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } } @@ -925,7 +932,7 @@ public void testStructFieldGtEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, greaterThanOrEqual("struct_not_null.int_field", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } } @@ -936,9 +943,9 @@ public void testStructFieldEq() { new ParquetBloomRowGroupFilter(SCHEMA, equal("struct_not_null.int_field", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); if (i >= INT_MIN_VALUE && i <= INT_MAX_VALUE) { - Assert.assertTrue("Should read: value within range", shouldRead); + assertThat(shouldRead).as("Should read: value within range").isTrue(); } else { - Assert.assertFalse("Should not read: value outside range", shouldRead); + assertThat(shouldRead).as("Should not read: value outside range").isFalse(); } } } @@ -949,7 +956,7 @@ public void testStructFieldNotEq() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, notEqual("struct_not_null.int_field", i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } } @@ -960,7 +967,7 @@ public void testCaseInsensitive() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, equal("Required", "Req"), false) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should skip: contains only 'req'", shouldRead); + assertThat(shouldRead).as("Should skip: contains only 'req'").isFalse(); } @Test @@ -996,7 +1003,7 @@ public void testIntegerIn() { new ParquetBloomRowGroupFilter( SCHEMA, in("id", INT_MIN_VALUE - 3 * i, INT_MIN_VALUE + i, INT_MAX_VALUE + 3 * i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: integer within range", shouldRead); + assertThat(shouldRead).as("Should read: integer within range").isTrue(); } // all values are present @@ -1009,7 +1016,7 @@ SCHEMA, in("id", INT_MIN_VALUE - 3 * i, INT_MIN_VALUE + i, INT_MAX_VALUE + 3 * i .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: the bloom is a subset of the in set", shouldRead); + assertThat(shouldRead).as("Should read: the bloom is a subset of the in set").isTrue(); // all values are present shouldRead = @@ -1021,7 +1028,7 @@ SCHEMA, in("id", INT_MIN_VALUE - 3 
* i, INT_MIN_VALUE + i, INT_MAX_VALUE + 3 * i .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: the bloom is equal to the in set", shouldRead); + assertThat(shouldRead).as("Should read: the bloom is equal to the in set").isTrue(); // no values are present shouldRead = @@ -1033,7 +1040,7 @@ SCHEMA, in("id", INT_MIN_VALUE - 3 * i, INT_MIN_VALUE + i, INT_MAX_VALUE + 3 * i .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: value outside range", shouldRead); + assertThat(shouldRead).as("Should not read: value outside range").isFalse(); } @Test @@ -1041,29 +1048,35 @@ public void testOtherTypesIn() { boolean shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, in("all_nulls", 1, 2)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: in on all nulls column (bloom is empty) ", shouldRead); + assertThat(shouldRead) + .as("Should not read: in on all nulls column (bloom is empty) ") + .isFalse(); shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, in("some_nulls", "aaa", "some")) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: in on some nulls column", shouldRead); + assertThat(shouldRead).as("Should read: in on some nulls column").isTrue(); shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, in("some_nulls", "aaa", "bbb")) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse("Should not read: some_nulls values are not within the set", shouldRead); + assertThat(shouldRead) + .as("Should not read: some_nulls values are not within the set") + .isFalse(); shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, in("no_nulls", "aaa", "bbb")) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertFalse( - "Should not read: in on no nulls column (empty string is not within the set)", shouldRead); + assertThat(shouldRead) + .as("Should not read: in on no nulls column (empty string is not within the set)") + .isFalse(); shouldRead = new ParquetBloomRowGroupFilter(SCHEMA, in("no_nulls", "aaa", "")) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue( - "Should read: in on no nulls column (empty string is within the set)", shouldRead); + assertThat(shouldRead) + .as("Should read: in on no nulls column (empty string is within the set)") + .isTrue(); } @Test @@ -1075,7 +1088,7 @@ public void testIntegerNotIn() { SCHEMA, notIn("id", INT_MIN_VALUE - 3 * i, INT_MIN_VALUE + i, INT_MAX_VALUE + 3 * i)) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); } // all values are present @@ -1088,7 +1101,7 @@ public void testIntegerNotIn() { .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); // all values are present shouldRead = @@ -1100,7 +1113,7 @@ public void testIntegerNotIn() { .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, bloomStore); - Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead); + assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue(); // no values are 
   @Test
@@ -1075,7 +1088,7 @@ public void testIntegerNotIn() {
                 SCHEMA, notIn("id", INT_MIN_VALUE - 3 * i, INT_MIN_VALUE + i, INT_MAX_VALUE + 3 * i))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-      Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+      assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
     }
 
     // all values are present
@@ -1088,7 +1101,7 @@ public void testIntegerNotIn() {
                     .boxed()
                     .collect(Collectors.toList())))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     // all values are present
     shouldRead =
@@ -1100,7 +1113,7 @@ public void testIntegerNotIn() {
                     .boxed()
                     .collect(Collectors.toList())))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     // no values are present
     shouldRead =
@@ -1112,7 +1125,7 @@ public void testIntegerNotIn() {
                     .boxed()
                     .collect(Collectors.toList())))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
   }
 
   @Test
@@ -1120,22 +1133,22 @@ public void testOtherTypesNotIn() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notIn("all_nulls", 1, 2))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notIn("some_nulls", "aaa", "bbb"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notIn("no_nulls", "aaa", "bbb"))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, notIn("no_nulls", "aaa", ""))
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
   }
 
   @Test
@@ -1143,26 +1156,26 @@ public void testTypeConversions() {
     boolean shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, equal("long", LONG_BASE + INT_MIN_VALUE + 1), true)
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: Integer value promoted", shouldRead);
+    assertThat(shouldRead).as("Should read: Integer value promoted").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, equal("long", LONG_BASE + INT_MIN_VALUE - 1), true)
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should not read: Integer value promoted", shouldRead);
+    assertThat(shouldRead).as("Should not read: Integer value promoted").isFalse();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, equal("id", (long) (INT_MIN_VALUE + 1)), true)
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertTrue("Should read: Long value truncated", shouldRead);
+    assertThat(shouldRead).as("Should read: Long value truncated").isTrue();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, equal("id", (long) (INT_MIN_VALUE - 1)), true)
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should not read: Long value truncated", shouldRead);
+    assertThat(shouldRead).as("Should not read: Long value truncated").isFalse();
 
     shouldRead =
         new ParquetBloomRowGroupFilter(SCHEMA, equal("id", ((long) Integer.MAX_VALUE) + 1), true)
             .shouldRead(parquetSchema, rowGroupMetadata, bloomStore);
-    Assert.assertFalse("Should not read: Long value outside Integer range", shouldRead);
+    assertThat(shouldRead).as("Should not read: Long value outside Integer range").isFalse();
   }
 }
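Every assertion swap in this diff follows the same mechanical recipe, worth stating once: JUnit 4's message-first Assert.assertTrue(message, condition) becomes an AssertJ chain in which the description is attached with .as(...) before the terminal check; AssertJ silently drops a description added after the terminal call, which is why as() always precedes isTrue()/isFalse(). Roughly:

```java
import static org.assertj.core.api.Assertions.assertThat;

class AssertionMigrationSketch {
  void verify(boolean shouldRead) {
    // Before: Assert.assertTrue("Should read: bloom filter doesn't help", shouldRead);
    // After: as() carries the failure message and must come before the terminal call.
    assertThat(shouldRead).as("Should read: bloom filter doesn't help").isTrue();
  }
}
```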
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestCDHParquetStatistics.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestCDHParquetStatistics.java
index ed400d28770d..51bea3220259 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestCDHParquetStatistics.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestCDHParquetStatistics.java
@@ -18,12 +18,12 @@
  */
 package org.apache.iceberg.parquet;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import org.apache.parquet.column.statistics.Statistics;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests for Parquet 1.5.0-Stats which cannot be evaluated like later versions of Parquet stats.
@@ -39,6 +39,6 @@ public void testCDHParquetStatistcs() {
     when(cdhBinaryColumnStats.getMaxBytes()).thenReturn(null);
     when(cdhBinaryColumnStats.getMinBytes()).thenReturn(null);
     when(cdhBinaryColumnStats.getNumNulls()).thenReturn(0L);
-    Assert.assertTrue(ParquetMetricsRowGroupFilter.minMaxUndefined(cdhBinaryColumnStats));
+    assertThat(ParquetMetricsRowGroupFilter.minMaxUndefined(cdhBinaryColumnStats)).isTrue();
   }
 }
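The CDH test above keeps its Mockito stubs untouched; only the assertion changes. The point of the fixture is that Parquet 1.5.0 (CDH) files can carry a Statistics object whose min/max byte arrays are null even though a null count is present, and minMaxUndefined must report that rather than let the metrics filter dereference the bounds. The stubbing shape, as a sketch of the same pattern (not new test code):

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.parquet.column.statistics.Statistics;

class CdhStatsStubSketch {
  // A Statistics instance shaped like what CDH Parquet 1.5.0 could produce:
  // null min/max bytes alongside a valid null count.
  static Statistics<?> cdhLikeBinaryStats() {
    Statistics<?> stats = mock(Statistics.class);
    when(stats.getMaxBytes()).thenReturn(null);
    when(stats.getMinBytes()).thenReturn(null);
    when(stats.getNumNulls()).thenReturn(0L);
    return stats;
  }
}
```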
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java
index eafa93af1c84..54e34cb6c339 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java
@@ -40,6 +40,7 @@
 import static org.apache.iceberg.types.Types.NestedField.required;
 import static org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_1_0;
 import static org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_2_0;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.File;
 import java.io.IOException;
@@ -77,7 +78,6 @@
 import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnPath;
 import org.apache.parquet.schema.MessageType;
-import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Rule;
@@ -171,7 +171,7 @@ public TestDictionaryRowGroupFilter(WriterVersion writerVersion) {
   @Before
   public void createInputFile() throws IOException {
     File parquetFile = temp.newFile();
-    Assert.assertTrue(parquetFile.delete());
+    assertThat(parquetFile.delete()).isTrue();
 
     // build struct field schema
     org.apache.avro.Schema structSchema = AvroSchemaUtil.convert(_structFieldType);
@@ -216,7 +216,7 @@ public void createInputFile() throws IOException {
 
     ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile));
 
-    Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size());
+    assertThat(reader.getRowGroups()).as("Should create only one row group").hasSize(1);
     rowGroupMetadata = reader.getRowGroups().get(0);
     parquetSchema = reader.getFileMetaData().getSchema();
     dictionaryStore = reader.getNextDictionaryReader();
@@ -272,22 +272,22 @@ public void testAllNulls() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("all_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("some_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("no_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("struct_not_null"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
   }
 
   @Test
@@ -295,22 +295,22 @@ public void testNoNulls() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("all_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("some_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("no_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("struct_not_null"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary filter doesn't help").isTrue();
   }
 
   @Test
@@ -318,12 +318,12 @@ public void testRequiredColumn() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("required"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: required columns are always non-null", shouldRead);
+    assertThat(shouldRead).as("Should read: required columns are always non-null").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("required"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: required columns are always non-null", shouldRead);
+    assertThat(shouldRead).as("Should skip: required columns are always non-null").isFalse();
   }
 
   @Test
@@ -331,17 +331,17 @@ public void testIsNaNs() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNaN("all_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: all_nans column will contain NaN", shouldRead);
+    assertThat(shouldRead).as("Should read: all_nans column will contain NaN").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNaN("some_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: some_nans column will contain NaN", shouldRead);
+    assertThat(shouldRead).as("Should read: some_nans column will contain NaN").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNaN("no_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no_nans column will not contain NaN", shouldRead);
+    assertThat(shouldRead).as("Should skip: no_nans column will not contain NaN").isFalse();
   }
 
   @Test
@@ -349,17 +349,17 @@ public void testNotNaNs() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNaN("all_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: all_nans column will not contain non-NaN", shouldRead);
+    assertThat(shouldRead).as("Should skip: all_nans column will not contain non-NaN").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNaN("some_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: some_nans column will contain non-NaN", shouldRead);
+    assertThat(shouldRead).as("Should read: some_nans column will contain non-NaN").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNaN("no_nans"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: no_nans column will contain non-NaN", shouldRead);
+    assertThat(shouldRead).as("Should read: no_nans column will contain non-NaN").isTrue();
   }
 
   @Test
@@ -367,26 +367,30 @@ public void testNotNaNOnNaNsAndNulls() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("_nans_and_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: _nans_and_nulls column will contain null values", shouldRead);
+    assertThat(shouldRead)
+        .as("Should read: _nans_and_nulls column will contain null values")
+        .isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("_nans_and_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue(
-        "Should read: _nans_and_nulls column will contain NaN values which are not null",
-        shouldRead);
+    assertThat(shouldRead)
+        .as("Should read: _nans_and_nulls column will contain NaN values which are not null")
+        .isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, isNaN("_nans_and_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: _nans_and_nulls column will contain NaN values", shouldRead);
+    assertThat(shouldRead)
+        .as("Should read: _nans_and_nulls column will contain NaN values")
+        .isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notNaN("_nans_and_nulls"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue(
-        "Should read: _nans_and_nulls column will contain null values which are not NaN",
-        shouldRead);
+    assertThat(shouldRead)
+        .as("Should read: _nans_and_nulls column will contain null values which are not NaN")
+        .isTrue();
   }
 
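The startsWith and notStartsWith hunks that follow are decidable from a dictionary precisely because a fully dictionary-encoded column's dictionary enumerates every distinct value in the row group, so a prefix check against each entry is sufficient. Conceptually (an illustrative sketch that ignores nulls and the fallback-encoding checks the real filter also performs):

```java
import java.util.Set;

class DictionaryPrefixSketch {
  // startsWith() may skip only when no dictionary entry carries the prefix.
  static boolean shouldReadStartsWith(Set<String> dictionary, String prefix) {
    return dictionary.stream().anyMatch(value -> value.startsWith(prefix));
  }

  // notStartsWith() may skip only when every dictionary entry carries the prefix.
  static boolean shouldReadNotStartsWith(Set<String> dictionary, String prefix) {
    return dictionary.stream().anyMatch(value -> !value.startsWith(prefix));
  }
}
```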
   @Test
@@ -394,43 +398,43 @@ public void testStartsWith() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, startsWith("non_dict", "re"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: no dictionary", shouldRead);
+    assertThat(shouldRead).as("Should read: no dictionary").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, startsWith("required", "re"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary contains a matching entry", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary contains a matching entry").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, startsWith("required", "req"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary contains a matching entry", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary contains a matching entry").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, startsWith("some_nulls", "so"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary contains a matching entry", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary contains a matching entry").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(
                 SCHEMA, startsWith("no_stats", UUID.randomUUID().toString()))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no stats but dictionary is present", shouldRead);
+    assertThat(shouldRead).as("Should skip: no stats but dictionary is present").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, startsWith("required", "reqs"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no match in dictionary", shouldRead);
+    assertThat(shouldRead).as("Should skip: no match in dictionary").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, startsWith("some_nulls", "somex"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no match in dictionary", shouldRead);
+    assertThat(shouldRead).as("Should skip: no match in dictionary").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, startsWith("no_nulls", "xxx"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no match in dictionary", shouldRead);
+    assertThat(shouldRead).as("Should skip: no match in dictionary").isFalse();
   }
 
   @Test
@@ -438,48 +442,48 @@ public void testNotStartsWith() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("non_dict", "re"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: no dictionary", shouldRead);
+    assertThat(shouldRead).as("Should read: no dictionary").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("required", "re"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no match in dictionary", shouldRead);
+    assertThat(shouldRead).as("Should skip: no match in dictionary").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("required", "req"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no match in dictionary", shouldRead);
+    assertThat(shouldRead).as("Should skip: no match in dictionary").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("some_nulls", "s!"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary contains a matching entry", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary contains a matching entry").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(
                 SCHEMA, notStartsWith("no_stats", UUID.randomUUID().toString()))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: no stats but dictionary is present", shouldRead);
+    assertThat(shouldRead).as("Should read: no stats but dictionary is present").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("required", "reqs"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary contains a matching entry", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary contains a matching entry").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("some_nulls", "somex"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary contains a matching entry", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary contains a matching entry").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("some_nulls", "some"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: no match in dictionary", shouldRead);
+    assertThat(shouldRead).as("Should skip: no match in dictionary").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, notStartsWith("no_nulls", "xxx"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: dictionary contains a matching entry", shouldRead);
+    assertThat(shouldRead).as("Should read: dictionary contains a matching entry").isTrue();
   }
 
   @Test
@@ -507,7 +511,7 @@ public void testColumnNotInFile() {
       boolean shouldRead =
           new ParquetDictionaryRowGroupFilter(SCHEMA, expr)
               .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-      Assert.assertTrue("Should read: dictionary cannot be found: " + expr, shouldRead);
+      assertThat(shouldRead).as("Should read: dictionary cannot be found: " + expr).isTrue();
     }
   }
 
@@ -524,7 +528,7 @@ public void testColumnFallbackOrNotDictionaryEncoded() {
       boolean shouldRead =
           new ParquetDictionaryRowGroupFilter(SCHEMA, expr)
              .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-      Assert.assertTrue("Should read: dictionary cannot be found: " + expr, shouldRead);
+      assertThat(shouldRead).as("Should read: dictionary cannot be found: " + expr).isTrue();
     }
   }
 
@@ -533,7 +537,7 @@ public void testMissingStats() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("no_stats", "a"))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: stats are missing but dictionary is present", shouldRead);
+    assertThat(shouldRead).as("Should skip: stats are missing but dictionary is present").isFalse();
   }
 
   @Test
@@ -542,12 +546,12 @@ public void testNot() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, not(lessThan("id", INT_MIN_VALUE - 25)))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: not(false)", shouldRead);
+    assertThat(shouldRead).as("Should read: not(false)").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, not(greaterThan("id", INT_MIN_VALUE - 25)))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: not(true)", shouldRead);
+    assertThat(shouldRead).as("Should skip: not(true)").isFalse();
   }
 
   @Test
@@ -560,7 +564,7 @@ public void testAnd() {
                 lessThan("id", INT_MIN_VALUE - 25),
                 greaterThanOrEqual("id", INT_MIN_VALUE - 30)))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: and(false, true)", shouldRead);
+    assertThat(shouldRead).as("Should skip: and(false, true)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(
@@ -569,14 +573,14 @@ public void testAnd() {
                 lessThan("id", INT_MIN_VALUE - 25),
                 greaterThanOrEqual("id", INT_MAX_VALUE + 1)))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: and(false, false)", shouldRead);
+    assertThat(shouldRead).as("Should skip: and(false, false)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(
             SCHEMA,
             and(greaterThan("id", INT_MIN_VALUE - 25), lessThanOrEqual("id", INT_MIN_VALUE)))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: and(true, true)", shouldRead);
+    assertThat(shouldRead).as("Should read: and(true, true)").isTrue();
   }
 
   @Test
@@ -587,7 +591,7 @@ public void testOr() {
             SCHEMA,
             or(lessThan("id", INT_MIN_VALUE - 25), greaterThanOrEqual("id", INT_MAX_VALUE + 1)))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should skip: or(false, false)", shouldRead);
+    assertThat(shouldRead).as("Should skip: or(false, false)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(
@@ -596,7 +600,7 @@ public void testOr() {
                 lessThan("id", INT_MIN_VALUE - 25),
                 greaterThanOrEqual("id", INT_MAX_VALUE - 19)))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: or(false, true)", shouldRead);
+    assertThat(shouldRead).as("Should read: or(false, true)").isTrue();
   }
 
   @Test
@@ -604,22 +608,24 @@ public void testIntegerLt() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", INT_MIN_VALUE - 25))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);
+    assertThat(shouldRead).as("Should not read: id range below lower bound (5 < 30)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", INT_MIN_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead);
+    assertThat(shouldRead)
+        .as("Should not read: id range below lower bound (30 is not < 30)")
+        .isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", INT_MIN_VALUE + 1))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: one possible id", shouldRead);
+    assertThat(shouldRead).as("Should read: one possible id").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", INT_MAX_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: may possible ids", shouldRead);
+    assertThat(shouldRead).as("Should read: many possible ids").isTrue();
   }
 
   @Test
@@ -627,22 +633,22 @@ public void testIntegerLtEq() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", INT_MIN_VALUE - 25))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);
+    assertThat(shouldRead).as("Should not read: id range below lower bound (5 < 30)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", INT_MIN_VALUE - 1))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead);
+    assertThat(shouldRead).as("Should not read: id range below lower bound (29 < 30)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", INT_MIN_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: one possible id", shouldRead);
+    assertThat(shouldRead).as("Should read: one possible id").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", INT_MAX_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: many possible ids", shouldRead);
+    assertThat(shouldRead).as("Should read: many possible ids").isTrue();
   }
 
   @Test
@@ -650,22 +656,24 @@ public void testIntegerGt() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", INT_MAX_VALUE + 6))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead);
+    assertThat(shouldRead).as("Should not read: id range above upper bound (85 < 79)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", INT_MAX_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead);
+    assertThat(shouldRead)
+        .as("Should not read: id range above upper bound (79 is not > 79)")
+        .isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", INT_MAX_VALUE - 1))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: one possible id", shouldRead);
+    assertThat(shouldRead).as("Should read: one possible id").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", INT_MAX_VALUE - 4))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: may possible ids", shouldRead);
+    assertThat(shouldRead).as("Should read: many possible ids").isTrue();
   }
 
   @Test
@@ -673,22 +681,22 @@ public void testIntegerGtEq() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE + 6))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead);
+    assertThat(shouldRead).as("Should not read: id range above upper bound (85 < 79)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE + 1))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead);
+    assertThat(shouldRead).as("Should not read: id range above upper bound (80 > 79)").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: one possible id", shouldRead);
+    assertThat(shouldRead).as("Should read: one possible id").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE - 4))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: may possible ids", shouldRead);
+    assertThat(shouldRead).as("Should read: many possible ids").isTrue();
   }
 
   @Test
@@ -696,37 +704,37 @@ public void testIntegerEq() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", INT_MIN_VALUE - 25))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id below lower bound", shouldRead);
+    assertThat(shouldRead).as("Should not read: id below lower bound").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", INT_MIN_VALUE - 1))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id below lower bound", shouldRead);
+    assertThat(shouldRead).as("Should not read: id below lower bound").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", INT_MIN_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);
+    assertThat(shouldRead).as("Should read: id equal to lower bound").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", INT_MAX_VALUE - 4))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);
+    assertThat(shouldRead).as("Should read: id between lower and upper bounds").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", INT_MAX_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);
+    assertThat(shouldRead).as("Should read: id equal to upper bound").isTrue();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", INT_MAX_VALUE + 1))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id above upper bound", shouldRead);
+    assertThat(shouldRead).as("Should not read: id above upper bound").isFalse();
 
     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", INT_MAX_VALUE + 6))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse("Should not read: id above upper bound", shouldRead);
+    assertThat(shouldRead).as("Should not read: id above upper bound").isFalse();
   }
 
read: id above upper bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", INT_MAX_VALUE + 6)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id above upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id above upper bound").isTrue(); } @Test @@ -772,37 +780,37 @@ public void testIntegerNotEqRewritten() { boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", INT_MIN_VALUE - 25))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id below lower bound", shouldRead); + assertThat(shouldRead).as("Should read: id below lower bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", INT_MIN_VALUE - 1))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id below lower bound", shouldRead); + assertThat(shouldRead).as("Should read: id below lower bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", INT_MIN_VALUE))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to lower bound", shouldRead); + assertThat(shouldRead).as("Should read: id equal to lower bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", INT_MAX_VALUE - 4))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); + assertThat(shouldRead).as("Should read: id between lower and upper bounds").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", INT_MAX_VALUE))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id equal to upper bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", INT_MAX_VALUE + 1))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id above upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id above upper bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", INT_MAX_VALUE + 6))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id above upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id above upper bound").isTrue(); } @Test @@ -810,12 +818,12 @@ public void testStringNotEq() { boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("some_nulls", "some")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: contains null != 'some'", shouldRead); + assertThat(shouldRead).as("Should read: contains null != 'some'").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("no_nulls", "")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should skip: contains only ''", shouldRead); + assertThat(shouldRead).as("Should skip: contains only ''").isFalse(); } @Test @@ -824,25 +832,27 @@ public void testStructFieldLt() { new ParquetDictionaryRowGroupFilter( SCHEMA, lessThan("struct_not_null.int_field", INT_MIN_VALUE - 25)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead); + 
assertThat(shouldRead).as("Should not read: id range below lower bound (5 < 30)").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, lessThan("struct_not_null.int_field", INT_MIN_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead); + assertThat(shouldRead) + .as("Should not read: id range below lower bound (30 is not < 30)") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, lessThan("struct_not_null.int_field", INT_MIN_VALUE + 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: one possible id", shouldRead); + assertThat(shouldRead).as("Should read: one possible id").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, lessThan("struct_not_null.int_field", INT_MAX_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: may possible ids", shouldRead); + assertThat(shouldRead).as("Should read: may possible ids").isTrue(); } @Test @@ -851,25 +861,25 @@ public void testStructFieldLtEq() { new ParquetDictionaryRowGroupFilter( SCHEMA, lessThanOrEqual("struct_not_null.int_field", INT_MIN_VALUE - 25)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead); + assertThat(shouldRead).as("Should not read: id range below lower bound (5 < 30)").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, lessThanOrEqual("struct_not_null.int_field", INT_MIN_VALUE - 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead); + assertThat(shouldRead).as("Should not read: id range below lower bound (29 < 30)").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, lessThanOrEqual("struct_not_null.int_field", INT_MIN_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: one possible id", shouldRead); + assertThat(shouldRead).as("Should read: one possible id").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, lessThanOrEqual("struct_not_null.int_field", INT_MAX_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: many possible ids", shouldRead); + assertThat(shouldRead).as("Should read: many possible ids").isTrue(); } @Test @@ -878,25 +888,27 @@ public void testStructFieldGt() { new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThan("struct_not_null.int_field", INT_MAX_VALUE + 6)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); + assertThat(shouldRead).as("Should not read: id range above upper bound (85 < 79)").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThan("struct_not_null.int_field", INT_MAX_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead); + assertThat(shouldRead) + .as("Should not read: id range above upper bound (79 is not > 79)") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThan("struct_not_null.int_field", INT_MAX_VALUE - 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should 
read: one possible id", shouldRead); + assertThat(shouldRead).as("Should read: one possible id").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThan("struct_not_null.int_field", INT_MAX_VALUE - 4)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: may possible ids", shouldRead); + assertThat(shouldRead).as("Should read: may possible ids").isTrue(); } @Test @@ -905,25 +917,25 @@ public void testStructFieldGtEq() { new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThanOrEqual("struct_not_null.int_field", INT_MAX_VALUE + 6)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); + assertThat(shouldRead).as("Should not read: id range above upper bound (85 < 79)").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThanOrEqual("struct_not_null.int_field", INT_MAX_VALUE + 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead); + assertThat(shouldRead).as("Should not read: id range above upper bound (80 > 79)").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThanOrEqual("struct_not_null.int_field", INT_MAX_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: one possible id", shouldRead); + assertThat(shouldRead).as("Should read: one possible id").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThanOrEqual("struct_not_null.int_field", INT_MAX_VALUE - 4)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: may possible ids", shouldRead); + assertThat(shouldRead).as("Should read: may possible ids").isTrue(); } @Test @@ -932,43 +944,43 @@ public void testStructFieldEq() { new ParquetDictionaryRowGroupFilter( SCHEMA, equal("struct_not_null.int_field", INT_MIN_VALUE - 25)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id below lower bound", shouldRead); + assertThat(shouldRead).as("Should not read: id below lower bound").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, equal("struct_not_null.int_field", INT_MIN_VALUE - 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id below lower bound", shouldRead); + assertThat(shouldRead).as("Should not read: id below lower bound").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, equal("struct_not_null.int_field", INT_MIN_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to lower bound", shouldRead); + assertThat(shouldRead).as("Should read: id equal to lower bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, equal("struct_not_null.int_field", INT_MAX_VALUE - 4)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); + assertThat(shouldRead).as("Should read: id between lower and upper bounds").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, equal("struct_not_null.int_field", INT_MAX_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id 
equal to upper bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, equal("struct_not_null.int_field", INT_MAX_VALUE + 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id above upper bound", shouldRead); + assertThat(shouldRead).as("Should not read: id above upper bound").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, equal("struct_not_null.int_field", INT_MAX_VALUE + 6)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id above upper bound", shouldRead); + assertThat(shouldRead).as("Should not read: id above upper bound").isFalse(); } @Test @@ -977,42 +989,42 @@ public void testStructFieldNotEq() { new ParquetDictionaryRowGroupFilter( SCHEMA, notEqual("struct_not_null.int_field", INT_MIN_VALUE - 25)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id below lower bound", shouldRead); + assertThat(shouldRead).as("Should read: id below lower bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notEqual("struct_not_null.int_field", INT_MIN_VALUE - 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id below lower bound", shouldRead); + assertThat(shouldRead).as("Should read: id below lower bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notEqual("struct_not_null.int_field", INT_MIN_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to lower bound", shouldRead); + assertThat(shouldRead).as("Should read: id equal to lower bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notEqual("struct_not_null.int_field", INT_MAX_VALUE - 4)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); + assertThat(shouldRead).as("Should read: id between lower and upper bounds").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notEqual("struct_not_null.int_field", INT_MAX_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id equal to upper bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", INT_MAX_VALUE + 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id above upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id above upper bound").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notEqual("struct_not_null.int_field", INT_MAX_VALUE + 6)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id above upper bound", shouldRead); + assertThat(shouldRead).as("Should read: id above upper bound").isTrue(); } @Test @@ -1020,7 +1032,7 @@ public void testCaseInsensitive() { boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("no_Nulls", ""), false) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should skip: contains only ''", shouldRead); + assertThat(shouldRead).as("Should skip: contains only ''").isFalse(); } @Test @@ -1040,43 +1052,43 @@ public void testIntegerIn() { new ParquetDictionaryRowGroupFilter( SCHEMA, in("id", INT_MIN_VALUE - 25, INT_MIN_VALUE - 24)) 
.shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse( - "Should not read: id below lower bound (5 < 30, 6 < 30). The two sets are disjoint.", - shouldRead); + assertThat(shouldRead) + .as("Should not read: id below lower bound (5 < 30, 6 < 30). The two sets are disjoint.") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("id", INT_MIN_VALUE - 2, INT_MIN_VALUE - 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse( - "Should not read: id below lower bound (28 < 30, 29 < 30). The two sets are disjoint.", - shouldRead); + assertThat(shouldRead) + .as("Should not read: id below lower bound (28 < 30, 29 < 30). The two sets are disjoint.") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("id", INT_MIN_VALUE - 1, INT_MIN_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to lower bound (30 == 30)", shouldRead); + assertThat(shouldRead).as("Should read: id equal to lower bound (30 == 30)").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("id", INT_MAX_VALUE - 4, INT_MAX_VALUE - 3)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: in set is a subset of the dictionary", shouldRead); + assertThat(shouldRead).as("Should read: in set is a subset of the dictionary").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("id", INT_MAX_VALUE, INT_MAX_VALUE + 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to upper bound (79 == 79)", shouldRead); + assertThat(shouldRead).as("Should read: id equal to upper bound (79 == 79)").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("id", INT_MAX_VALUE + 1, INT_MAX_VALUE + 2)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: id above upper bound (80 > 79, 81 > 79)", shouldRead); + assertThat(shouldRead).as("Should not read: id above upper bound (80 > 79, 81 > 79)").isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("id", INT_MAX_VALUE + 6, INT_MAX_VALUE + 7)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse( - "Should not read: id above upper bound (85 > 79, 86 > 79). The two sets are disjoint.", - shouldRead); + assertThat(shouldRead) + .as("Should not read: id above upper bound (85 > 79, 86 > 79). 
The two sets are disjoint.") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( @@ -1087,7 +1099,7 @@ SCHEMA, in("id", INT_MIN_VALUE - 25, INT_MIN_VALUE - 24)) .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: the dictionary is a subset of the in set", shouldRead); + assertThat(shouldRead).as("Should read: the dictionary is a subset of the in set").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( @@ -1098,34 +1110,40 @@ SCHEMA, in("id", INT_MIN_VALUE - 25, INT_MIN_VALUE - 24)) .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: the dictionary is equal to the in set", shouldRead); + assertThat(shouldRead).as("Should read: the dictionary is equal to the in set").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("all_nulls", 1, 2)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: in on all nulls column (isFallback to be true) ", shouldRead); + assertThat(shouldRead) + .as("Should read: in on all nulls column (isFallback to be true) ") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("some_nulls", "aaa", "some")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: in on some nulls column", shouldRead); + assertThat(shouldRead).as("Should read: in on some nulls column").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("some_nulls", "aaa", "bbb")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: some_nulls values are not within the set", shouldRead); + assertThat(shouldRead) + .as("Should not read: some_nulls values are not within the set") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("no_nulls", "aaa", "bbb")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse( - "Should not read: in on no nulls column (empty string is not within the set)", shouldRead); + assertThat(shouldRead) + .as("Should not read: in on no nulls column (empty string is not within the set)") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, in("no_nulls", "aaa", "")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: in on no nulls column (empty string is within the set)", shouldRead); + assertThat(shouldRead) + .as("Should read: in on no nulls column (empty string is within the set)") + .isTrue(); } @Test @@ -1134,49 +1152,49 @@ public void testIntegerNotIn() { new ParquetDictionaryRowGroupFilter( SCHEMA, notIn("id", INT_MIN_VALUE - 25, INT_MIN_VALUE - 24)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: id below lower bound (5 < 30, 6 < 30). The two sets are disjoint.", - shouldRead); + assertThat(shouldRead) + .as("Should read: id below lower bound (5 < 30, 6 < 30). The two sets are disjoint.") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notIn("id", INT_MIN_VALUE - 2, INT_MIN_VALUE - 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: id below lower bound (28 < 30, 29 < 30). The two sets are disjoint.", - shouldRead); + assertThat(shouldRead) + .as("Should read: id below lower bound (28 < 30, 29 < 30). 
The two sets are disjoint.") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notIn("id", INT_MIN_VALUE - 1, INT_MIN_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to lower bound (30 == 30)", shouldRead); + assertThat(shouldRead).as("Should read: id equal to lower bound (30 == 30)").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notIn("id", INT_MAX_VALUE - 4, INT_MAX_VALUE - 3)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: the notIn set is a subset of the dictionary", shouldRead); + assertThat(shouldRead).as("Should read: the notIn set is a subset of the dictionary").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notIn("id", INT_MAX_VALUE, INT_MAX_VALUE + 1)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: id equal to upper bound (79 == 79)", shouldRead); + assertThat(shouldRead).as("Should read: id equal to upper bound (79 == 79)").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notIn("id", INT_MAX_VALUE + 1, INT_MAX_VALUE + 2)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: id above upper bound (80 > 79, 81 > 79). The two sets are disjoint.", - shouldRead); + assertThat(shouldRead) + .as("Should read: id above upper bound (80 > 79, 81 > 79). The two sets are disjoint.") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( SCHEMA, notIn("id", INT_MAX_VALUE + 6, INT_MAX_VALUE + 7)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: id above upper bound (85 > 79, 86 > 79). The two sets are disjoint.", - shouldRead); + assertThat(shouldRead) + .as("Should read: id above upper bound (85 > 79, 86 > 79). 
The two sets are disjoint.") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter( @@ -1187,7 +1205,9 @@ SCHEMA, notIn("id", INT_MAX_VALUE + 6, INT_MAX_VALUE + 7)) .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: the dictionary is a subset of the notIn set", shouldRead); + assertThat(shouldRead) + .as("Should not read: the dictionary is a subset of the notIn set") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter( @@ -1198,30 +1218,35 @@ SCHEMA, notIn("id", INT_MAX_VALUE + 6, INT_MAX_VALUE + 7)) .boxed() .collect(Collectors.toList()))) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse("Should not read: the dictionary is equal to the notIn set", shouldRead); + assertThat(shouldRead) + .as("Should not read: the dictionary is equal to the notIn set") + .isFalse(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notIn("all_nulls", 1, 2)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should read: notIn on all nulls column", shouldRead); + assertThat(shouldRead).as("Should read: notIn on all nulls column").isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notIn("some_nulls", "aaa", "bbb")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: notIn on some nulls column (any null matches the notIn)", shouldRead); + assertThat(shouldRead) + .as("Should read: notIn on some nulls column (any null matches the notIn)") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notIn("no_nulls", "aaa", "bbb")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: notIn on no nulls column (empty string is not within the set)", shouldRead); + assertThat(shouldRead) + .as("Should read: notIn on no nulls column (empty string is not within the set)") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notIn("no_nulls", "aaa", "")) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse( - "Should not read: notIn on no nulls column (empty string is within the set)", shouldRead); + assertThat(shouldRead) + .as("Should not read: notIn on no nulls column (empty string is within the set)") + .isFalse(); } @Test @@ -1230,7 +1255,7 @@ public void testTypePromotion() { boolean shouldRead = new ParquetDictionaryRowGroupFilter(promotedSchema, equal("id", INT_MIN_VALUE + 1), true) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue("Should succeed with promoted schema", shouldRead); + assertThat(shouldRead).as("Should succeed with promoted schema").isTrue(); } @Test @@ -1249,14 +1274,16 @@ public void testFixedLenByteArray() { new ParquetDictionaryRowGroupFilter( SCHEMA, greaterThanOrEqual("decimal_fixed", BigDecimal.ZERO)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertTrue( - "Should read: Half of the decimal_fixed values are greater than 0", shouldRead); + assertThat(shouldRead) + .as("Should read: Half of the decimal_fixed values are greater than 0") + .isTrue(); shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("decimal_fixed", DECIMAL_MIN_VALUE)) .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore); - Assert.assertFalse( - "Should not read: No decimal_fixed values less than -1234567890.0987654321", shouldRead); + assertThat(shouldRead) + .as("Should not read: No 
@@ -1230,7 +1255,7 @@ public void testTypePromotion() {
     boolean shouldRead =
         new ParquetDictionaryRowGroupFilter(promotedSchema, equal("id", INT_MIN_VALUE + 1), true)
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue("Should succeed with promoted schema", shouldRead);
+    assertThat(shouldRead).as("Should succeed with promoted schema").isTrue();
   }

   @Test
@@ -1249,14 +1274,16 @@ public void testFixedLenByteArray() {
         new ParquetDictionaryRowGroupFilter(
                 SCHEMA, greaterThanOrEqual("decimal_fixed", BigDecimal.ZERO))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertTrue(
-        "Should read: Half of the decimal_fixed values are greater than 0", shouldRead);
+    assertThat(shouldRead)
+        .as("Should read: Half of the decimal_fixed values are greater than 0")
+        .isTrue();

     shouldRead =
         new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("decimal_fixed", DECIMAL_MIN_VALUE))
             .shouldRead(parquetSchema, rowGroupMetadata, dictionaryStore);
-    Assert.assertFalse(
-        "Should not read: No decimal_fixed values less than -1234567890.0987654321", shouldRead);
+    assertThat(shouldRead)
+        .as("Should not read: No decimal_fixed values less than -1234567890.0987654321")
+        .isFalse();
   }

   private ColumnChunkMetaData getColumnForName(BlockMetaData rowGroup, String columnName) {
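One detail the converted assertions in this file rely on: AssertJ only attaches a description if .as(...) runs before the verifying call, which is why every rewrite puts .as(...) between assertThat(...) and .isTrue()/.isFalse(). A small illustration (names invented for the example):

import static org.assertj.core.api.Assertions.assertThat;

class DescriptionOrdering {
  void demo(boolean shouldRead) {
    // Correct: the description is registered first, so a failure reads
    // "[Should read: id equal to lower bound (30 == 30)] expected: <true>...".
    assertThat(shouldRead).as("Should read: id equal to lower bound (30 == 30)").isTrue();

    // Compiles, but the description is useless: if isTrue() fails it throws
    // before as(...) ever runs, so the message carries no label at all.
    assertThat(shouldRead).isTrue().as("Should read: id equal to lower bound (30 == 30)");
  }
}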
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestParquet.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestParquet.java
index 1762802fd527..b21e234a5d3a 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestParquet.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestParquet.java
@@ -25,17 +25,18 @@
 import static org.apache.iceberg.parquet.ParquetWritingTestUtils.createTempFile;
 import static org.apache.iceberg.parquet.ParquetWritingTestUtils.write;
 import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.assertj.core.api.Assertions.assertThat;

 import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.file.Path;
 import java.util.Collections;
 import java.util.List;
 import java.util.function.Function;
 import org.apache.avro.generic.GenericData;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.GenericRecordBuilder;
-import org.apache.hadoop.fs.Path;
 import org.apache.iceberg.Files;
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.avro.AvroSchemaUtil;
@@ -49,14 +50,12 @@
 import org.apache.parquet.hadoop.ParquetFileReader;
 import org.apache.parquet.hadoop.ParquetWriter;
 import org.apache.parquet.schema.MessageType;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;

 public class TestParquet {

-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private Path temp;

   @Test
   public void testRowGroupSizeConfigurable() throws IOException {
@@ -68,7 +67,7 @@

     try (ParquetFileReader reader =
         ParquetFileReader.open(ParquetIO.file(localInput(parquetFile)))) {
-      Assert.assertEquals(2, reader.getRowGroups().size());
+      assertThat(reader.getRowGroups()).hasSize(2);
     }
   }

@@ -83,7 +82,7 @@ public void testRowGroupSizeConfigurableWithWriter() throws IOException {

     try (ParquetFileReader reader =
         ParquetFileReader.open(ParquetIO.file(localInput(parquetFile)))) {
-      Assert.assertEquals(2, reader.getRowGroups().size());
+      assertThat(reader.getRowGroups()).hasSize(2);
     }
   }

@@ -116,7 +115,7 @@ public void testNumberOfBytesWritten() throws IOException {
             records.toArray(new GenericData.Record[] {}));

     long expectedSize = ParquetIO.file(localInput(file)).getLength();
-    Assert.assertEquals(expectedSize, actualSize);
+    assertThat(actualSize).isEqualTo(expectedSize);
   }

   @Test
@@ -127,11 +126,11 @@ public void testTwoLevelList() throws IOException {
             optional(2, "topbytes", Types.BinaryType.get()));
     org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(schema.asStruct());

-    File testFile = temp.newFile();
-    Assert.assertTrue(testFile.delete());
+    File testFile = temp.toFile();
+    assertThat(testFile.delete()).isTrue();

     ParquetWriter writer =
-        AvroParquetWriter.builder(new Path(testFile.toURI()))
+        AvroParquetWriter.builder(new org.apache.hadoop.fs.Path(testFile.toURI()))
             .withDataModel(GenericData.get())
             .withSchema(avroSchema)
             .config("parquet.avro.add-list-element-records", "true")
@@ -154,8 +153,8 @@ public void testTwoLevelList() throws IOException {
         Iterables.getOnlyElement(
             Parquet.read(Files.localInput(testFile)).project(schema).callInit().build());

-    Assert.assertEquals(expectedByteList, recordRead.get("arraybytes"));
-    Assert.assertEquals(expectedBinary, recordRead.get("topbytes"));
+    assertThat(recordRead.get("arraybytes")).isEqualTo(expectedByteList);
+    assertThat(recordRead.get("topbytes")).isEqualTo(expectedBinary);
   }

   private Pair generateFile(
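Worth pausing on in the TestParquet hunks: once java.nio.file.Path is imported for the @TempDir field, Hadoop's Path can no longer be imported under its simple name, so the writer's builder spells out new org.apache.hadoop.fs.Path(testFile.toURI()). Java has no import aliasing; when two types share a simple name, one keeps the import and the other stays fully qualified. A stripped-down sketch of the same resolution (assumes hadoop-common on the classpath; class and method names are invented):

import java.io.File;
import java.nio.file.Path; // owns the simple name "Path" in this file

class PathDisambiguation {
  // The Hadoop type must be fully qualified everywhere it appears.
  static org.apache.hadoop.fs.Path toHadoopPath(Path tempDir, String name) {
    File file = tempDir.resolve(name).toFile();
    return new org.apache.hadoop.fs.Path(file.toURI());
  }
}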
should match", records, writtenRecords); + assertThat(writtenRecords).as("Written records should match").isEqualTo(records); } @SuppressWarnings("checkstyle:AvoidEscapedUnicodeCharacters") @Test public void testInvalidUpperBoundString() throws Exception { - OutputFile file = Files.localOutput(temp.newFile()); + OutputFile file = Files.localOutput(createTempFile(temp)); Table testTable = TestTables.create( - temp.newFile(), + createTempFile(temp), "test_invalid_string_bound", SCHEMA, PartitionSpec.unpartitioned(), @@ -167,12 +170,13 @@ public void testInvalidUpperBoundString() throws Exception { DataFile dataFile = dataWriter.toDataFile(); - Assert.assertEquals("Format should be Parquet", FileFormat.PARQUET, dataFile.format()); - Assert.assertEquals("Should be data file", FileContent.DATA, dataFile.content()); - Assert.assertEquals( - "Record count should match", overflowRecords.size(), dataFile.recordCount()); - Assert.assertEquals("Partition should be empty", 0, dataFile.partition().size()); - Assert.assertNull("Key metadata should be null", dataFile.keyMetadata()); + assertThat(dataFile.format()).as("Format should be Parquet").isEqualTo(FileFormat.PARQUET); + assertThat(dataFile.content()).as("Should be data file").isEqualTo(FileContent.DATA); + assertThat(dataFile.recordCount()) + .as("Record count should match") + .isEqualTo(overflowRecords.size()); + assertThat(dataFile.partition().size()).as("Partition should be empty").isEqualTo(0); + assertThat(dataFile.keyMetadata()).as("Key metadata should be null").isNull(); List writtenRecords; try (CloseableIterable reader = @@ -183,22 +187,22 @@ public void testInvalidUpperBoundString() throws Exception { writtenRecords = Lists.newArrayList(reader); } - Assert.assertEquals("Written records should match", overflowRecords, writtenRecords); + assertThat(writtenRecords).as("Written records should match").isEqualTo(overflowRecords); - Assert.assertTrue("Should have a valid lower bound", dataFile.lowerBounds().containsKey(1)); - Assert.assertTrue("Should have a valid upper bound", dataFile.upperBounds().containsKey(1)); - Assert.assertTrue("Should have a valid lower bound", dataFile.lowerBounds().containsKey(2)); - Assert.assertFalse("Should have a null upper bound", dataFile.upperBounds().containsKey(2)); + assertThat(dataFile.lowerBounds()).as("Should have a valid lower bound").containsKey(1); + assertThat(dataFile.upperBounds()).as("Should have a valid upper bound").containsKey(1); + assertThat(dataFile.lowerBounds()).as("Should have a valid lower bound").containsKey(2); + assertThat(dataFile.upperBounds()).as("Should have a null upper bound").doesNotContainKey(2); } @SuppressWarnings("checkstyle:AvoidEscapedUnicodeCharacters") @Test public void testInvalidUpperBoundBinary() throws Exception { - OutputFile file = Files.localOutput(temp.newFile()); + OutputFile file = Files.localOutput(createTempFile(temp)); Table testTable = TestTables.create( - temp.newFile(), + createTempFile(temp), "test_invalid_binary_bound", SCHEMA, PartitionSpec.unpartitioned(), @@ -238,12 +242,13 @@ public void testInvalidUpperBoundBinary() throws Exception { DataFile dataFile = dataWriter.toDataFile(); - Assert.assertEquals("Format should be Parquet", FileFormat.PARQUET, dataFile.format()); - Assert.assertEquals("Should be data file", FileContent.DATA, dataFile.content()); - Assert.assertEquals( - "Record count should match", overflowRecords.size(), dataFile.recordCount()); - Assert.assertEquals("Partition should be empty", 0, dataFile.partition().size()); - 
@@ -167,12 +170,13 @@

     DataFile dataFile = dataWriter.toDataFile();

-    Assert.assertEquals("Format should be Parquet", FileFormat.PARQUET, dataFile.format());
-    Assert.assertEquals("Should be data file", FileContent.DATA, dataFile.content());
-    Assert.assertEquals(
-        "Record count should match", overflowRecords.size(), dataFile.recordCount());
-    Assert.assertEquals("Partition should be empty", 0, dataFile.partition().size());
-    Assert.assertNull("Key metadata should be null", dataFile.keyMetadata());
+    assertThat(dataFile.format()).as("Format should be Parquet").isEqualTo(FileFormat.PARQUET);
+    assertThat(dataFile.content()).as("Should be data file").isEqualTo(FileContent.DATA);
+    assertThat(dataFile.recordCount())
+        .as("Record count should match")
+        .isEqualTo(overflowRecords.size());
+    assertThat(dataFile.partition().size()).as("Partition should be empty").isEqualTo(0);
+    assertThat(dataFile.keyMetadata()).as("Key metadata should be null").isNull();

     List writtenRecords;
     try (CloseableIterable reader =
@@ -183,22 +187,22 @@
       writtenRecords = Lists.newArrayList(reader);
     }

-    Assert.assertEquals("Written records should match", overflowRecords, writtenRecords);
+    assertThat(writtenRecords).as("Written records should match").isEqualTo(overflowRecords);

-    Assert.assertTrue("Should have a valid lower bound", dataFile.lowerBounds().containsKey(1));
-    Assert.assertTrue("Should have a valid upper bound", dataFile.upperBounds().containsKey(1));
-    Assert.assertTrue("Should have a valid lower bound", dataFile.lowerBounds().containsKey(2));
-    Assert.assertFalse("Should have a null upper bound", dataFile.upperBounds().containsKey(2));
+    assertThat(dataFile.lowerBounds()).as("Should have a valid lower bound").containsKey(1);
+    assertThat(dataFile.upperBounds()).as("Should have a valid upper bound").containsKey(1);
+    assertThat(dataFile.lowerBounds()).as("Should have a valid lower bound").containsKey(2);
+    assertThat(dataFile.upperBounds()).as("Should have a null upper bound").doesNotContainKey(2);
   }

   @SuppressWarnings("checkstyle:AvoidEscapedUnicodeCharacters")
   @Test
   public void testInvalidUpperBoundBinary() throws Exception {
-    OutputFile file = Files.localOutput(temp.newFile());
+    OutputFile file = Files.localOutput(createTempFile(temp));

     Table testTable =
         TestTables.create(
-            temp.newFile(),
+            createTempFile(temp),
             "test_invalid_binary_bound",
             SCHEMA,
             PartitionSpec.unpartitioned(),
@@ -238,12 +242,13 @@

     DataFile dataFile = dataWriter.toDataFile();

-    Assert.assertEquals("Format should be Parquet", FileFormat.PARQUET, dataFile.format());
-    Assert.assertEquals("Should be data file", FileContent.DATA, dataFile.content());
-    Assert.assertEquals(
-        "Record count should match", overflowRecords.size(), dataFile.recordCount());
-    Assert.assertEquals("Partition should be empty", 0, dataFile.partition().size());
-    Assert.assertNull("Key metadata should be null", dataFile.keyMetadata());
+    assertThat(dataFile.format()).as("Format should be Parquet").isEqualTo(FileFormat.PARQUET);
+    assertThat(dataFile.content()).as("Should be data file").isEqualTo(FileContent.DATA);
+    assertThat(dataFile.recordCount())
+        .as("Record count should match")
+        .isEqualTo(overflowRecords.size());
+    assertThat(dataFile.partition().size()).as("Partition should be empty").isEqualTo(0);
+    assertThat(dataFile.keyMetadata()).as("Key metadata should be null").isNull();

     List writtenRecords;
     try (CloseableIterable reader =
@@ -254,11 +259,11 @@
       writtenRecords = Lists.newArrayList(reader);
     }

-    Assert.assertEquals("Written records should match", overflowRecords, writtenRecords);
+    assertThat(writtenRecords).as("Written records should match").isEqualTo(overflowRecords);

-    Assert.assertTrue("Should have a valid lower bound", dataFile.lowerBounds().containsKey(1));
-    Assert.assertTrue("Should have a valid upper bound", dataFile.upperBounds().containsKey(1));
-    Assert.assertTrue("Should have a valid lower bound", dataFile.lowerBounds().containsKey(3));
-    Assert.assertFalse("Should have a null upper bound", dataFile.upperBounds().containsKey(3));
+    assertThat(dataFile.lowerBounds()).as("Should have a valid lower bound").containsKey(1);
+    assertThat(dataFile.upperBounds()).as("Should have a valid upper bound").containsKey(1);
+    assertThat(dataFile.lowerBounds()).as("Should have a valid lower bound").containsKey(3);
+    assertThat(dataFile.upperBounds()).as("Should have a null upper bound").doesNotContainKey(3);
   }
 }
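The bound checks in the two overflow tests switch from boolean assertions over containsKey to AssertJ's map assertions. Behavior is identical, but a failing doesNotContainKey prints the map's actual contents, where assertFalse could only report expected <false> but was <true>. A self-contained comparison with placeholder data:

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Map;

class MapAssertionDemo {
  void demo() {
    Map<Integer, String> upperBounds = Map.of(1, "0x41");

    // On failure these report the full map, e.g.
    // "Expecting actual: {1=0x41} to contain key: 2".
    assertThat(upperBounds).containsKey(1);
    assertThat(upperBounds).doesNotContainKey(2);
  }
}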
should be Parquet", FileFormat.PARQUET, metadata.format()); - Assert.assertEquals( - "Should be equality deletes", FileContent.EQUALITY_DELETES, metadata.content()); - Assert.assertEquals("Record count should be correct", records.size(), metadata.recordCount()); - Assert.assertEquals("Partition should be empty", 0, metadata.partition().size()); - Assert.assertNull("Key metadata should be null", metadata.keyMetadata()); + assertThat(metadata.format()).as("Format should be Parquet").isEqualTo(FileFormat.PARQUET); + assertThat(metadata.content()) + .as("Should be equality deletes") + .isEqualTo(FileContent.EQUALITY_DELETES); + assertThat(metadata.recordCount()) + .as("Record count should be correct") + .isEqualTo(records.size()); + assertThat(metadata.partition().size()).as("Partition should be empty").isEqualTo(0); + assertThat(metadata.keyMetadata()).as("Key metadata should be null").isNull(); List deletedRecords; try (CloseableIterable reader = @@ -107,13 +108,11 @@ public void testEqualityDeleteWriter() throws IOException { deletedRecords = Lists.newArrayList(reader); } - Assert.assertEquals("Deleted records should match expected", records, deletedRecords); + assertThat(deletedRecords).as("Deleted records should match expected").isEqualTo(records); } @Test public void testPositionDeleteWriter() throws IOException { - File deleteFile = temp.newFile(); - Schema deleteSchema = new Schema( MetadataColumns.DELETE_FILE_PATH, @@ -125,7 +124,7 @@ public void testPositionDeleteWriter() throws IOException { GenericRecord posDelete = GenericRecord.create(deleteSchema); List expectedDeleteRecords = Lists.newArrayList(); - OutputFile out = Files.localOutput(deleteFile); + OutputFile out = Files.localOutput(temp); PositionDeleteWriter deleteWriter = Parquet.writeDeletes(out) .createWriterFunc(GenericParquetWriter::buildWriter) @@ -147,12 +146,15 @@ public void testPositionDeleteWriter() throws IOException { } DeleteFile metadata = deleteWriter.toDeleteFile(); - Assert.assertEquals("Format should be Parquet", FileFormat.PARQUET, metadata.format()); - Assert.assertEquals( - "Should be position deletes", FileContent.POSITION_DELETES, metadata.content()); - Assert.assertEquals("Record count should be correct", records.size(), metadata.recordCount()); - Assert.assertEquals("Partition should be empty", 0, metadata.partition().size()); - Assert.assertNull("Key metadata should be null", metadata.keyMetadata()); + assertThat(metadata.format()).as("Format should be Parquet").isEqualTo(FileFormat.PARQUET); + assertThat(metadata.content()) + .as("Should be position deletes") + .isEqualTo(FileContent.POSITION_DELETES); + assertThat(metadata.recordCount()) + .as("Record count should be correct") + .isEqualTo(records.size()); + assertThat(metadata.partition().size()).as("Partition should be empty").isEqualTo(0); + assertThat(metadata.keyMetadata()).as("Key metadata should be null").isNull(); List deletedRecords; try (CloseableIterable reader = @@ -164,14 +166,13 @@ public void testPositionDeleteWriter() throws IOException { deletedRecords = Lists.newArrayList(reader); } - Assert.assertEquals( - "Deleted records should match expected", expectedDeleteRecords, deletedRecords); + assertThat(deletedRecords) + .as("Deleted records should match expected") + .isEqualTo(expectedDeleteRecords); } @Test public void testPositionDeleteWriterWithEmptyRow() throws IOException { - File deleteFile = temp.newFile(); - Schema deleteSchema = new Schema(MetadataColumns.DELETE_FILE_PATH, MetadataColumns.DELETE_FILE_POS); @@ -179,7 +180,7 
@@ -179,7 +180,7 @@ public void testPositionDeleteWriterWithEmptyRow() throws IOException {
     GenericRecord posDelete = GenericRecord.create(deleteSchema);
     List expectedDeleteRecords = Lists.newArrayList();

-    OutputFile out = Files.localOutput(deleteFile);
+    OutputFile out = Files.localOutput(temp);
     PositionDeleteWriter deleteWriter =
         Parquet.writeDeletes(out)
             .createWriterFunc(GenericParquetWriter::buildWriter)
@@ -202,12 +203,15 @@ public void testPositionDeleteWriterWithEmptyRow() throws IOException {
     }

     DeleteFile metadata = deleteWriter.toDeleteFile();
-    Assert.assertEquals("Format should be Parquet", FileFormat.PARQUET, metadata.format());
-    Assert.assertEquals(
-        "Should be position deletes", FileContent.POSITION_DELETES, metadata.content());
-    Assert.assertEquals("Record count should be correct", records.size(), metadata.recordCount());
-    Assert.assertEquals("Partition should be empty", 0, metadata.partition().size());
-    Assert.assertNull("Key metadata should be null", metadata.keyMetadata());
+    assertThat(metadata.format()).as("Format should be Parquet").isEqualTo(FileFormat.PARQUET);
+    assertThat(metadata.content())
+        .as("Should be position deletes")
+        .isEqualTo(FileContent.POSITION_DELETES);
+    assertThat(metadata.recordCount())
+        .as("Record count should be correct")
+        .isEqualTo(records.size());
+    assertThat(metadata.partition().size()).as("Partition should be empty").isEqualTo(0);
+    assertThat(metadata.keyMetadata()).as("Key metadata should be null").isNull();

     List deletedRecords;
     try (CloseableIterable reader =
@@ -219,7 +223,8 @@ public void testPositionDeleteWriterWithEmptyRow() throws IOException {
       deletedRecords = Lists.newArrayList(reader);
     }

-    Assert.assertEquals(
-        "Deleted records should match expected", expectedDeleteRecords, deletedRecords);
+    assertThat(deletedRecords)
+        .as("Deleted records should match expected")
+        .isEqualTo(expectedDeleteRecords);
   }
 }
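Note that TestParquetDeleteWriters injects its temporary location as a java.io.File while the neighboring classes use java.nio.file.Path; JUnit 5's @TempDir supports both, on fields or test-method parameters, creating a fresh directory before each test and deleting it afterwards. There is no newFile() convenience as on the old TemporaryFolder rule, so children are resolved explicitly. A minimal sketch (class and file names invented):

import java.io.File;
import java.nio.file.Path;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirStyles {
  @TempDir Path tempPath; // recreated for every test method

  @Test
  void parameterInjectionAlsoWorks(@TempDir File tempFile) {
    // Children must be resolved by hand; neither call creates the file yet.
    Path viaPath = tempPath.resolve("data.parquet");
    File viaFile = new File(tempFile, "data.parquet");
  }
}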
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetEncryption.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetEncryption.java
index 32923c8424b2..c68a79c68977 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetEncryption.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetEncryption.java
@@ -22,11 +22,13 @@
 import static org.apache.iceberg.Files.localOutput;
 import static org.apache.iceberg.parquet.ParquetWritingTestUtils.createTempFile;
 import static org.apache.iceberg.types.Types.NestedField.optional;
+import static org.assertj.core.api.Assertions.assertThat;

 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.file.Path;
 import java.security.SecureRandom;
 import java.util.List;
 import org.apache.avro.generic.GenericData;
@@ -38,11 +40,9 @@
 import org.apache.iceberg.relocated.com.google.common.collect.Lists;
 import org.apache.iceberg.types.Types.IntegerType;
 import org.apache.parquet.crypto.ParquetCryptoRuntimeException;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;

 public class TestParquetEncryption {

@@ -53,9 +53,9 @@ public class TestParquetEncryption {
   private static File file;
   private static final Schema schema = new Schema(optional(1, columnName, IntegerType.get()));

-  @Rule public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir private Path temp;

-  @Before
+  @BeforeEach
   public void writeEncryptedFile() throws IOException {
     List records = Lists.newArrayListWithCapacity(recordCount);
     org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(schema.asStruct());
@@ -120,7 +120,7 @@ public void testReadEncryptedFile() throws IOException {
             .iterator()) {
       for (int i = 1; i <= recordCount; i++) {
         GenericData.Record readRecord = (GenericData.Record) readRecords.next();
-        Assert.assertEquals(i, readRecord.get(columnName));
+        assertThat(readRecord.get(columnName)).isEqualTo(i);
       }
     }
   }
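The encryption test needed nothing beyond annotation renames: JUnit 4's lifecycle annotations map one-to-one onto the Jupiter equivalents used above. For reference, the standard correspondence in a minimal class:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class LifecycleMapping {
  @BeforeAll
  static void initOnce() {} // JUnit 4: @BeforeClass (also static)

  @BeforeEach
  void setUp() {} // JUnit 4: @Before

  @AfterEach
  void tearDown() {} // JUnit 4: @After

  @AfterAll
  static void cleanupOnce() {} // JUnit 4: @AfterClass

  @Test
  void test() {} // JUnit 4: org.junit.Test
}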
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetSchemaUtil.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetSchemaUtil.java
index c669cf02a2be..84fbf2a7d989 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetSchemaUtil.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestParquetSchemaUtil.java
@@ -20,6 +20,7 @@
 import static org.apache.iceberg.types.Types.NestedField.optional;
 import static org.apache.iceberg.types.Types.NestedField.required;
+import static org.assertj.core.api.Assertions.assertThat;

 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.iceberg.Schema;
@@ -38,8 +39,7 @@
 import org.apache.parquet.schema.Types.ListBuilder;
 import org.apache.parquet.schema.Types.MapBuilder;
 import org.apache.parquet.schema.Types.PrimitiveBuilder;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

 public class TestParquetSchemaUtil {
   private static final Types.StructType SUPPORTED_PRIMITIVES =
@@ -124,7 +124,7 @@ public void testAssignIdsByNameMapping() {
     MessageType messageTypeWithIdsFromNameMapping =
         ParquetSchemaUtil.applyNameMapping(RemoveIds.removeIds(messageTypeWithIds), nameMapping);

-    Assert.assertEquals(messageTypeWithIds, messageTypeWithIdsFromNameMapping);
+    assertThat(messageTypeWithIdsFromNameMapping).isEqualTo(messageTypeWithIds);
   }

   @Test
@@ -259,7 +259,9 @@ public void testSchemaConversionWithoutAssigningIds() {
                     28, 29, Types.IntegerType.get(), Types.IntegerType.get())));

     Schema actualSchema = ParquetSchemaUtil.convertAndPrune(messageType);
-    Assert.assertEquals("Schema must match", expectedSchema.asStruct(), actualSchema.asStruct());
+    assertThat(actualSchema.asStruct())
+        .as("Schema must match")
+        .isEqualTo(expectedSchema.asStruct());
   }

   @Test
@@ -286,7 +288,9 @@ public void testSchemaConversionForHiveStyleLists() {
     NameMapping nameMapping = MappingUtil.create(expectedSchema);
     MessageType messageTypeWithIds = ParquetSchemaUtil.applyNameMapping(messageType, nameMapping);
     Schema actualSchema = ParquetSchemaUtil.convertAndPrune(messageTypeWithIds);
-    Assert.assertEquals("Schema must match", expectedSchema.asStruct(), actualSchema.asStruct());
+    assertThat(actualSchema.asStruct())
+        .as("Schema must match")
+        .isEqualTo(expectedSchema.asStruct());
   }

   @Test
@@ -304,7 +308,9 @@ public void testLegacyTwoLevelListTypeWithPrimitiveElement() {
             optional(1, "arraybytes", Types.ListType.ofRequired(1000, Types.BinaryType.get())));

     Schema actualSchema = ParquetSchemaUtil.convert(messageType);
-    Assert.assertEquals("Schema must match", expectedSchema.asStruct(), actualSchema.asStruct());
+    assertThat(actualSchema.asStruct())
+        .as("Schema must match")
+        .isEqualTo(expectedSchema.asStruct());
   }

   @Test
@@ -338,7 +344,9 @@ public void testLegacyTwoLevelListTypeWithGroupTypeElementWithTwoFields() {
                             optional(1001, "f001", Types.LongType.get())))))));

     Schema actualSchema = ParquetSchemaUtil.convert(parquetScehma);
-    Assert.assertEquals("Schema must match", expectedSchema.asStruct(), actualSchema.asStruct());
+    assertThat(actualSchema.asStruct())
+        .as("Schema must match")
+        .isEqualTo(expectedSchema.asStruct());
   }

   @Test
@@ -362,7 +370,9 @@ public void testLegacyTwoLevelListGenByParquetAvro() {
                     1001, Types.StructType.of(required(1000, "str", Types.StringType.get())))));

     Schema actualSchema = ParquetSchemaUtil.convert(parquetScehma);
-    Assert.assertEquals("Schema must match", expectedSchema.asStruct(), actualSchema.asStruct());
+    assertThat(actualSchema.asStruct())
+        .as("Schema must match")
+        .isEqualTo(expectedSchema.asStruct());
   }

   @Test
@@ -386,7 +396,9 @@ public void testLegacyTwoLevelListGenByParquetThrift() {
                     1001, Types.StructType.of(required(1000, "str", Types.StringType.get())))));

     Schema actualSchema = ParquetSchemaUtil.convert(parquetScehma);
-    Assert.assertEquals("Schema must match", expectedSchema.asStruct(), actualSchema.asStruct());
+    assertThat(actualSchema.asStruct())
+        .as("Schema must match")
+        .isEqualTo(expectedSchema.asStruct());
   }

   @Test
@@ -410,7 +422,9 @@ public void testLegacyTwoLevelListGenByParquetThrift1() {
                     1001, Types.ListType.ofRequired(1000, Types.IntegerType.get()))));

     Schema actualSchema = ParquetSchemaUtil.convert(parquetScehma);
-    Assert.assertEquals("Schema must match", expectedSchema.asStruct(), actualSchema.asStruct());
+    assertThat(actualSchema.asStruct())
+        .as("Schema must match")
+        .isEqualTo(expectedSchema.asStruct());
   }

   private Type primitive(
diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestPruneColumns.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestPruneColumns.java
index 9e33ffa314ed..70345adf1b8b 100644
--- a/parquet/src/test/java/org/apache/iceberg/parquet/TestPruneColumns.java
+++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestPruneColumns.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iceberg.parquet;

+import static org.assertj.core.api.Assertions.assertThat;
+
 import org.apache.iceberg.Schema;
 import org.apache.iceberg.types.Types.DoubleType;
 import org.apache.iceberg.types.Types.ListType;
@@ -30,8 +32,7 @@
 import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
 import org.apache.parquet.schema.Type;
 import org.apache.parquet.schema.Types;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

 public class TestPruneColumns {
   @Test
@@ -118,7 +119,7 @@ public void testMapKeyValueName() {
             .named("table");

     MessageType actual = ParquetSchemaUtil.pruneColumns(fileSchema, projection);
-    Assert.assertEquals("Pruned schema should not rename repeated struct", expected, actual);
+    assertThat(actual).as("Pruned schema should not rename repeated struct").isEqualTo(expected);
   }

   @Test
@@ -193,7 +194,7 @@ public void testListElementName() {
             .named("table");

     MessageType actual = ParquetSchemaUtil.pruneColumns(fileSchema, projection);
-    Assert.assertEquals("Pruned schema should not rename repeated struct", expected, actual);
+    assertThat(actual).as("Pruned schema should not rename repeated struct").isEqualTo(expected);
   }

   @Test
@@ -267,6 +268,6 @@ public void testStructElementName() {
             .named("table");

     MessageType actual = ParquetSchemaUtil.pruneColumns(fileSchema, projection);
-    Assert.assertEquals("Pruned schema should be matched", expected, actual);
+    assertThat(actual).as("Pruned schema should be matched").isEqualTo(expected);
   }
 }
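Finally, a pattern that runs through every converted equality assertion in these last two files: JUnit's assertEquals takes (expected, actual), while AssertJ's subject is always the actual value, so the operands swap sides during conversion; testAssignIdsByNameMapping above shows the flip explicitly. Getting the order wrong still passes and fails identically, but the failure message labels the two values backwards. A compact reminder with placeholder values:

import static org.assertj.core.api.Assertions.assertThat;

class ArgumentOrderDemo {
  void demo() {
    String expected = "message_type_with_ids";
    String actual = computeActual();

    // JUnit 4 style: Assert.assertEquals(expected, actual);
    // AssertJ style: start from the actual value, compare to the expected one.
    assertThat(actual).isEqualTo(expected);
  }

  private String computeActual() {
    return "message_type_with_ids"; // stand-in for real output
  }
}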