diff --git a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java index 06ac54617ae6..b96b47c5a785 100644 --- a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java +++ b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java @@ -30,8 +30,8 @@ public class Actions { // disable classloader check as Avro may cache class/object in the serializers. .set(CoreOptions.CHECK_LEAKED_CLASSLOADER, false); - private StreamExecutionEnvironment env; - private Table table; + private final StreamExecutionEnvironment env; + private final Table table; private Actions(StreamExecutionEnvironment env, Table table) { this.env = env; diff --git a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java index 9876bb3861c4..670abebcb58a 100644 --- a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java +++ b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java @@ -31,7 +31,7 @@ public class RewriteDataFilesAction extends BaseRewriteDataFilesAction { - private StreamExecutionEnvironment env; + private final StreamExecutionEnvironment env; private int maxParallelism; public RewriteDataFilesAction(StreamExecutionEnvironment env, Table table) { diff --git a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java index 04e168385a36..f7e8e0c884cf 100644 --- a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java +++ b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java @@ -55,7 +55,7 @@ public RowData map(GenericRecord genericRecord) throws Exception { public static AvroGenericRecordToRowDataMapper forAvroSchema(Schema avroSchema) { DataType dataType = AvroSchemaConverter.convertToDataType(avroSchema.toString()); LogicalType logicalType = TypeConversions.fromDataToLogicalType(dataType); - RowType rowType = RowType.of(logicalType.getChildren().stream().toArray(LogicalType[]::new)); + RowType rowType = RowType.of(logicalType.getChildren().toArray(new LogicalType[0])); return new AvroGenericRecordToRowDataMapper(rowType); } } diff --git a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java index 0ffffd9cf49f..0b63e2721178 100644 --- a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java +++ b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java @@ -39,7 +39,7 @@ class MapDataStatistics implements DataStatistics pendingSplits; - private int[] enumerationSplitCountHistory; + private final int[] enumerationSplitCountHistory; public IcebergEnumeratorState(Collection pendingSplits) { this(null, pendingSplits); diff --git a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java index 95d6db2cfbc4..f76f8a69ff0e 100644 --- 
a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java +++ b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java @@ -174,10 +174,8 @@ private static Collection deserializePendingSplits( private static void serializeEnumerationSplitCountHistory( DataOutputSerializer out, int[] enumerationSplitCountHistory) throws IOException { out.writeInt(enumerationSplitCountHistory.length); - if (enumerationSplitCountHistory.length > 0) { - for (int enumerationSplitCount : enumerationSplitCountHistory) { - out.writeInt(enumerationSplitCount); - } + for (int enumerationSplitCount : enumerationSplitCountHistory) { + out.writeInt(enumerationSplitCount); } } diff --git a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java index a0395f29ac5b..841969666ee5 100644 --- a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java +++ b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java @@ -129,7 +129,7 @@ public static String readLongUTF(DataInputDeserializer in) throws IOException { if (count > utflen) { throw new UTFDataFormatException("malformed input: partial character at end"); } - char2 = (int) bytearr[count - 1]; + char2 = bytearr[count - 1]; if ((char2 & 0xC0) != 0x80) { throw new UTFDataFormatException("malformed input around byte " + count); } @@ -141,8 +141,8 @@ public static String readLongUTF(DataInputDeserializer in) throws IOException { if (count > utflen) { throw new UTFDataFormatException("malformed input: partial character at end"); } - char2 = (int) bytearr[count - 2]; - char3 = (int) bytearr[count - 1]; + char2 = bytearr[count - 2]; + char3 = bytearr[count - 1]; if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80)) { throw new UTFDataFormatException("malformed input around byte " + (count - 1)); } diff --git a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java index 353cee56bebb..20b33e615e5f 100644 --- a/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java +++ b/flink/v1.17/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java @@ -32,7 +32,7 @@ private FlinkPackage() {} /** Returns Flink version string like x.y.z */ public static String version() { if (null == VERSION.get()) { - String detectedVersion = null; + String detectedVersion; try { detectedVersion = versionFromJar(); // use unknown version in case exact implementation version can't be found from the jar diff --git a/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java b/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java index 0bb08e4f265a..e395414e925d 100644 --- a/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java +++ b/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java @@ -39,7 +39,7 @@ public class TestFlinkCatalogTablePartitions extends CatalogTestBase { - private String tableName = "test_table"; + private final String tableName = "test_table"; @Parameter(index = 2) private FileFormat format; diff --git a/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java 
b/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java index 4ad302dde436..a7c58e551112 100644 --- a/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java +++ b/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java @@ -23,7 +23,7 @@ import org.apache.iceberg.TestTables; public class TestTableLoader implements TableLoader { - private File dir; + private final File dir; public static TableLoader of(String dir) { return new TestTableLoader(dir); diff --git a/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java b/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java index 7033fd30e84f..187f2b0b9e31 100644 --- a/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java +++ b/flink/v1.17/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java @@ -122,7 +122,7 @@ public void testTimeUnit() throws IOException { new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MICROSECONDS); assertThat(extractor.extractWatermark(split(0))) - .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue() / 1000L); + .isEqualTo(MIN_VALUES.get(0).get(columnName) / 1000L); } @TestTemplate diff --git a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java index 06ac54617ae6..b96b47c5a785 100644 --- a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java +++ b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java @@ -30,8 +30,8 @@ public class Actions { // disable classloader check as Avro may cache class/object in the serializers. 
.set(CoreOptions.CHECK_LEAKED_CLASSLOADER, false); - private StreamExecutionEnvironment env; - private Table table; + private final StreamExecutionEnvironment env; + private final Table table; private Actions(StreamExecutionEnvironment env, Table table) { this.env = env; diff --git a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java index 9876bb3861c4..670abebcb58a 100644 --- a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java +++ b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java @@ -31,7 +31,7 @@ public class RewriteDataFilesAction extends BaseRewriteDataFilesAction { - private StreamExecutionEnvironment env; + private final StreamExecutionEnvironment env; private int maxParallelism; public RewriteDataFilesAction(StreamExecutionEnvironment env, Table table) { diff --git a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java index 04e168385a36..f7e8e0c884cf 100644 --- a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java +++ b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java @@ -55,7 +55,7 @@ public RowData map(GenericRecord genericRecord) throws Exception { public static AvroGenericRecordToRowDataMapper forAvroSchema(Schema avroSchema) { DataType dataType = AvroSchemaConverter.convertToDataType(avroSchema.toString()); LogicalType logicalType = TypeConversions.fromDataToLogicalType(dataType); - RowType rowType = RowType.of(logicalType.getChildren().stream().toArray(LogicalType[]::new)); + RowType rowType = RowType.of(logicalType.getChildren().toArray(new LogicalType[0])); return new AvroGenericRecordToRowDataMapper(rowType); } } diff --git a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java index 0ffffd9cf49f..0b63e2721178 100644 --- a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java +++ b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java @@ -39,7 +39,7 @@ class MapDataStatistics implements DataStatistics pendingSplits; - private int[] enumerationSplitCountHistory; + private final int[] enumerationSplitCountHistory; public IcebergEnumeratorState(Collection pendingSplits) { this(null, pendingSplits); diff --git a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java index 95d6db2cfbc4..f76f8a69ff0e 100644 --- a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java +++ b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java @@ -174,10 +174,8 @@ private static Collection deserializePendingSplits( private static void serializeEnumerationSplitCountHistory( DataOutputSerializer out, int[] enumerationSplitCountHistory) throws IOException { out.writeInt(enumerationSplitCountHistory.length); - if 
(enumerationSplitCountHistory.length > 0) { - for (int enumerationSplitCount : enumerationSplitCountHistory) { - out.writeInt(enumerationSplitCount); - } + for (int enumerationSplitCount : enumerationSplitCountHistory) { + out.writeInt(enumerationSplitCount); } } diff --git a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java index a0395f29ac5b..841969666ee5 100644 --- a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java +++ b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java @@ -129,7 +129,7 @@ public static String readLongUTF(DataInputDeserializer in) throws IOException { if (count > utflen) { throw new UTFDataFormatException("malformed input: partial character at end"); } - char2 = (int) bytearr[count - 1]; + char2 = bytearr[count - 1]; if ((char2 & 0xC0) != 0x80) { throw new UTFDataFormatException("malformed input around byte " + count); } @@ -141,8 +141,8 @@ public static String readLongUTF(DataInputDeserializer in) throws IOException { if (count > utflen) { throw new UTFDataFormatException("malformed input: partial character at end"); } - char2 = (int) bytearr[count - 2]; - char3 = (int) bytearr[count - 1]; + char2 = bytearr[count - 2]; + char3 = bytearr[count - 1]; if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80)) { throw new UTFDataFormatException("malformed input around byte " + (count - 1)); } diff --git a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java index 353cee56bebb..20b33e615e5f 100644 --- a/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java +++ b/flink/v1.18/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java @@ -32,7 +32,7 @@ private FlinkPackage() {} /** Returns Flink version string like x.y.z */ public static String version() { if (null == VERSION.get()) { - String detectedVersion = null; + String detectedVersion; try { detectedVersion = versionFromJar(); // use unknown version in case exact implementation version can't be found from the jar diff --git a/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java b/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java index 0bb08e4f265a..e395414e925d 100644 --- a/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java +++ b/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java @@ -39,7 +39,7 @@ public class TestFlinkCatalogTablePartitions extends CatalogTestBase { - private String tableName = "test_table"; + private final String tableName = "test_table"; @Parameter(index = 2) private FileFormat format; diff --git a/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java b/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java index 4ad302dde436..a7c58e551112 100644 --- a/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java +++ b/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java @@ -23,7 +23,7 @@ import org.apache.iceberg.TestTables; public class TestTableLoader implements TableLoader { - private File dir; + private final File dir; public static TableLoader of(String dir) { return new 
TestTableLoader(dir); diff --git a/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java b/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java index 7033fd30e84f..187f2b0b9e31 100644 --- a/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java +++ b/flink/v1.18/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java @@ -122,7 +122,7 @@ public void testTimeUnit() throws IOException { new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MICROSECONDS); assertThat(extractor.extractWatermark(split(0))) - .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue() / 1000L); + .isEqualTo(MIN_VALUES.get(0).get(columnName) / 1000L); } @TestTemplate diff --git a/spark/v3.3/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java b/spark/v3.3/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java index 63d24f7da553..d8f9301a7d82 100644 --- a/spark/v3.3/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java +++ b/spark/v3.3/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java @@ -29,7 +29,7 @@ class RandomGeneratingUDF implements Serializable { private final long uniqueValues; - private Random rand = new Random(); + private final Random rand = new Random(); RandomGeneratingUDF(long uniqueValues) { this.uniqueValues = uniqueValues; @@ -43,8 +43,7 @@ UserDefinedFunction randomLongUDF() { UserDefinedFunction randomString() { return udf( - () -> (String) RandomUtil.generatePrimitive(Types.StringType.get(), rand), - DataTypes.StringType) + () -> RandomUtil.generatePrimitive(Types.StringType.get(), rand), DataTypes.StringType) .asNondeterministic() .asNonNullable(); } diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java index cdc0bf5f3cad..a4bcd0d8a5b5 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java @@ -199,7 +199,7 @@ public Type map(Types.MapType map, Supplier keyResult, Supplier valu "Cannot project a map of optional values as required values: %s", map); Preconditions.checkArgument( - StringType.class.isInstance(requestedMap.keyType()), + requestedMap.keyType() instanceof StringType, "Invalid map key type (not string): %s", requestedMap.keyType()); diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java index 10b856a7acf0..f5d9a7af1c8f 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java @@ -885,7 +885,7 @@ public static List getPartitions( JavaConverters.collectionAsScalaIterableConverter(ImmutableList.of(rootPath)) .asScala() .toSeq(), - scala.collection.immutable.Map$.MODULE$.empty(), + scala.collection.immutable.Map$.MODULE$.empty(), userSpecifiedSchema, fileStatusCache, Option.empty(), diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java index 47690656b41d..af1e99df71d3 100644 --- 
a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java @@ -182,7 +182,7 @@ public static List getPartitions( Option> scalaPartitionFilter; if (partitionFilter != null && !partitionFilter.isEmpty()) { Builder, scala.collection.immutable.Map> builder = - Map$.MODULE$.newBuilder(); + Map$.MODULE$.newBuilder(); partitionFilter.forEach((key, value) -> builder.$plus$eq(Tuple2.apply(key, value))); scalaPartitionFilter = Option.apply(builder.result()); } else { diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java index b00ed42008f1..5fbb4117feb8 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java @@ -116,7 +116,7 @@ public class DeleteOrphanFilesSparkAction extends BaseSparkAction equalSchemes = flattenMap(EQUAL_SCHEMES_DEFAULT); private Map equalAuthorities = Collections.emptyMap(); private PrefixMismatchMode prefixMismatchMode = PrefixMismatchMode.ERROR; - private String location = null; + private String location; private long olderThanTimestamp = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3); private Dataset compareToFileList; private Consumer deleteFunc = null; diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java index ae547e206324..eed0b2b67b0a 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java @@ -367,7 +367,7 @@ private Result doExecuteWithPartialProgress( Stream toGroupStream( RewriteExecutionContext ctx, Map>> groupsByPartition) { return groupsByPartition.entrySet().stream() - .filter(e -> e.getValue().size() != 0) + .filter(e -> !e.getValue().isEmpty()) .flatMap( e -> { StructLike partition = e.getKey(); diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java index 78eb010b9b9c..62b7978f3800 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java @@ -93,9 +93,9 @@ public class RewriteManifestsSparkAction private final long targetManifestSizeBytes; private final boolean shouldStageManifests; - private PartitionSpec spec = null; + private PartitionSpec spec; private Predicate predicate = manifest -> true; - private String outputLocation = null; + private String outputLocation; RewriteManifestsSparkAction(SparkSession spark, Table table) { super(spark); diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java index ea1c52940175..8bc67c35c3d6 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java +++ 
b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java @@ -309,7 +309,7 @@ private Result doExecuteWithPartialProgress( // stop commit service commitService.close(); List commitResults = commitService.results(); - if (commitResults.size() == 0) { + if (commitResults.isEmpty()) { LOG.error( "{} is true but no rewrite commits succeeded. Check the logs to determine why the individual " + "commits failed. If this is persistent it may help to increase {} which will break the rewrite operation " @@ -331,7 +331,7 @@ private Stream toGroupStream( RewriteExecutionContext ctx, Map>> groupsByPartition) { return groupsByPartition.entrySet().stream() - .filter(e -> e.getValue().size() != 0) + .filter(e -> !e.getValue().isEmpty()) .flatMap( e -> { StructLike partition = e.getKey(); diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java index 1a4f7052de39..e306bab32bcd 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java @@ -561,7 +561,7 @@ private static class InternalRowWriter extends ParquetValueWriters.StructWriter< private InternalRowWriter(List> writers, List types) { super(writers); - this.types = types.toArray(new DataType[types.size()]); + this.types = types.toArray(new DataType[0]); } @Override diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java index c24bcaad58b1..ef3138d677c6 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java @@ -93,7 +93,7 @@ public class SparkScanBuilder private final SparkReadConf readConf; private final List metaColumns = Lists.newArrayList(); - private Schema schema = null; + private Schema schema; private boolean caseSensitive; private List filterExpressions = null; private Filter[] pushedFilters = NO_FILTERS; diff --git a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java index 25393888f95c..c5c86c3ebf28 100644 --- a/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java +++ b/spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java @@ -44,7 +44,7 @@ class SparkStagedScanBuilder implements ScanBuilder, SupportsPushDownRequiredCol private final SparkReadConf readConf; private final List metaColumns = Lists.newArrayList(); - private Schema schema = null; + private Schema schema; SparkStagedScanBuilder(SparkSession spark, Table table, CaseInsensitiveStringMap options) { this.spark = spark; diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java index c96ed2909fe7..028d495b894d 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java @@ -138,7 +138,7 @@ public static Object[][] parameters() { @Rule public TemporaryFolder temp 
= new TemporaryFolder(); - private String baseTableName = "baseTable"; + private final String baseTableName = "baseTable"; private File tableDir; private String tableLocation; private final String type; diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java index 478afcf09ae3..360b9ff20ec0 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java @@ -67,7 +67,7 @@ public static List generateList(Schema schema, int numRecords, long seed public static Iterable generateSpark(Schema schema, int numRecords, long seed) { return () -> new Iterator() { - private SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed); + private final SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed); private int count = 0; @Override @@ -114,7 +114,7 @@ private static Iterable newIterable( return () -> new Iterator() { private int count = 0; - private RandomDataGenerator generator = newGenerator.get(); + private final RandomDataGenerator generator = newGenerator.get(); @Override public boolean hasNext() { diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java index 53a35eec61ce..2627ec0d2b55 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java @@ -22,7 +22,7 @@ import java.util.concurrent.atomic.AtomicInteger; public class LogMessage { - private static AtomicInteger idCounter = new AtomicInteger(0); + private static final AtomicInteger idCounter = new AtomicInteger(0); static LogMessage debug(String date, String message) { return new LogMessage(idCounter.getAndIncrement(), date, "DEBUG", message); diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java index 3d94966eb76c..e5e0e350e50b 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java @@ -87,7 +87,7 @@ public Integer next() { // Main reader class to test base class iteration logic. // Keeps track of iterator closure. 
private static class ClosureTrackingReader extends BaseReader { - private Map tracker = Maps.newHashMap(); + private final Map tracker = Maps.newHashMap(); ClosureTrackingReader(Table table, List tasks) { super(table, new BaseCombinedScanTask(tasks), null, null, false); diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java index a5a3da39ad1f..693bcec9eb05 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java @@ -92,7 +92,7 @@ public TestDataFrameWrites(String format) { private Map tableProperties; - private org.apache.spark.sql.types.StructType sparkSchema = + private final org.apache.spark.sql.types.StructType sparkSchema = new org.apache.spark.sql.types.StructType( new org.apache.spark.sql.types.StructField[] { new org.apache.spark.sql.types.StructField( @@ -107,16 +107,16 @@ public TestDataFrameWrites(String format) { org.apache.spark.sql.types.Metadata.empty()) }); - private Schema icebergSchema = + private final Schema icebergSchema = new Schema( Types.NestedField.optional(1, "optionalField", Types.StringType.get()), Types.NestedField.required(2, "requiredField", Types.StringType.get())); - private List data0 = + private final List data0 = Arrays.asList( "{\"optionalField\": \"a1\", \"requiredField\": \"bid_001\"}", "{\"optionalField\": \"a2\", \"requiredField\": \"bid_002\"}"); - private List data1 = + private final List data1 = Arrays.asList( "{\"optionalField\": \"d1\", \"requiredField\": \"bid_101\"}", "{\"optionalField\": \"d2\", \"requiredField\": \"bid_102\"}", @@ -221,16 +221,15 @@ private void writeDataWithFailOnPartition( final int numPartitions = 10; final int partitionToFail = new Random().nextInt(numPartitions); MapPartitionsFunction failOnFirstPartitionFunc = - (MapPartitionsFunction) - input -> { - int partitionId = TaskContext.getPartitionId(); + input -> { + int partitionId = TaskContext.getPartitionId(); - if (partitionId == partitionToFail) { - throw new SparkException( - String.format("Intended exception in partition %d !", partitionId)); - } - return input; - }; + if (partitionId == partitionToFail) { + throw new SparkException( + String.format("Intended exception in partition %d !", partitionId)); + } + return input; + }; Dataset df = createDataset(records, schema) @@ -286,8 +285,8 @@ public void testNullableWithWriteOption() throws IOException { "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2")); File location = new File(temp.newFolder("parquet"), "test"); - String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString()); - String targetPath = String.format("%s/nullable_poc/targetFolder/", location.toString()); + String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location); + String targetPath = String.format("%s/nullable_poc/targetFolder/", location); tableProperties = ImmutableMap.of(TableProperties.WRITE_DATA_LOCATION, targetPath); @@ -339,8 +338,8 @@ public void testNullableWithSparkSqlOption() throws IOException { "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2")); File location = new File(temp.newFolder("parquet"), "test"); - String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString()); - String targetPath = 
String.format("%s/nullable_poc/targetFolder/", location.toString()); + String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location); + String targetPath = String.format("%s/nullable_poc/targetFolder/", location); tableProperties = ImmutableMap.of(TableProperties.WRITE_DATA_LOCATION, targetPath); diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java index 7313c18cc09d..6508b80e9c92 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java @@ -92,7 +92,7 @@ public TestIdentityPartitionData(String format, boolean vectorized) { @Rule public TemporaryFolder temp = new TemporaryFolder(); - private PartitionSpec spec = + private final PartitionSpec spec = PartitionSpec.builderFor(LOG_SCHEMA).identity("date").identity("level").build(); private Table table = null; private Dataset logs = null; diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java index 4ef022c50c59..86d709cf17ba 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java @@ -163,7 +163,7 @@ private static Instant getInstant(String timestampWithoutZone) { @Rule public TemporaryFolder temp = new TemporaryFolder(); - private PartitionSpec spec = + private final PartitionSpec spec = PartitionSpec.builderFor(LOG_SCHEMA) .identity("date") .identity("level") diff --git a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java index bd20a628caa8..f420f1b955c0 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java @@ -116,7 +116,7 @@ public void testStreamingWriteAppendMode() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); Assert.assertTrue("The commit file must be deleted", lastCommitFile.delete()); // restart the query from the checkpoint @@ -176,7 +176,7 @@ public void testStreamingWriteCompleteMode() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); Assert.assertTrue("The commit file must be deleted", lastCommitFile.delete()); // restart the query from the checkpoint @@ -236,7 +236,7 @@ public void testStreamingWriteCompleteModeWithProjection() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); Assert.assertTrue("The commit file must be deleted", lastCommitFile.delete()); // restart the query from the checkpoint diff --git 
a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java index bf2200004282..36e887f49b55 100644 --- a/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java +++ b/spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java @@ -278,7 +278,7 @@ public void testCreateTableLocation() throws Exception { File tableLocation = temp.newFolder(); Assert.assertTrue(tableLocation.delete()); - String location = "file:" + tableLocation.toString(); + String location = "file:" + tableLocation; sql( "CREATE TABLE %s " diff --git a/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java b/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java index 5a1cc6343424..5b5dc1501335 100644 --- a/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java +++ b/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java @@ -262,7 +262,7 @@ protected void createOrReplaceView(String name, List data, Encoder enc private Dataset toDS(String schema, String jsonData) { List jsonRows = Arrays.stream(jsonData.split("\n")) - .filter(str -> str.trim().length() > 0) + .filter(str -> !str.trim().isEmpty()) .collect(Collectors.toList()); Dataset jsonDS = spark.createDataset(jsonRows, Encoders.STRING()); diff --git a/spark/v3.4/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java b/spark/v3.4/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java index 63d24f7da553..d8f9301a7d82 100644 --- a/spark/v3.4/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java +++ b/spark/v3.4/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java @@ -29,7 +29,7 @@ class RandomGeneratingUDF implements Serializable { private final long uniqueValues; - private Random rand = new Random(); + private final Random rand = new Random(); RandomGeneratingUDF(long uniqueValues) { this.uniqueValues = uniqueValues; @@ -43,8 +43,7 @@ UserDefinedFunction randomLongUDF() { UserDefinedFunction randomString() { return udf( - () -> (String) RandomUtil.generatePrimitive(Types.StringType.get(), rand), - DataTypes.StringType) + () -> RandomUtil.generatePrimitive(Types.StringType.get(), rand), DataTypes.StringType) .asNondeterministic() .asNonNullable(); } diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java index bbbb46e803e2..f76f12355f1f 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java @@ -203,7 +203,7 @@ public Type map(Types.MapType map, Supplier keyResult, Supplier valu "Cannot project a map of optional values as required values: %s", map); Preconditions.checkArgument( - StringType.class.isInstance(requestedMap.keyType()), + requestedMap.keyType() instanceof StringType, "Invalid map key type (not string): %s", requestedMap.keyType()); diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java index a81877c98b3d..cdf250b61b8f 
100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java @@ -888,7 +888,7 @@ public static List getPartitions( JavaConverters.collectionAsScalaIterableConverter(ImmutableList.of(rootPath)) .asScala() .toSeq(), - scala.collection.immutable.Map$.MODULE$.empty(), + scala.collection.immutable.Map$.MODULE$.empty(), userSpecifiedSchema, fileStatusCache, Option.empty(), diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java index 5af6b46b3178..6f57c7ae376c 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java @@ -182,7 +182,7 @@ public static List getPartitions( Option> scalaPartitionFilter; if (partitionFilter != null && !partitionFilter.isEmpty()) { Builder, scala.collection.immutable.Map> builder = - Map$.MODULE$.newBuilder(); + Map$.MODULE$.newBuilder(); partitionFilter.forEach((key, value) -> builder.$plus$eq(Tuple2.apply(key, value))); scalaPartitionFilter = Option.apply(builder.result()); } else { diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java index b00ed42008f1..5fbb4117feb8 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java @@ -116,7 +116,7 @@ public class DeleteOrphanFilesSparkAction extends BaseSparkAction equalSchemes = flattenMap(EQUAL_SCHEMES_DEFAULT); private Map equalAuthorities = Collections.emptyMap(); private PrefixMismatchMode prefixMismatchMode = PrefixMismatchMode.ERROR; - private String location = null; + private String location; private long olderThanTimestamp = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3); private Dataset compareToFileList; private Consumer deleteFunc = null; diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java index 7c516b96754a..a4c6642a3edf 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java @@ -379,7 +379,7 @@ private Result doExecuteWithPartialProgress( Stream toGroupStream( RewriteExecutionContext ctx, Map>> groupsByPartition) { return groupsByPartition.entrySet().stream() - .filter(e -> e.getValue().size() != 0) + .filter(e -> !e.getValue().isEmpty()) .flatMap( e -> { StructLike partition = e.getKey(); diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java index e9edfeb985c7..60e2b11881cb 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java @@ -103,9 +103,9 @@ public class RewriteManifestsSparkAction private final long targetManifestSizeBytes; private final boolean 
shouldStageManifests; - private PartitionSpec spec = null; + private PartitionSpec spec; private Predicate predicate = manifest -> true; - private String outputLocation = null; + private String outputLocation; RewriteManifestsSparkAction(SparkSession spark, Table table) { super(spark); diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java index bdb0ee35273f..ccf874716db0 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java @@ -310,7 +310,7 @@ private Result doExecuteWithPartialProgress( // stop commit service commitService.close(); List commitResults = commitService.results(); - if (commitResults.size() == 0) { + if (commitResults.isEmpty()) { LOG.error( "{} is true but no rewrite commits succeeded. Check the logs to determine why the individual " + "commits failed. If this is persistent it may help to increase {} which will break the rewrite operation " @@ -332,7 +332,7 @@ private Stream toGroupStream( RewriteExecutionContext ctx, Map>> groupsByPartition) { return groupsByPartition.entrySet().stream() - .filter(e -> e.getValue().size() != 0) + .filter(e -> !e.getValue().isEmpty()) .flatMap( e -> { StructLike partition = e.getKey(); diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java index 1a4f7052de39..e306bab32bcd 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java @@ -561,7 +561,7 @@ private static class InternalRowWriter extends ParquetValueWriters.StructWriter< private InternalRowWriter(List> writers, List types) { super(writers); - this.types = types.toArray(new DataType[types.size()]); + this.types = types.toArray(new DataType[0]); } @Override diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java index 9b075b675565..50a1259c8626 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java @@ -48,7 +48,7 @@ class SparkFileWriterFactory extends BaseFileWriterFactory { private StructType dataSparkType; private StructType equalityDeleteSparkType; private StructType positionDeleteSparkType; - private Map writeProperties; + private final Map writeProperties; SparkFileWriterFactory( Table table, diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java index b430e6fca233..6b97e48133fd 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java @@ -95,7 +95,7 @@ public class SparkScanBuilder private final List metaColumns = Lists.newArrayList(); private final InMemoryMetricsReporter metricsReporter; - private Schema schema = null; + private 
Schema schema; private boolean caseSensitive; private List filterExpressions = null; private Predicate[] pushedPredicates = NO_PREDICATES; diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java index 25393888f95c..c5c86c3ebf28 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java +++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java @@ -44,7 +44,7 @@ class SparkStagedScanBuilder implements ScanBuilder, SupportsPushDownRequiredCol private final SparkReadConf readConf; private final List metaColumns = Lists.newArrayList(); - private Schema schema = null; + private Schema schema; SparkStagedScanBuilder(SparkSession spark, Table table, CaseInsensitiveStringMap options) { this.spark = spark; diff --git a/spark/v3.4/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java b/spark/v3.4/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java index 2a89ac73e2c6..0cae0451905b 100644 --- a/spark/v3.4/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java +++ b/spark/v3.4/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java @@ -33,7 +33,7 @@ public NoSuchProcedureException(Identifier ident) { Option.empty(), Option.empty(), Option.empty(), - Map$.MODULE$.empty(), + Map$.MODULE$.empty(), new QueryContext[0]); } } diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java index c96ed2909fe7..028d495b894d 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java @@ -138,7 +138,7 @@ public static Object[][] parameters() { @Rule public TemporaryFolder temp = new TemporaryFolder(); - private String baseTableName = "baseTable"; + private final String baseTableName = "baseTable"; private File tableDir; private String tableLocation; private final String type; diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java index 478afcf09ae3..360b9ff20ec0 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java @@ -67,7 +67,7 @@ public static List generateList(Schema schema, int numRecords, long seed public static Iterable generateSpark(Schema schema, int numRecords, long seed) { return () -> new Iterator() { - private SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed); + private final SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed); private int count = 0; @Override @@ -114,7 +114,7 @@ private static Iterable newIterable( return () -> new Iterator() { private int count = 0; - private RandomDataGenerator generator = newGenerator.get(); + private final RandomDataGenerator generator = newGenerator.get(); @Override public boolean hasNext() { diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java index 
53a35eec61ce..2627ec0d2b55 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java @@ -22,7 +22,7 @@ import java.util.concurrent.atomic.AtomicInteger; public class LogMessage { - private static AtomicInteger idCounter = new AtomicInteger(0); + private static final AtomicInteger idCounter = new AtomicInteger(0); static LogMessage debug(String date, String message) { return new LogMessage(idCounter.getAndIncrement(), date, "DEBUG", message); diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java index 3d94966eb76c..e5e0e350e50b 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java @@ -87,7 +87,7 @@ public Integer next() { // Main reader class to test base class iteration logic. // Keeps track of iterator closure. private static class ClosureTrackingReader extends BaseReader { - private Map tracker = Maps.newHashMap(); + private final Map tracker = Maps.newHashMap(); ClosureTrackingReader(Table table, List tasks) { super(table, new BaseCombinedScanTask(tasks), null, null, false); diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java index ea329f96d5b9..6b188cd1b120 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java @@ -215,7 +215,7 @@ private String getCompressionType(InputFile inputFile) throws Exception { return footer.getBlocks().get(0).getColumns().get(0).getCodec().name(); default: FileContext fc = FileContext.getFileContext(CONF); - GenericDatumReader reader = new GenericDatumReader(); + GenericDatumReader reader = new GenericDatumReader<>(); DataFileReader fileReader = (DataFileReader) DataFileReader.openReader( diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java index a5a3da39ad1f..693bcec9eb05 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java @@ -92,7 +92,7 @@ public TestDataFrameWrites(String format) { private Map tableProperties; - private org.apache.spark.sql.types.StructType sparkSchema = + private final org.apache.spark.sql.types.StructType sparkSchema = new org.apache.spark.sql.types.StructType( new org.apache.spark.sql.types.StructField[] { new org.apache.spark.sql.types.StructField( @@ -107,16 +107,16 @@ public TestDataFrameWrites(String format) { org.apache.spark.sql.types.Metadata.empty()) }); - private Schema icebergSchema = + private final Schema icebergSchema = new Schema( Types.NestedField.optional(1, "optionalField", Types.StringType.get()), Types.NestedField.required(2, "requiredField", Types.StringType.get())); - private List data0 = + private final List data0 = Arrays.asList( "{\"optionalField\": \"a1\", \"requiredField\": \"bid_001\"}", "{\"optionalField\": \"a2\", \"requiredField\": \"bid_002\"}"); - private List data1 = + 
private final List data1 = Arrays.asList( "{\"optionalField\": \"d1\", \"requiredField\": \"bid_101\"}", "{\"optionalField\": \"d2\", \"requiredField\": \"bid_102\"}", @@ -221,16 +221,15 @@ private void writeDataWithFailOnPartition( final int numPartitions = 10; final int partitionToFail = new Random().nextInt(numPartitions); MapPartitionsFunction failOnFirstPartitionFunc = - (MapPartitionsFunction) - input -> { - int partitionId = TaskContext.getPartitionId(); + input -> { + int partitionId = TaskContext.getPartitionId(); - if (partitionId == partitionToFail) { - throw new SparkException( - String.format("Intended exception in partition %d !", partitionId)); - } - return input; - }; + if (partitionId == partitionToFail) { + throw new SparkException( + String.format("Intended exception in partition %d !", partitionId)); + } + return input; + }; Dataset df = createDataset(records, schema) @@ -286,8 +285,8 @@ public void testNullableWithWriteOption() throws IOException { "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2")); File location = new File(temp.newFolder("parquet"), "test"); - String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString()); - String targetPath = String.format("%s/nullable_poc/targetFolder/", location.toString()); + String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location); + String targetPath = String.format("%s/nullable_poc/targetFolder/", location); tableProperties = ImmutableMap.of(TableProperties.WRITE_DATA_LOCATION, targetPath); @@ -339,8 +338,8 @@ public void testNullableWithSparkSqlOption() throws IOException { "Spark 3 rejects writing nulls to a required column", spark.version().startsWith("2")); File location = new File(temp.newFolder("parquet"), "test"); - String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString()); - String targetPath = String.format("%s/nullable_poc/targetFolder/", location.toString()); + String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location); + String targetPath = String.format("%s/nullable_poc/targetFolder/", location); tableProperties = ImmutableMap.of(TableProperties.WRITE_DATA_LOCATION, targetPath); diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java index 45a523917f05..4ee77345dbe5 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java @@ -102,7 +102,7 @@ public TestIdentityPartitionData(String format, boolean vectorized, PlanningMode @Rule public TemporaryFolder temp = new TemporaryFolder(); - private PartitionSpec spec = + private final PartitionSpec spec = PartitionSpec.builderFor(LOG_SCHEMA).identity("date").identity("level").build(); private Table table = null; private Dataset logs = null; diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java index c00549c68f3b..639d37c79336 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java @@ -169,7 +169,7 @@ private static Instant getInstant(String timestampWithoutZone) { @Rule public 
TemporaryFolder temp = new TemporaryFolder(); - private PartitionSpec spec = + private final PartitionSpec spec = PartitionSpec.builderFor(LOG_SCHEMA) .identity("date") .identity("level") diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java index bd20a628caa8..f420f1b955c0 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java @@ -116,7 +116,7 @@ public void testStreamingWriteAppendMode() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); Assert.assertTrue("The commit file must be deleted", lastCommitFile.delete()); // restart the query from the checkpoint @@ -176,7 +176,7 @@ public void testStreamingWriteCompleteMode() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); Assert.assertTrue("The commit file must be deleted", lastCommitFile.delete()); // restart the query from the checkpoint @@ -236,7 +236,7 @@ public void testStreamingWriteCompleteModeWithProjection() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); Assert.assertTrue("The commit file must be deleted", lastCommitFile.delete()); // restart the query from the checkpoint diff --git a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java index 4e34d662ffd1..927f6e21439f 100644 --- a/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java +++ b/spark/v3.4/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java @@ -277,7 +277,7 @@ public void testCreateTableLocation() throws Exception { File tableLocation = temp.newFolder(); Assert.assertTrue(tableLocation.delete()); - String location = "file:" + tableLocation.toString(); + String location = "file:" + tableLocation; sql( "CREATE TABLE %s "
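
The notes below are editor-added illustrations of the recurring idioms this patch applies; none of the classes shown exist in the Iceberg codebase. This first sketch mirrors the FlinkPackage.version() hunk: a lazily cached version string whose local variable no longer needs a dummy `= null` initializer, because both branches of the try/catch assign it before use. The AtomicReference-based cache is an assumption about the surrounding class, inferred from the `VERSION.get()` call visible in the hunk.

```java
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical stand-in for the FlinkPackage pattern touched above.
public final class VersionHolder {
  private static final AtomicReference<String> VERSION = new AtomicReference<>();
  private static final String UNKNOWN = "unknown";

  private VersionHolder() {}

  public static String version() {
    if (VERSION.get() == null) {
      String detected; // no "= null" needed: every path below assigns it
      try {
        detected = detectFromJar();
      } catch (RuntimeException e) {
        detected = UNKNOWN; // fall back instead of propagating
      }
      VERSION.compareAndSet(null, detected);
    }
    return VERSION.get();
  }

  private static String detectFromJar() {
    Package pkg = VersionHolder.class.getPackage();
    String v = (pkg != null) ? pkg.getImplementationVersion() : null;
    return v != null ? v : UNKNOWN;
  }
}
```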
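A second sketch, for the SerializerHelper.readLongUTF() hunks: dropping the explicit `(int)` casts is behavior-preserving, since assigning a `byte` to an `int` already performs the same sign-extending widening conversion, and the continuation-byte check masks the high bits regardless of sign. The class name is hypothetical.

```java
public class ByteWideningDemo {
  public static void main(String[] args) {
    byte[] bytearr = {(byte) 0xBF}; // a UTF-8 continuation byte, negative as a Java byte

    int withCast = (int) bytearr[0]; // -65: explicit cast, sign-extended
    int withoutCast = bytearr[0];    // -65: implicit widening does exactly the same thing
    System.out.println(withCast == withoutCast); // true

    // The malformed-input check only looks at the two high bits,
    // so sign extension does not change the outcome either way.
    System.out.println((withoutCast & 0xC0) == 0x80); // true: valid continuation byte
  }
}
```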
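Finally, a sketch of the array-conversion idiom used in the AvroGenericRecordToRowDataMapper and SparkParquetWriters hunks: a plain `Collection.toArray(new T[0])` replaces both the stream-based `stream().toArray(T[]::new)` detour and the presized `new T[list.size()]` form, producing identical results. Names here are illustrative only.

```java
import java.util.Arrays;
import java.util.List;

public class ToArrayDemo {
  public static void main(String[] args) {
    List<String> names = Arrays.asList("a", "b", "c");

    // Three equivalent conversions; the patch standardizes on the middle one.
    String[] viaStream = names.stream().toArray(String[]::new);
    String[] viaCollection = names.toArray(new String[0]);
    String[] viaSized = names.toArray(new String[names.size()]);

    System.out.println(viaStream.length);     // 3
    System.out.println(viaCollection.length); // 3
    System.out.println(viaSized.length);      // 3
  }
}
```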