diff --git a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java index d957e82f92ed..417785c400a1 100644 --- a/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java +++ b/aliyun/src/main/java/org/apache/iceberg/aliyun/oss/BaseOSSFile.java @@ -29,7 +29,7 @@ abstract class BaseOSSFile { private final OSS client; private final OSSURI uri; - private AliyunProperties aliyunProperties; + private final AliyunProperties aliyunProperties; private SimplifiedObjectMeta metadata; private final MetricsContext metrics; diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockExtension.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockExtension.java index 5a9b06090392..9aae5b777a8d 100644 --- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockExtension.java +++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockExtension.java @@ -101,7 +101,7 @@ public void tearDownBucket(String bucket) { } public static class Builder { - private Map props = Maps.newHashMap(); + private final Map props = Maps.newHashMap(); public Builder silent() { props.put(AliyunOSSMockApp.PROP_SILENT, true); diff --git a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java index e98f228b6f37..7f7546ec233b 100644 --- a/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java +++ b/aliyun/src/test/java/org/apache/iceberg/aliyun/oss/mock/AliyunOSSMockLocalController.java @@ -420,11 +420,11 @@ public boolean markSupported() { * A caller has caused a request that would cross the {@code maxLength} boundary. * * @param maxLength The max count of bytes to read. - * @param count The count of bytes read. + * @param bytesRead The count of bytes read. * @throws IOException Subclasses may throw. 
* @since 2.12.0 */ - protected void onMaxLength(final long maxLength, final long pCount) throws IOException { + protected void onMaxLength(final long maxLength, final long bytesRead) throws IOException { // for subclasses } diff --git a/api/src/main/java/org/apache/iceberg/PartitionSpec.java b/api/src/main/java/org/apache/iceberg/PartitionSpec.java index 8f1df794030a..08a1c4f9ecfd 100644 --- a/api/src/main/java/org/apache/iceberg/PartitionSpec.java +++ b/api/src/main/java/org/apache/iceberg/PartitionSpec.java @@ -365,7 +365,7 @@ public static class Builder { private final Schema schema; private final List<PartitionField> fields = Lists.newArrayList(); private final Set<String> partitionNames = Sets.newHashSet(); - private Map<Map.Entry<Integer, String>, PartitionField> dedupFields = Maps.newHashMap(); + private final Map<Map.Entry<Integer, String>, PartitionField> dedupFields = Maps.newHashMap(); private int specId = 0; private final AtomicInteger lastAssignedFieldId = new AtomicInteger(unpartitionedLastAssignedId()); @@ -409,8 +409,7 @@ private void checkAndAddPartitionName(String name, Integer sourceColumnId) { name); } } - Preconditions.checkArgument( - name != null && !name.isEmpty(), "Cannot use empty or null partition name: %s", name); + Preconditions.checkArgument(!name.isEmpty(), "Cannot use empty partition name: %s", name); Preconditions.checkArgument( !partitionNames.contains(name), "Cannot use partition name more than once: %s", name); partitionNames.add(name); diff --git a/api/src/main/java/org/apache/iceberg/Schema.java b/api/src/main/java/org/apache/iceberg/Schema.java index d5ec3f250982..7ff712b62790 100644 --- a/api/src/main/java/org/apache/iceberg/Schema.java +++ b/api/src/main/java/org/apache/iceberg/Schema.java @@ -67,8 +67,8 @@ public class Schema implements Serializable { private transient Map<Integer, Accessor<StructLike>> idToAccessor = null; private transient Map<Integer, String> idToName = null; private transient Set<Integer> identifierFieldIdSet = null; - private transient Map<Integer, Integer> idsToReassigned; - private transient Map<Integer, Integer> idsToOriginal; + private final transient Map<Integer, Integer> idsToReassigned; + private final transient Map<Integer, Integer> idsToOriginal; public Schema(List<Types.NestedField> columns, Map<String, Integer> aliases) { this(columns, aliases, ImmutableSet.of()); diff --git a/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java b/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java index 4a112e21c26c..48d3f7b57734 100644 --- a/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java +++ b/api/src/main/java/org/apache/iceberg/SortOrderBuilder.java @@ -113,5 +113,5 @@ default R desc(Term term) { default R caseSensitive(boolean caseSensitive) { throw new UnsupportedOperationException( this.getClass().getName() + " doesn't implement caseSensitive"); - }; + } } diff --git a/api/src/main/java/org/apache/iceberg/io/FileIO.java b/api/src/main/java/org/apache/iceberg/io/FileIO.java index a521cbf79d7f..de4bc2e12a81 100644 --- a/api/src/main/java/org/apache/iceberg/io/FileIO.java +++ b/api/src/main/java/org/apache/iceberg/io/FileIO.java @@ -94,7 +94,7 @@ default void deleteFile(OutputFile file) { */ default Map<String, String> properties() { throw new UnsupportedOperationException( - String.format("%s does not expose configuration properties", this.getClass().toString())); + String.format("%s does not expose configuration properties", this.getClass())); } /** diff --git a/api/src/main/java/org/apache/iceberg/metrics/DefaultTimer.java b/api/src/main/java/org/apache/iceberg/metrics/DefaultTimer.java index 6cf0b380b4cb..4ad0c8f2b161 100644 --- a/api/src/main/java/org/apache/iceberg/metrics/DefaultTimer.java +++
b/api/src/main/java/org/apache/iceberg/metrics/DefaultTimer.java @@ -66,31 +66,22 @@ public void record(long amount, TimeUnit unit) { @Override public T time(Supplier supplier) { - Timed timed = start(); - try { + try (Timed ignore = start()) { return supplier.get(); - } finally { - timed.stop(); } } @Override public T timeCallable(Callable callable) throws Exception { - Timed timed = start(); - try { + try (Timed ignore = start()) { return callable.call(); - } finally { - timed.stop(); } } @Override public void time(Runnable runnable) { - Timed timed = start(); - try { + try (Timed ignore = start()) { runnable.run(); - } finally { - timed.stop(); } } diff --git a/api/src/main/java/org/apache/iceberg/util/BucketUtil.java b/api/src/main/java/org/apache/iceberg/util/BucketUtil.java index 1f3a68aef52b..113850c7b603 100644 --- a/api/src/main/java/org/apache/iceberg/util/BucketUtil.java +++ b/api/src/main/java/org/apache/iceberg/util/BucketUtil.java @@ -36,7 +36,7 @@ public class BucketUtil { private BucketUtil() {} public static int hash(int value) { - return MURMUR3.hashLong((long) value).asInt(); + return MURMUR3.hashLong(value).asInt(); } public static int hash(long value) { @@ -56,7 +56,7 @@ private static long doubleToLongBits(double value) { } public static int hash(float value) { - return MURMUR3.hashLong(doubleToLongBits((double) value)).asInt(); + return MURMUR3.hashLong(doubleToLongBits(value)).asInt(); } public static int hash(double value) { diff --git a/api/src/test/java/org/apache/iceberg/io/TestableCloseableIterable.java b/api/src/test/java/org/apache/iceberg/io/TestableCloseableIterable.java index abd088cf6b25..adb6a60e2491 100644 --- a/api/src/test/java/org/apache/iceberg/io/TestableCloseableIterable.java +++ b/api/src/test/java/org/apache/iceberg/io/TestableCloseableIterable.java @@ -22,7 +22,7 @@ public class TestableCloseableIterable implements CloseableIterable { private Boolean closed = false; - private TestableCloseableIterator iterator = new TestableCloseableIterator(); + private final TestableCloseableIterator iterator = new TestableCloseableIterator(); @Override public CloseableIterator iterator() { diff --git a/api/src/test/java/org/apache/iceberg/transforms/TestBucketing.java b/api/src/test/java/org/apache/iceberg/transforms/TestBucketing.java index f397f94f0ce5..28d01efa6d3b 100644 --- a/api/src/test/java/org/apache/iceberg/transforms/TestBucketing.java +++ b/api/src/test/java/org/apache/iceberg/transforms/TestBucketing.java @@ -383,7 +383,7 @@ private int hashBytes(byte[] bytes, int offset, int length) { */ private static UUID newUUID(byte[] bytes) { try { - return uuidBytesConstructor.newInstance((Object) bytes); + return uuidBytesConstructor.newInstance(bytes); } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { throw new RuntimeException(e); } diff --git a/api/src/test/java/org/apache/iceberg/transforms/TestBucketingProjection.java b/api/src/test/java/org/apache/iceberg/transforms/TestBucketingProjection.java index 52a6d2cd49db..9ff11ef74b26 100644 --- a/api/src/test/java/org/apache/iceberg/transforms/TestBucketingProjection.java +++ b/api/src/test/java/org/apache/iceberg/transforms/TestBucketingProjection.java @@ -32,6 +32,7 @@ import java.math.BigDecimal; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.UUID; import java.util.stream.Collectors; import org.apache.iceberg.PartitionSpec; @@ -309,7 +310,7 @@ public void testBucketStringInclusive() { @Test public void 
testBucketByteBufferStrict() throws Exception { - ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes("UTF-8")); + ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes(StandardCharsets.UTF_8)); Schema schema = new Schema(optional(1, "value", Types.BinaryType.get())); PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("value", 10).build(); @@ -322,7 +323,7 @@ public void testBucketByteBufferStrict() throws Exception { assertProjectionStrictValue( spec, greaterThanOrEqual("value", value), Expression.Operation.FALSE); - ByteBuffer anotherValue = ByteBuffer.wrap("abcdehij".getBytes("UTF-8")); + ByteBuffer anotherValue = ByteBuffer.wrap("abcdehij".getBytes(StandardCharsets.UTF_8)); assertProjectionStrict( spec, notIn("value", value, anotherValue), Expression.Operation.NOT_IN, "[4, 6]"); assertProjectionStrictValue(spec, in("value", value, anotherValue), Expression.Operation.FALSE); @@ -330,7 +331,7 @@ public void testBucketByteBufferStrict() throws Exception { @Test public void testBucketByteBufferInclusive() throws Exception { - ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes("UTF-8")); + ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes(StandardCharsets.UTF_8)); Schema schema = new Schema(optional(1, "value", Types.BinaryType.get())); PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("value", 10).build(); @@ -344,7 +345,7 @@ public void testBucketByteBufferInclusive() throws Exception { assertProjectionInclusiveValue( spec, greaterThanOrEqual("value", value), Expression.Operation.TRUE); - ByteBuffer anotherValue = ByteBuffer.wrap("abcdehij".getBytes("UTF-8")); + ByteBuffer anotherValue = ByteBuffer.wrap("abcdehij".getBytes(StandardCharsets.UTF_8)); assertProjectionInclusive( spec, in("value", value, anotherValue), Expression.Operation.IN, "[4, 6]"); assertProjectionInclusiveValue( diff --git a/api/src/test/java/org/apache/iceberg/transforms/TestTruncatesProjection.java b/api/src/test/java/org/apache/iceberg/transforms/TestTruncatesProjection.java index 588f6fc3bd03..67fc27a8d1d5 100644 --- a/api/src/test/java/org/apache/iceberg/transforms/TestTruncatesProjection.java +++ b/api/src/test/java/org/apache/iceberg/transforms/TestTruncatesProjection.java @@ -32,6 +32,7 @@ import java.math.BigDecimal; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.stream.Collectors; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; @@ -447,10 +448,11 @@ public void testStringInclusive() { @Test public void testBinaryStrict() throws Exception { - ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes("UTF-8")); + ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes(StandardCharsets.UTF_8)); Schema schema = new Schema(optional(1, "value", Types.BinaryType.get())); PartitionSpec spec = PartitionSpec.builderFor(schema).truncate("value", 5).build(); - String expectedValue = TransformUtil.base64encode(ByteBuffer.wrap("abcde".getBytes("UTF-8"))); + String expectedValue = + TransformUtil.base64encode(ByteBuffer.wrap("abcde".getBytes(StandardCharsets.UTF_8))); assertProjectionStrict(spec, lessThan("value", value), Expression.Operation.LT, expectedValue); assertProjectionStrict( @@ -463,7 +465,7 @@ public void testBinaryStrict() throws Exception { spec, notEqual("value", value), Expression.Operation.NOT_EQ, expectedValue); assertProjectionStrictValue(spec, equal("value", value), Expression.Operation.FALSE); - ByteBuffer anotherValue = ByteBuffer.wrap("abcdehij".getBytes("UTF-8")); + ByteBuffer anotherValue = 
ByteBuffer.wrap("abcdehij".getBytes(StandardCharsets.UTF_8)); assertProjectionStrict( spec, notIn("value", value, anotherValue), @@ -474,10 +476,11 @@ public void testBinaryStrict() throws Exception { @Test public void testBinaryInclusive() throws Exception { - ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes("UTF-8")); + ByteBuffer value = ByteBuffer.wrap("abcdefg".getBytes(StandardCharsets.UTF_8)); Schema schema = new Schema(optional(1, "value", Types.BinaryType.get())); PartitionSpec spec = PartitionSpec.builderFor(schema).truncate("value", 5).build(); - String expectedValue = TransformUtil.base64encode(ByteBuffer.wrap("abcde".getBytes("UTF-8"))); + String expectedValue = + TransformUtil.base64encode(ByteBuffer.wrap("abcde".getBytes(StandardCharsets.UTF_8))); assertProjectionInclusive( spec, lessThan("value", value), Expression.Operation.LT_EQ, expectedValue); @@ -490,7 +493,7 @@ public void testBinaryInclusive() throws Exception { assertProjectionInclusive(spec, equal("value", value), Expression.Operation.EQ, expectedValue); assertProjectionInclusiveValue(spec, notEqual("value", value), Expression.Operation.TRUE); - ByteBuffer anotherValue = ByteBuffer.wrap("abcdehij".getBytes("UTF-8")); + ByteBuffer anotherValue = ByteBuffer.wrap("abcdehij".getBytes(StandardCharsets.UTF_8)); assertProjectionInclusive( spec, in("value", value, anotherValue), diff --git a/api/src/test/java/org/apache/iceberg/util/TestCharSequenceMap.java b/api/src/test/java/org/apache/iceberg/util/TestCharSequenceMap.java index 2154cf31a9f6..0d74aa5bf2f2 100644 --- a/api/src/test/java/org/apache/iceberg/util/TestCharSequenceMap.java +++ b/api/src/test/java/org/apache/iceberg/util/TestCharSequenceMap.java @@ -31,8 +31,8 @@ public class TestCharSequenceMap { @Test public void nullString() { - assertThat(CharSequenceMap.create()).doesNotContainKey((String) null); - assertThat(CharSequenceMap.create()).doesNotContainValue((String) null); + assertThat(CharSequenceMap.create()).doesNotContainKey(null); + assertThat(CharSequenceMap.create()).doesNotContainValue(null); } @Test diff --git a/arrow/src/main/java/org/apache/iceberg/arrow/vectorized/parquet/BaseVectorizedParquetValuesReader.java b/arrow/src/main/java/org/apache/iceberg/arrow/vectorized/parquet/BaseVectorizedParquetValuesReader.java index 247f4d4f849e..b0a75cceb5b3 100644 --- a/arrow/src/main/java/org/apache/iceberg/arrow/vectorized/parquet/BaseVectorizedParquetValuesReader.java +++ b/arrow/src/main/java/org/apache/iceberg/arrow/vectorized/parquet/BaseVectorizedParquetValuesReader.java @@ -145,7 +145,7 @@ private int readIntLittleEndian() throws IOException { int ch3 = inputStream.read(); int ch2 = inputStream.read(); int ch1 = inputStream.read(); - return (ch1 << 24) + (ch2 << 16) + (ch3 << 8) + (ch4 << 0); + return (ch1 << 24) + (ch2 << 16) + (ch3 << 8) + ch4; } /** Reads the next byteWidth little endian int. 
*/ @@ -166,7 +166,7 @@ private int readIntLittleEndianPaddedOnBitWidth() throws IOException { int ch3 = inputStream.read(); int ch2 = inputStream.read(); int ch1 = inputStream.read(); - return (ch1 << 16) + (ch2 << 8) + (ch3 << 0); + return (ch1 << 16) + (ch2 << 8) + ch3; } case 4: { diff --git a/aws/src/integration/java/org/apache/iceberg/aws/lakeformation/LakeFormationTestBase.java b/aws/src/integration/java/org/apache/iceberg/aws/lakeformation/LakeFormationTestBase.java index 5e0f66610c42..630db7a5a1b4 100644 --- a/aws/src/integration/java/org/apache/iceberg/aws/lakeformation/LakeFormationTestBase.java +++ b/aws/src/integration/java/org/apache/iceberg/aws/lakeformation/LakeFormationTestBase.java @@ -128,15 +128,12 @@ public class LakeFormationTestBase { @BeforeAll public static void beforeClass() throws Exception { - lfRegisterPathRoleName = LF_REGISTER_PATH_ROLE_PREFIX + UUID.randomUUID().toString(); - lfPrivilegedRoleName = LF_PRIVILEGED_ROLE_PREFIX + UUID.randomUUID().toString(); - lfRegisterPathRoleS3PolicyName = - LF_REGISTER_PATH_ROLE_S3_POLICY_PREFIX + UUID.randomUUID().toString(); - lfRegisterPathRoleLfPolicyName = - LF_REGISTER_PATH_ROLE_LF_POLICY_PREFIX + UUID.randomUUID().toString(); - lfRegisterPathRoleIamPolicyName = - LF_REGISTER_PATH_ROLE_IAM_POLICY_PREFIX + UUID.randomUUID().toString(); - lfPrivilegedRolePolicyName = LF_PRIVILEGED_ROLE_POLICY_PREFIX + UUID.randomUUID().toString(); + lfRegisterPathRoleName = LF_REGISTER_PATH_ROLE_PREFIX + UUID.randomUUID(); + lfPrivilegedRoleName = LF_PRIVILEGED_ROLE_PREFIX + UUID.randomUUID(); + lfRegisterPathRoleS3PolicyName = LF_REGISTER_PATH_ROLE_S3_POLICY_PREFIX + UUID.randomUUID(); + lfRegisterPathRoleLfPolicyName = LF_REGISTER_PATH_ROLE_LF_POLICY_PREFIX + UUID.randomUUID(); + lfRegisterPathRoleIamPolicyName = LF_REGISTER_PATH_ROLE_IAM_POLICY_PREFIX + UUID.randomUUID(); + lfPrivilegedRolePolicyName = LF_PRIVILEGED_ROLE_POLICY_PREFIX + UUID.randomUUID(); iam = IamClient.builder() diff --git a/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3FileIOIntegration.java b/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3FileIOIntegration.java index 9a52ae5ebfd8..18abb82ce74a 100644 --- a/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3FileIOIntegration.java +++ b/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3FileIOIntegration.java @@ -119,7 +119,7 @@ public static void afterClass() { @BeforeEach public void beforeEach() { - objectKey = String.format("%s/%s", prefix, UUID.randomUUID().toString()); + objectKey = String.format("%s/%s", prefix, UUID.randomUUID()); objectUri = String.format("s3://%s/%s", bucketName, objectKey); clientFactory.initialize(Maps.newHashMap()); } diff --git a/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3MultipartUpload.java b/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3MultipartUpload.java index ac34807db676..29d4c48927fc 100644 --- a/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3MultipartUpload.java +++ b/aws/src/integration/java/org/apache/iceberg/aws/s3/TestS3MultipartUpload.java @@ -64,7 +64,7 @@ public static void afterClass() { @BeforeEach public void before() { - String objectKey = String.format("%s/%s", prefix, UUID.randomUUID().toString()); + String objectKey = String.format("%s/%s", prefix, UUID.randomUUID()); objectUri = String.format("s3://%s/%s", bucketName, objectKey); } diff --git a/aws/src/main/java/org/apache/iceberg/aws/AwsClientProperties.java b/aws/src/main/java/org/apache/iceberg/aws/AwsClientProperties.java index 
1d03a0acbd2c..0c91f8685ae9 100644 --- a/aws/src/main/java/org/apache/iceberg/aws/AwsClientProperties.java +++ b/aws/src/main/java/org/apache/iceberg/aws/AwsClientProperties.java @@ -67,7 +67,7 @@ public class AwsClientProperties implements Serializable { public static final String CLIENT_REGION = "client.region"; private String clientRegion; - private String clientCredentialsProvider; + private final String clientCredentialsProvider; private final Map clientCredentialsProviderProperties; public AwsClientProperties() { diff --git a/aws/src/main/java/org/apache/iceberg/aws/AwsProperties.java b/aws/src/main/java/org/apache/iceberg/aws/AwsProperties.java index 0d75e2451629..5c3afc28a98b 100644 --- a/aws/src/main/java/org/apache/iceberg/aws/AwsProperties.java +++ b/aws/src/main/java/org/apache/iceberg/aws/AwsProperties.java @@ -213,25 +213,25 @@ public class AwsProperties implements Serializable { private static final String HTTP_CLIENT_PREFIX = "http-client."; private final Set stsClientAssumeRoleTags; - private String clientAssumeRoleArn; - private String clientAssumeRoleExternalId; - private int clientAssumeRoleTimeoutSec; - private String clientAssumeRoleRegion; - private String clientAssumeRoleSessionName; - private String clientCredentialsProvider; + private final String clientAssumeRoleArn; + private final String clientAssumeRoleExternalId; + private final int clientAssumeRoleTimeoutSec; + private final String clientAssumeRoleRegion; + private final String clientAssumeRoleSessionName; + private final String clientCredentialsProvider; private final Map clientCredentialsProviderProperties; - private String glueEndpoint; + private final String glueEndpoint; private String glueCatalogId; private boolean glueCatalogSkipArchive; private boolean glueCatalogSkipNameValidation; private boolean glueLakeFormationEnabled; private String dynamoDbTableName; - private String dynamoDbEndpoint; + private final String dynamoDbEndpoint; private String restSigningRegion; - private String restSigningName; + private final String restSigningName; private String restAccessKeyId; private String restSecretAccessKey; private String restSessionToken; diff --git a/aws/src/main/java/org/apache/iceberg/aws/glue/GlueCatalog.java b/aws/src/main/java/org/apache/iceberg/aws/glue/GlueCatalog.java index cdbcc79d43fa..c6b157bb5c79 100644 --- a/aws/src/main/java/org/apache/iceberg/aws/glue/GlueCatalog.java +++ b/aws/src/main/java/org/apache/iceberg/aws/glue/GlueCatalog.java @@ -387,7 +387,7 @@ public void renameTable(TableIdentifier from, TableIdentifier to) { "Cannot rename %s to %s because namespace %s does not exist", from, to, to.namespace()); } // keep metadata - Table fromTable = null; + Table fromTable; String fromTableDbName = IcebergToGlueConverter.getDatabaseName(from, awsProperties.glueCatalogSkipNameValidation()); String fromTableName = diff --git a/aws/src/main/java/org/apache/iceberg/aws/lakeformation/LakeFormationAwsClientFactory.java b/aws/src/main/java/org/apache/iceberg/aws/lakeformation/LakeFormationAwsClientFactory.java index fb0b1c23efc4..552da4bc949c 100644 --- a/aws/src/main/java/org/apache/iceberg/aws/lakeformation/LakeFormationAwsClientFactory.java +++ b/aws/src/main/java/org/apache/iceberg/aws/lakeformation/LakeFormationAwsClientFactory.java @@ -139,8 +139,8 @@ private LakeFormationClient lakeFormation() { } static class LakeFormationCredentialsProvider implements AwsCredentialsProvider { - private LakeFormationClient client; - private String tableArn; + private final LakeFormationClient client; + 
private final String tableArn; LakeFormationCredentialsProvider(LakeFormationClient lakeFormationClient, String tableArn) { this.client = lakeFormationClient; diff --git a/aws/src/main/java/org/apache/iceberg/aws/s3/S3FileIOProperties.java b/aws/src/main/java/org/apache/iceberg/aws/s3/S3FileIOProperties.java index 1bcbdf7ed36b..3414c9df7046 100644 --- a/aws/src/main/java/org/apache/iceberg/aws/s3/S3FileIOProperties.java +++ b/aws/src/main/java/org/apache/iceberg/aws/s3/S3FileIOProperties.java @@ -396,9 +396,9 @@ public class S3FileIOProperties implements Serializable { private String sseType; private String sseKey; private String sseMd5; - private String accessKeyId; - private String secretAccessKey; - private String sessionToken; + private final String accessKeyId; + private final String secretAccessKey; + private final String sessionToken; private boolean isS3AccessGrantsEnabled; private boolean isS3AccessGrantsFallbackToIamEnabled; private int multipartUploadThreads; @@ -416,11 +416,11 @@ public class S3FileIOProperties implements Serializable { private boolean isDeleteEnabled; private final Map bucketToAccessPointMapping; private boolean isPreloadClientEnabled; - private boolean isDualStackEnabled; - private boolean isPathStyleAccess; - private boolean isUseArnRegionEnabled; - private boolean isAccelerationEnabled; - private String endpoint; + private final boolean isDualStackEnabled; + private final boolean isPathStyleAccess; + private final boolean isUseArnRegionEnabled; + private final boolean isAccelerationEnabled; + private final String endpoint; private final boolean isRemoteSigningEnabled; private String writeStorageClass; private final Map allProperties; diff --git a/common/src/main/java/org/apache/iceberg/common/DynClasses.java b/common/src/main/java/org/apache/iceberg/common/DynClasses.java index af33aded80b1..3d42171847d3 100644 --- a/common/src/main/java/org/apache/iceberg/common/DynClasses.java +++ b/common/src/main/java/org/apache/iceberg/common/DynClasses.java @@ -34,7 +34,7 @@ public static class Builder { private ClassLoader loader = Thread.currentThread().getContextClassLoader(); private Class foundClass = null; private boolean nullOk = false; - private Set classNames = Sets.newLinkedHashSet(); + private final Set classNames = Sets.newLinkedHashSet(); private Builder() {} diff --git a/common/src/main/java/org/apache/iceberg/common/DynFields.java b/common/src/main/java/org/apache/iceberg/common/DynFields.java index 01afbad22ecb..a96da851315b 100644 --- a/common/src/main/java/org/apache/iceberg/common/DynFields.java +++ b/common/src/main/java/org/apache/iceberg/common/DynFields.java @@ -414,7 +414,7 @@ public StaticField buildStatic() { } private static class MakeFieldAccessible implements PrivilegedAction { - private Field hidden; + private final Field hidden; MakeFieldAccessible(Field hidden) { this.hidden = hidden; diff --git a/common/src/main/java/org/apache/iceberg/common/DynMethods.java b/common/src/main/java/org/apache/iceberg/common/DynMethods.java index ae331da84ed4..98de1e7f06ca 100644 --- a/common/src/main/java/org/apache/iceberg/common/DynMethods.java +++ b/common/src/main/java/org/apache/iceberg/common/DynMethods.java @@ -508,7 +508,7 @@ public StaticMethod buildStatic() { } private static class MakeAccessible implements PrivilegedAction { - private Method hidden; + private final Method hidden; MakeAccessible(Method hidden) { this.hidden = hidden; diff --git a/core/src/main/java/org/apache/iceberg/BaseEntriesTable.java 
b/core/src/main/java/org/apache/iceberg/BaseEntriesTable.java index 4e485d516f12..5f35704bc35b 100644 --- a/core/src/main/java/org/apache/iceberg/BaseEntriesTable.java +++ b/core/src/main/java/org/apache/iceberg/BaseEntriesTable.java @@ -54,7 +54,7 @@ abstract class BaseEntriesTable extends BaseMetadataTable { public Schema schema() { StructType partitionType = Partitioning.partitionType(table()); Schema schema = ManifestEntry.getSchema(partitionType); - if (partitionType.fields().size() < 1) { + if (partitionType.fields().isEmpty()) { // avoid returning an empty struct, which is not always supported. // instead, drop the partition field (id 102) schema = TypeUtil.selectNot(schema, Sets.newHashSet(DataFile.PARTITION_ID)); diff --git a/core/src/main/java/org/apache/iceberg/BaseFilesTable.java b/core/src/main/java/org/apache/iceberg/BaseFilesTable.java index 62e6f8acf7a5..011abffd8a2a 100644 --- a/core/src/main/java/org/apache/iceberg/BaseFilesTable.java +++ b/core/src/main/java/org/apache/iceberg/BaseFilesTable.java @@ -48,7 +48,7 @@ abstract class BaseFilesTable extends BaseMetadataTable { public Schema schema() { StructType partitionType = Partitioning.partitionType(table()); Schema schema = new Schema(DataFile.getType(partitionType).fields()); - if (partitionType.fields().size() < 1) { + if (partitionType.fields().isEmpty()) { // avoid returning an empty struct, which is not always supported. // instead, drop the partition field schema = TypeUtil.selectNot(schema, Sets.newHashSet(DataFile.PARTITION_ID)); diff --git a/core/src/main/java/org/apache/iceberg/BaseMetastoreTableOperations.java b/core/src/main/java/org/apache/iceberg/BaseMetastoreTableOperations.java index 6443cf6e60ea..50266b2edcbf 100644 --- a/core/src/main/java/org/apache/iceberg/BaseMetastoreTableOperations.java +++ b/core/src/main/java/org/apache/iceberg/BaseMetastoreTableOperations.java @@ -355,7 +355,7 @@ private static int parseVersion(String metadataLocation) { } try { - return Integer.valueOf(metadataLocation.substring(versionStart, versionEnd)); + return Integer.parseInt(metadataLocation.substring(versionStart, versionEnd)); } catch (NumberFormatException e) { LOG.warn("Unable to parse version from metadata location: {}", metadataLocation, e); return -1; diff --git a/core/src/main/java/org/apache/iceberg/BaseReplacePartitions.java b/core/src/main/java/org/apache/iceberg/BaseReplacePartitions.java index d3a8edbc7cdd..04611f50f15a 100644 --- a/core/src/main/java/org/apache/iceberg/BaseReplacePartitions.java +++ b/core/src/main/java/org/apache/iceberg/BaseReplacePartitions.java @@ -111,7 +111,7 @@ public void validate(TableMetadata currentMetadata, Snapshot parent) { @Override public List apply(TableMetadata base, Snapshot snapshot) { - if (dataSpec().fields().size() <= 0) { + if (dataSpec().fields().isEmpty()) { // replace all data in an unpartitioned table deleteByRowFilter(Expressions.alwaysTrue()); } diff --git a/core/src/main/java/org/apache/iceberg/BaseRewriteManifests.java b/core/src/main/java/org/apache/iceberg/BaseRewriteManifests.java index f3f8e5fcd7cb..dce6d4a995bd 100644 --- a/core/src/main/java/org/apache/iceberg/BaseRewriteManifests.java +++ b/core/src/main/java/org/apache/iceberg/BaseRewriteManifests.java @@ -200,7 +200,7 @@ private boolean requiresRewrite(Set currentManifests) { return false; } - if (rewrittenManifests.size() == 0) { + if (rewrittenManifests.isEmpty()) { // nothing yet processed so perform a full rewrite return true; } diff --git 
a/core/src/main/java/org/apache/iceberg/BaseTransaction.java b/core/src/main/java/org/apache/iceberg/BaseTransaction.java index 2f051466bb8b..eb8dbd2538e6 100644 --- a/core/src/main/java/org/apache/iceberg/BaseTransaction.java +++ b/core/src/main/java/org/apache/iceberg/BaseTransaction.java @@ -71,7 +71,7 @@ enum TransactionType { private final Set deletedFiles = Sets.newHashSet(); // keep track of files deleted in the most recent commit private final Consumer enqueueDelete = deletedFiles::add; - private TransactionType type; + private final TransactionType type; private TableMetadata base; private TableMetadata current; private boolean hasLastOpCommitted; diff --git a/core/src/main/java/org/apache/iceberg/ClientPoolImpl.java b/core/src/main/java/org/apache/iceberg/ClientPoolImpl.java index c3534fa22a4a..f454c4aeea53 100644 --- a/core/src/main/java/org/apache/iceberg/ClientPoolImpl.java +++ b/core/src/main/java/org/apache/iceberg/ClientPoolImpl.java @@ -40,7 +40,7 @@ public abstract class ClientPoolImpl private volatile int currentSize; private boolean closed; - private int connectionRetryWaitPeriodMs = 1000; + private static final int CONNECTION_RETRY_WAIT_PERIOD_MS = 1000; public ClientPoolImpl(int poolSize, Class reconnectExc, boolean retryByDefault) { this(poolSize, reconnectExc, retryByDefault, 1); @@ -80,7 +80,7 @@ public R run(Action action, boolean retry) throws E, InterruptedExc } catch (Exception e) { if (isConnectionException(e)) { retryAttempts++; - Thread.sleep(connectionRetryWaitPeriodMs); + Thread.sleep(CONNECTION_RETRY_WAIT_PERIOD_MS); } else { throw reconnectExc.cast(exc); } diff --git a/core/src/main/java/org/apache/iceberg/MetadataUpdate.java b/core/src/main/java/org/apache/iceberg/MetadataUpdate.java index d133b76901da..49fb1fe01c44 100644 --- a/core/src/main/java/org/apache/iceberg/MetadataUpdate.java +++ b/core/src/main/java/org/apache/iceberg/MetadataUpdate.java @@ -336,9 +336,9 @@ class SetSnapshotRef implements MetadataUpdate { private final String refName; private final Long snapshotId; private final SnapshotRefType type; - private Integer minSnapshotsToKeep; - private Long maxSnapshotAgeMs; - private Long maxRefAgeMs; + private final Integer minSnapshotsToKeep; + private final Long maxSnapshotAgeMs; + private final Long maxRefAgeMs; public SetSnapshotRef( String refName, diff --git a/core/src/main/java/org/apache/iceberg/MetricsModes.java b/core/src/main/java/org/apache/iceberg/MetricsModes.java index fd6b9c580ee2..3dc2434310a5 100644 --- a/core/src/main/java/org/apache/iceberg/MetricsModes.java +++ b/core/src/main/java/org/apache/iceberg/MetricsModes.java @@ -160,7 +160,7 @@ Object writeReplace() throws ObjectStreamException { } private static class MetricsModeProxy implements Serializable { - private String modeAsString; + private final String modeAsString; MetricsModeProxy(String modeAsString) { this.modeAsString = modeAsString; diff --git a/core/src/main/java/org/apache/iceberg/PropertiesUpdate.java b/core/src/main/java/org/apache/iceberg/PropertiesUpdate.java index 9168b84b4042..35338a689205 100644 --- a/core/src/main/java/org/apache/iceberg/PropertiesUpdate.java +++ b/core/src/main/java/org/apache/iceberg/PropertiesUpdate.java @@ -62,7 +62,7 @@ public UpdateProperties set(String key, String value) { public UpdateProperties remove(String key) { Preconditions.checkNotNull(key, "Key cannot be null"); Preconditions.checkArgument( - !updates.keySet().contains(key), "Cannot remove and update the same key: %s", key); + !updates.containsKey(key), "Cannot remove and 
update the same key: %s", key); removals.add(key); diff --git a/core/src/main/java/org/apache/iceberg/SnapshotProducer.java b/core/src/main/java/org/apache/iceberg/SnapshotProducer.java index 218daca64dbd..9f4bcbc6bba9 100644 --- a/core/src/main/java/org/apache/iceberg/SnapshotProducer.java +++ b/core/src/main/java/org/apache/iceberg/SnapshotProducer.java @@ -370,82 +370,84 @@ protected TableMetadata refresh() { public void commit() { // this is always set to the latest commit attempt's snapshot id. AtomicLong newSnapshotId = new AtomicLong(-1L); - Timed totalDuration = commitMetrics().totalDuration().start(); - try { - Tasks.foreach(ops) - .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT)) - .exponentialBackoff( - base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT), - base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT), - base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT), - 2.0 /* exponential */) - .onlyRetryOn(CommitFailedException.class) - .countAttempts(commitMetrics().attempts()) - .run( - taskOps -> { - Snapshot newSnapshot = apply(); - newSnapshotId.set(newSnapshot.snapshotId()); - TableMetadata.Builder update = TableMetadata.buildFrom(base); - if (base.snapshot(newSnapshot.snapshotId()) != null) { - // this is a rollback operation - update.setBranchSnapshot(newSnapshot.snapshotId(), targetBranch); - } else if (stageOnly) { - update.addSnapshot(newSnapshot); - } else { - update.setBranchSnapshot(newSnapshot, targetBranch); - } - - TableMetadata updated = update.build(); - if (updated.changes().isEmpty()) { - // do not commit if the metadata has not changed. for example, this may happen - // when setting the current - // snapshot to an ID that is already current. note that this check uses identity. - return; - } - - // if the table UUID is missing, add it here. the UUID will be re-created each time - // this operation retries - // to ensure that if a concurrent operation assigns the UUID, this operation will - // not fail. - taskOps.commit(base, updated.withUUID()); - }); - - } catch (CommitStateUnknownException commitStateUnknownException) { - throw commitStateUnknownException; - } catch (RuntimeException e) { - if (!strictCleanup || e instanceof CleanableFailure) { - Exceptions.suppressAndThrow(e, this::cleanAll); - } + try (Timed ignore = commitMetrics().totalDuration().start()) { + try { + Tasks.foreach(ops) + .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT)) + .exponentialBackoff( + base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT), + base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT), + base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT), + 2.0 /* exponential */) + .onlyRetryOn(CommitFailedException.class) + .countAttempts(commitMetrics().attempts()) + .run( + taskOps -> { + Snapshot newSnapshot = apply(); + newSnapshotId.set(newSnapshot.snapshotId()); + TableMetadata.Builder update = TableMetadata.buildFrom(base); + if (base.snapshot(newSnapshot.snapshotId()) != null) { + // this is a rollback operation + update.setBranchSnapshot(newSnapshot.snapshotId(), targetBranch); + } else if (stageOnly) { + update.addSnapshot(newSnapshot); + } else { + update.setBranchSnapshot(newSnapshot, targetBranch); + } - throw e; - } + TableMetadata updated = update.build(); + if (updated.changes().isEmpty()) { + // do not commit if the metadata has not changed. 
for example, this may happen + // when setting the current + // snapshot to an ID that is already current. note that this check uses + // identity. + return; + } - try { - LOG.info("Committed snapshot {} ({})", newSnapshotId.get(), getClass().getSimpleName()); - - // at this point, the commit must have succeeded. after a refresh, the snapshot is loaded by - // id in case another commit was added between this commit and the refresh. - Snapshot saved = ops.refresh().snapshot(newSnapshotId.get()); - if (saved != null) { - cleanUncommitted(Sets.newHashSet(saved.allManifests(ops.io()))); - // also clean up unused manifest lists created by multiple attempts - for (String manifestList : manifestLists) { - if (!saved.manifestListLocation().equals(manifestList)) { - deleteFile(manifestList); - } + // if the table UUID is missing, add it here. the UUID will be re-created each + // time + // this operation retries + // to ensure that if a concurrent operation assigns the UUID, this operation will + // not fail. + taskOps.commit(base, updated.withUUID()); + }); + + } catch (CommitStateUnknownException commitStateUnknownException) { + throw commitStateUnknownException; + } catch (RuntimeException e) { + if (!strictCleanup || e instanceof CleanableFailure) { + Exceptions.suppressAndThrow(e, this::cleanAll); } - } else { - // saved may not be present if the latest metadata couldn't be loaded due to eventual - // consistency problems in refresh. in that case, don't clean up. - LOG.warn("Failed to load committed snapshot, skipping manifest clean-up"); + + throw e; } - } catch (Throwable e) { - LOG.warn( - "Failed to load committed table metadata or during cleanup, skipping further cleanup", e); - } + try { + LOG.info("Committed snapshot {} ({})", newSnapshotId.get(), getClass().getSimpleName()); + + // at this point, the commit must have succeeded. after a refresh, the snapshot is loaded by + // id in case another commit was added between this commit and the refresh. + Snapshot saved = ops.refresh().snapshot(newSnapshotId.get()); + if (saved != null) { + cleanUncommitted(Sets.newHashSet(saved.allManifests(ops.io()))); + // also clean up unused manifest lists created by multiple attempts + for (String manifestList : manifestLists) { + if (!saved.manifestListLocation().equals(manifestList)) { + deleteFile(manifestList); + } + } + } else { + // saved may not be present if the latest metadata couldn't be loaded due to eventual + // consistency problems in refresh. in that case, don't clean up. + LOG.warn("Failed to load committed snapshot, skipping manifest clean-up"); + } - totalDuration.stop(); + } catch (Throwable e) { + LOG.warn( + "Failed to load committed table metadata or during cleanup, skipping further cleanup", + e); + } + } try { notifyListeners(); diff --git a/core/src/main/java/org/apache/iceberg/TableMetadata.java b/core/src/main/java/org/apache/iceberg/TableMetadata.java index a7edddecad52..6b23c337ae98 100644 --- a/core/src/main/java/org/apache/iceberg/TableMetadata.java +++ b/core/src/main/java/org/apache/iceberg/TableMetadata.java @@ -1434,7 +1434,7 @@ public TableMetadata build() { // what is in the metadata file, which does not store changes. metadata location with changes // is inconsistent. 
Preconditions.checkArgument( - changes.size() == 0 || discardChanges || metadataLocation == null, + changes.isEmpty() || discardChanges || metadataLocation == null, "Cannot set metadata location with changes to table metadata: %s changes", changes.size()); diff --git a/core/src/main/java/org/apache/iceberg/UpdateSnapshotReferencesOperation.java b/core/src/main/java/org/apache/iceberg/UpdateSnapshotReferencesOperation.java index 9d15bf0ee207..f7ccea747a60 100644 --- a/core/src/main/java/org/apache/iceberg/UpdateSnapshotReferencesOperation.java +++ b/core/src/main/java/org/apache/iceberg/UpdateSnapshotReferencesOperation.java @@ -31,7 +31,7 @@ class UpdateSnapshotReferencesOperation implements PendingUpdate updatedRefs; - private TableMetadata base; + private final TableMetadata base; UpdateSnapshotReferencesOperation(TableOperations ops) { this.ops = ops; diff --git a/core/src/main/java/org/apache/iceberg/actions/RewriteDataFilesActionResult.java b/core/src/main/java/org/apache/iceberg/actions/RewriteDataFilesActionResult.java index a3b0292d0463..df2fdfdcdc3b 100644 --- a/core/src/main/java/org/apache/iceberg/actions/RewriteDataFilesActionResult.java +++ b/core/src/main/java/org/apache/iceberg/actions/RewriteDataFilesActionResult.java @@ -27,8 +27,8 @@ public class RewriteDataFilesActionResult { private static final RewriteDataFilesActionResult EMPTY = new RewriteDataFilesActionResult(ImmutableList.of(), ImmutableList.of()); - private List deletedDataFiles; - private List addedDataFiles; + private final List deletedDataFiles; + private final List addedDataFiles; public RewriteDataFilesActionResult( List deletedDataFiles, List addedDataFiles) { diff --git a/core/src/main/java/org/apache/iceberg/avro/AvroCustomOrderSchemaVisitor.java b/core/src/main/java/org/apache/iceberg/avro/AvroCustomOrderSchemaVisitor.java index 575e44a83e42..69159b65be3e 100644 --- a/core/src/main/java/org/apache/iceberg/avro/AvroCustomOrderSchemaVisitor.java +++ b/core/src/main/java/org/apache/iceberg/avro/AvroCustomOrderSchemaVisitor.java @@ -68,7 +68,7 @@ public static T visit(Schema schema, AvroCustomOrderSchemaVisitor v } } - private Deque recordLevels = Lists.newLinkedList(); + private final Deque recordLevels = Lists.newLinkedList(); public T record(Schema record, List names, Iterable fields) { return null; diff --git a/core/src/main/java/org/apache/iceberg/avro/AvroFileAppender.java b/core/src/main/java/org/apache/iceberg/avro/AvroFileAppender.java index c69a8626f6c9..a583f9a84e1c 100644 --- a/core/src/main/java/org/apache/iceberg/avro/AvroFileAppender.java +++ b/core/src/main/java/org/apache/iceberg/avro/AvroFileAppender.java @@ -34,11 +34,11 @@ import org.apache.iceberg.relocated.com.google.common.base.Preconditions; class AvroFileAppender implements FileAppender { - private PositionOutputStream stream; + private final PositionOutputStream stream; private DataFileWriter writer; - private DatumWriter datumWriter; - private org.apache.iceberg.Schema icebergSchema; - private MetricsConfig metricsConfig; + private final DatumWriter datumWriter; + private final org.apache.iceberg.Schema icebergSchema; + private final MetricsConfig metricsConfig; private long numRecords = 0L; private boolean isClosed = false; diff --git a/core/src/main/java/org/apache/iceberg/avro/AvroSchemaUtil.java b/core/src/main/java/org/apache/iceberg/avro/AvroSchemaUtil.java index 57c99faa2073..032d63105dfe 100644 --- a/core/src/main/java/org/apache/iceberg/avro/AvroSchemaUtil.java +++ 
b/core/src/main/java/org/apache/iceberg/avro/AvroSchemaUtil.java @@ -222,7 +222,7 @@ public static boolean isKeyValueSchema(Schema schema) { static Schema createMap(int keyId, Schema keySchema, int valueId, Schema valueSchema) { String keyValueName = "k" + keyId + "_v" + valueId; - Schema.Field keyField = new Schema.Field("key", keySchema, null, (Object) null); + Schema.Field keyField = new Schema.Field("key", keySchema, null, null); keyField.addProp(FIELD_ID_PROP, keyId); Schema.Field valueField = @@ -250,7 +250,7 @@ static Schema createProjectionMap( Schema valueSchema) { String keyValueName = "k" + keyId + "_v" + valueId; - Schema.Field keyField = new Schema.Field("key", keySchema, null, (Object) null); + Schema.Field keyField = new Schema.Field("key", keySchema, null, null); if (!"key".equals(keyName)) { keyField.addAlias(keyName); } diff --git a/core/src/main/java/org/apache/iceberg/avro/AvroSchemaVisitor.java b/core/src/main/java/org/apache/iceberg/avro/AvroSchemaVisitor.java index f22a3592ad3d..9819924ffa99 100644 --- a/core/src/main/java/org/apache/iceberg/avro/AvroSchemaVisitor.java +++ b/core/src/main/java/org/apache/iceberg/avro/AvroSchemaVisitor.java @@ -71,8 +71,8 @@ public static T visit(Schema schema, AvroSchemaVisitor visitor) { } } - private Deque recordLevels = Lists.newLinkedList(); - private Deque fieldNames = Lists.newLinkedList(); + private final Deque recordLevels = Lists.newLinkedList(); + private final Deque fieldNames = Lists.newLinkedList(); protected Deque fieldNames() { return fieldNames; diff --git a/core/src/main/java/org/apache/iceberg/avro/AvroSchemaWithTypeVisitor.java b/core/src/main/java/org/apache/iceberg/avro/AvroSchemaWithTypeVisitor.java index 85a8718abfce..45892d3de151 100644 --- a/core/src/main/java/org/apache/iceberg/avro/AvroSchemaWithTypeVisitor.java +++ b/core/src/main/java/org/apache/iceberg/avro/AvroSchemaWithTypeVisitor.java @@ -115,7 +115,7 @@ private static T visitArray(Type type, Schema array, AvroSchemaWithTypeVisit } } - private Deque recordLevels = Lists.newLinkedList(); + private final Deque recordLevels = Lists.newLinkedList(); public T record(Types.StructType iStruct, Schema record, List names, List fields) { return null; diff --git a/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerByStructureVisitor.java b/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerByStructureVisitor.java index 2c080005d862..126a6373d210 100644 --- a/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerByStructureVisitor.java +++ b/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerByStructureVisitor.java @@ -146,7 +146,7 @@ private static T visitArray( } /** Just for checking state. */ - private Deque recordLevels = Lists.newLinkedList(); + private final Deque recordLevels = Lists.newLinkedList(); // ---------------------------------- Partner type methods // --------------------------------------------- diff --git a/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerVisitor.java b/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerVisitor.java index 0147dbf37d31..b23b195d959a 100644 --- a/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerVisitor.java +++ b/core/src/main/java/org/apache/iceberg/avro/AvroWithPartnerVisitor.java @@ -67,7 +67,7 @@ public Type listElementPartner(Type partner) { } /** Used to fail on recursive types. 
*/ - private Deque recordLevels = Lists.newLinkedList(); + private final Deque recordLevels = Lists.newLinkedList(); public R record(P partner, Schema record, List fieldResults) { return null; diff --git a/core/src/main/java/org/apache/iceberg/avro/GenericAvroReader.java b/core/src/main/java/org/apache/iceberg/avro/GenericAvroReader.java index d89489d92a28..93bfa2398466 100644 --- a/core/src/main/java/org/apache/iceberg/avro/GenericAvroReader.java +++ b/core/src/main/java/org/apache/iceberg/avro/GenericAvroReader.java @@ -43,7 +43,7 @@ public class GenericAvroReader private final Types.StructType expectedType; private ClassLoader loader = Thread.currentThread().getContextClassLoader(); private Map renames = ImmutableMap.of(); - private Map idToConstant = ImmutableMap.of(); + private final Map idToConstant = ImmutableMap.of(); private Schema fileSchema = null; private ValueReader reader = null; diff --git a/core/src/main/java/org/apache/iceberg/avro/ValueWriters.java b/core/src/main/java/org/apache/iceberg/avro/ValueWriters.java index 1658db96c21a..3844ede1c16a 100644 --- a/core/src/main/java/org/apache/iceberg/avro/ValueWriters.java +++ b/core/src/main/java/org/apache/iceberg/avro/ValueWriters.java @@ -232,7 +232,7 @@ public void write(Object s, Encoder encoder) throws IOException { throw new IllegalArgumentException("Cannot write null to required string column"); } else { throw new IllegalArgumentException( - "Cannot write unknown string type: " + s.getClass().getName() + ": " + s.toString()); + "Cannot write unknown string type: " + s.getClass().getName() + ": " + s); } } } diff --git a/core/src/main/java/org/apache/iceberg/encryption/NativeFileCryptoParameters.java b/core/src/main/java/org/apache/iceberg/encryption/NativeFileCryptoParameters.java index f11506a1bbc9..2cade89dc75d 100644 --- a/core/src/main/java/org/apache/iceberg/encryption/NativeFileCryptoParameters.java +++ b/core/src/main/java/org/apache/iceberg/encryption/NativeFileCryptoParameters.java @@ -27,8 +27,8 @@ * encryption support (Parquet and ORC). 
*/ public class NativeFileCryptoParameters { - private ByteBuffer fileKey; - private EncryptionAlgorithm fileEncryptionAlgorithm; + private final ByteBuffer fileKey; + private final EncryptionAlgorithm fileEncryptionAlgorithm; private NativeFileCryptoParameters( ByteBuffer fileKey, EncryptionAlgorithm fileEncryptionAlgorithm) { @@ -48,7 +48,7 @@ public static Builder create(ByteBuffer fileKey) { } public static class Builder { - private ByteBuffer fileKey; + private final ByteBuffer fileKey; private EncryptionAlgorithm fileEncryptionAlgorithm; private Builder(ByteBuffer fileKey) { diff --git a/core/src/main/java/org/apache/iceberg/hadoop/HadoopTableOperations.java b/core/src/main/java/org/apache/iceberg/hadoop/HadoopTableOperations.java index 4e815ceff59a..1e0cf4422120 100644 --- a/core/src/main/java/org/apache/iceberg/hadoop/HadoopTableOperations.java +++ b/core/src/main/java/org/apache/iceberg/hadoop/HadoopTableOperations.java @@ -153,7 +153,7 @@ public void commit(TableMetadata base, TableMetadata metadata) { TableProperties.METADATA_COMPRESSION, TableProperties.METADATA_COMPRESSION_DEFAULT); TableMetadataParser.Codec codec = TableMetadataParser.Codec.fromName(codecName); String fileExtension = TableMetadataParser.getFileExtension(codec); - Path tempMetadataFile = metadataPath(UUID.randomUUID().toString() + fileExtension); + Path tempMetadataFile = metadataPath(UUID.randomUUID() + fileExtension); TableMetadataParser.write(metadata, io().newOutputFile(tempMetadataFile.toString())); int nextVersion = (current.first() != null ? current.first() : 0) + 1; @@ -296,7 +296,7 @@ private void writeVersionHint(int versionToWrite) { FileSystem fs = getFileSystem(versionHintFile, conf); try { - Path tempVersionHintFile = metadataPath(UUID.randomUUID().toString() + "-version-hint.temp"); + Path tempVersionHintFile = metadataPath(UUID.randomUUID() + "-version-hint.temp"); writeVersionToPath(fs, tempVersionHintFile, versionToWrite); fs.delete(versionHintFile, false /* recursive delete */); fs.rename(tempVersionHintFile, versionHintFile); diff --git a/core/src/main/java/org/apache/iceberg/jdbc/JdbcClientPool.java b/core/src/main/java/org/apache/iceberg/jdbc/JdbcClientPool.java index 487b8409b1cc..9b6fca95bfe4 100644 --- a/core/src/main/java/org/apache/iceberg/jdbc/JdbcClientPool.java +++ b/core/src/main/java/org/apache/iceberg/jdbc/JdbcClientPool.java @@ -53,7 +53,7 @@ public class JdbcClientPool extends ClientPoolImpl { private final String dbUrl; private final Map properties; - private Set retryableStatusCodes; + private final Set retryableStatusCodes; public JdbcClientPool(String dbUrl, Map props) { this( diff --git a/core/src/main/java/org/apache/iceberg/rest/responses/ErrorResponse.java b/core/src/main/java/org/apache/iceberg/rest/responses/ErrorResponse.java index 5543259af1e0..da93b821cedd 100644 --- a/core/src/main/java/org/apache/iceberg/rest/responses/ErrorResponse.java +++ b/core/src/main/java/org/apache/iceberg/rest/responses/ErrorResponse.java @@ -28,10 +28,10 @@ /** Standard response body for all API errors */ public class ErrorResponse implements RESTResponse { - private String message; - private String type; - private int code; - private List stack; + private final String message; + private final String type; + private final int code; + private final List stack; private ErrorResponse(String message, String type, int code, List stack) { this.message = message; diff --git a/core/src/main/java/org/apache/iceberg/rest/responses/LoadTableResponse.java 
b/core/src/main/java/org/apache/iceberg/rest/responses/LoadTableResponse.java index a389479fff10..5e5353c3909a 100644 --- a/core/src/main/java/org/apache/iceberg/rest/responses/LoadTableResponse.java +++ b/core/src/main/java/org/apache/iceberg/rest/responses/LoadTableResponse.java @@ -84,7 +84,7 @@ public static Builder builder() { public static class Builder { private String metadataLocation; private TableMetadata metadata; - private Map config = Maps.newHashMap(); + private final Map config = Maps.newHashMap(); private Builder() {} diff --git a/core/src/main/java/org/apache/iceberg/schema/UnionByNameVisitor.java b/core/src/main/java/org/apache/iceberg/schema/UnionByNameVisitor.java index 291677741e93..1497ba59c582 100644 --- a/core/src/main/java/org/apache/iceberg/schema/UnionByNameVisitor.java +++ b/core/src/main/java/org/apache/iceberg/schema/UnionByNameVisitor.java @@ -34,7 +34,7 @@ public class UnionByNameVisitor extends SchemaWithPartnerVisitor, Class> key) { null, false, Lists.newArrayList( - new Schema.Field("x", xSchema, null, (Object) null), - new Schema.Field("y", ySchema, null, (Object) null))); + new Schema.Field("x", xSchema, null, null), + new Schema.Field("y", ySchema, null, null))); } }); @@ -109,7 +109,7 @@ public Y second() { @Override public String toString() { - return "(" + String.valueOf(first) + ", " + String.valueOf(second) + ")"; + return "(" + first + ", " + second + ")"; } @Override diff --git a/core/src/main/java/org/apache/iceberg/util/Tasks.java b/core/src/main/java/org/apache/iceberg/util/Tasks.java index e420145c8dce..02d2b834311f 100644 --- a/core/src/main/java/org/apache/iceberg/util/Tasks.java +++ b/core/src/main/java/org/apache/iceberg/util/Tasks.java @@ -78,7 +78,7 @@ public static class Builder { private boolean stopAbortsOnFailure = false; // retry settings - private List> stopRetryExceptions = + private final List> stopRetryExceptions = Lists.newArrayList(UnrecoverableException.class); private List> onlyRetryExceptions = null; private Predicate shouldRetryPredicate = null; @@ -496,7 +496,7 @@ private static Collection waitFor(Collection> futures) { } catch (ExecutionException e) { Throwable cause = e.getCause(); - if (Error.class.isInstance(cause)) { + if (cause instanceof Error) { for (Throwable t : uncaught) { cause.addSuppressed(t); } diff --git a/core/src/main/java/org/apache/iceberg/util/ZOrderByteUtils.java b/core/src/main/java/org/apache/iceberg/util/ZOrderByteUtils.java index c687fc4e03dc..4a5805742443 100644 --- a/core/src/main/java/org/apache/iceberg/util/ZOrderByteUtils.java +++ b/core/src/main/java/org/apache/iceberg/util/ZOrderByteUtils.java @@ -76,9 +76,7 @@ public static ByteBuffer longToOrderedBytes(long val, ByteBuffer reuse) { * ByteBuffer)} */ public static ByteBuffer shortToOrderedBytes(short val, ByteBuffer reuse) { - ByteBuffer bytes = ByteBuffers.reuse(reuse, PRIMITIVE_BUFFER_SIZE); - bytes.putLong(((long) val) ^ 0x8000000000000000L); - return bytes; + return intToOrderedBytes(val, reuse); } /** @@ -86,9 +84,7 @@ public static ByteBuffer shortToOrderedBytes(short val, ByteBuffer reuse) { * ByteBuffer)} */ public static ByteBuffer tinyintToOrderedBytes(byte val, ByteBuffer reuse) { - ByteBuffer bytes = ByteBuffers.reuse(reuse, PRIMITIVE_BUFFER_SIZE); - bytes.putLong(((long) val) ^ 0x8000000000000000L); - return bytes; + return intToOrderedBytes(val, reuse); } /** @@ -100,11 +96,7 @@ public static ByteBuffer tinyintToOrderedBytes(byte val, ByteBuffer reuse) { * into lexicographically comparable bytes */ public static ByteBuffer 
floatToOrderedBytes(float val, ByteBuffer reuse) { - ByteBuffer bytes = ByteBuffers.reuse(reuse, PRIMITIVE_BUFFER_SIZE); - long lval = Double.doubleToLongBits(val); - lval ^= ((lval >> (Integer.SIZE - 1)) | Long.MIN_VALUE); - bytes.putLong(lval); - return bytes; + return doubleToOrderedBytes(val, reuse); } /** Doubles are treated the same as floats in {@link #floatToOrderedBytes(float, ByteBuffer)} */ diff --git a/core/src/main/java/org/apache/iceberg/view/BaseViewOperations.java b/core/src/main/java/org/apache/iceberg/view/BaseViewOperations.java index 7a4f546b8860..18b452f98367 100644 --- a/core/src/main/java/org/apache/iceberg/view/BaseViewOperations.java +++ b/core/src/main/java/org/apache/iceberg/view/BaseViewOperations.java @@ -219,7 +219,7 @@ private static int parseVersion(String metadataLocation) { } try { - return Integer.valueOf(metadataLocation.substring(versionStart, versionEnd)); + return Integer.parseInt(metadataLocation.substring(versionStart, versionEnd)); } catch (NumberFormatException e) { LOG.warn("Unable to parse version from metadata location: {}", metadataLocation, e); return -1; diff --git a/core/src/test/java/org/apache/iceberg/TableMetadataParserTest.java b/core/src/test/java/org/apache/iceberg/TableMetadataParserTest.java index 5581818aa1f0..56e3f7b67037 100644 --- a/core/src/test/java/org/apache/iceberg/TableMetadataParserTest.java +++ b/core/src/test/java/org/apache/iceberg/TableMetadataParserTest.java @@ -36,7 +36,6 @@ import java.util.zip.GZIPInputStream; import java.util.zip.ZipException; import org.apache.iceberg.TableMetadataParser.Codec; -import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.types.Types.BooleanType; @@ -69,7 +68,7 @@ public void testGzipCompressionProperty() throws IOException { TableMetadataParser.write(metadata, outputFile); assertThat(isCompressed(fileName)).isEqualTo(codec == Codec.GZIP); TableMetadata actualMetadata = - TableMetadataParser.read((FileIO) null, Files.localInput(new File(fileName))); + TableMetadataParser.read(null, Files.localInput(new File(fileName))); verifyMetadata(metadata, actualMetadata); } @@ -88,7 +87,7 @@ private void verifyMetadata(TableMetadata expected, TableMetadata actual) { } private boolean isCompressed(String path) throws IOException { - try (InputStream ignored = new GZIPInputStream(new FileInputStream(new File(path)))) { + try (InputStream ignored = new GZIPInputStream(new FileInputStream(path))) { return true; } catch (ZipException e) { if (e.getMessage().equals("Not in GZIP format")) { diff --git a/core/src/test/java/org/apache/iceberg/TestBase.java b/core/src/test/java/org/apache/iceberg/TestBase.java index dc7bf0e8d8cb..2322062dad85 100644 --- a/core/src/test/java/org/apache/iceberg/TestBase.java +++ b/core/src/test/java/org/apache/iceberg/TestBase.java @@ -390,7 +390,7 @@ Snapshot commit(Table table, SnapshotUpdate snapshotUpdate, String branch) { Snapshot apply(SnapshotUpdate snapshotUpdate, String branch) { if (branch.equals(SnapshotRef.MAIN_BRANCH)) { - return ((SnapshotProducer) snapshotUpdate).apply(); + return snapshotUpdate.apply(); } else { return ((SnapshotProducer) snapshotUpdate.toBranch(branch)).apply(); } diff --git a/core/src/test/java/org/apache/iceberg/TestTableMetadata.java b/core/src/test/java/org/apache/iceberg/TestTableMetadata.java index 8075372d09c1..e11cc500df55 100644 --- a/core/src/test/java/org/apache/iceberg/TestTableMetadata.java +++ 
b/core/src/test/java/org/apache/iceberg/TestTableMetadata.java @@ -537,7 +537,7 @@ public void testJsonWithPreviousMetadataLog() throws Exception { List previousMetadataLog = Lists.newArrayList(); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp, "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp, "/tmp/000001-" + UUID.randomUUID() + ".metadata.json")); TableMetadata base = new TableMetadata( @@ -606,17 +606,14 @@ public void testAddPreviousMetadataRemoveNone() throws IOException { List previousMetadataLog = Lists.newArrayList(); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 100, - "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 100, "/tmp/000001-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 90, - "/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 90, "/tmp/000002-" + UUID.randomUUID() + ".metadata.json")); MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry( - currentTimestamp - 80, - "/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json"); + currentTimestamp - 80, "/tmp/000003-" + UUID.randomUUID() + ".metadata.json"); TableMetadata base = new TableMetadata( @@ -691,29 +688,23 @@ public void testAddPreviousMetadataRemoveOne() throws IOException { List previousMetadataLog = Lists.newArrayList(); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 100, - "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 100, "/tmp/000001-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 90, - "/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 90, "/tmp/000002-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 80, - "/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 80, "/tmp/000003-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 70, - "/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 70, "/tmp/000004-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 60, - "/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 60, "/tmp/000005-" + UUID.randomUUID() + ".metadata.json")); MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry( - currentTimestamp - 50, - "/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json"); + currentTimestamp - 50, "/tmp/000006-" + UUID.randomUUID() + ".metadata.json"); TableMetadata base = new TableMetadata( @@ -792,29 +783,23 @@ public void testAddPreviousMetadataRemoveMultiple() throws IOException { List previousMetadataLog = Lists.newArrayList(); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 100, - "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 100, "/tmp/000001-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 90, - "/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 90, "/tmp/000002-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 80, - "/tmp/000003-" + 
UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 80, "/tmp/000003-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 70, - "/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 70, "/tmp/000004-" + UUID.randomUUID() + ".metadata.json")); previousMetadataLog.add( new MetadataLogEntry( - currentTimestamp - 60, - "/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json")); + currentTimestamp - 60, "/tmp/000005-" + UUID.randomUUID() + ".metadata.json")); MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry( - currentTimestamp - 50, - "/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json"); + currentTimestamp - 50, "/tmp/000006-" + UUID.randomUUID() + ".metadata.json"); TableMetadata base = new TableMetadata( diff --git a/core/src/test/java/org/apache/iceberg/TestV1ToV2RowDeltaDelete.java b/core/src/test/java/org/apache/iceberg/TestV1ToV2RowDeltaDelete.java index 28695e2fffe0..80f4d059f5f0 100644 --- a/core/src/test/java/org/apache/iceberg/TestV1ToV2RowDeltaDelete.java +++ b/core/src/test/java/org/apache/iceberg/TestV1ToV2RowDeltaDelete.java @@ -70,7 +70,7 @@ public void testPartitionedTableWithPartitionEqDeletes() { verifyManifestSequenceNumber(dataManifest, 0, 0); // update table version to 2 - TableOperations ops = ((BaseTable) table).operations(); + TableOperations ops = table.operations(); TableMetadata base = ops.current(); ops.commit(base, base.upgradeToFormatVersion(2)); @@ -127,7 +127,7 @@ public void testPartitionedTableWithUnrelatedPartitionDeletes() { table.newAppend().appendFile(FILE_B).appendFile(FILE_C).appendFile(FILE_D).commit(); // update table version to 2 - TableOperations ops = ((BaseTable) table).operations(); + TableOperations ops = table.operations(); TableMetadata base = ops.current(); ops.commit(base, base.upgradeToFormatVersion(2)); @@ -162,7 +162,7 @@ public void testPartitionedTableWithExistingDeleteFile() { table.newAppend().appendFile(FILE_A).commit(); // update table version to 2 - TableOperations ops = ((BaseTable) table).operations(); + TableOperations ops = table.operations(); TableMetadata base = ops.current(); ops.commit(base, base.upgradeToFormatVersion(2)); diff --git a/core/src/test/java/org/apache/iceberg/avro/TestNameMappingWithAvroSchema.java b/core/src/test/java/org/apache/iceberg/avro/TestNameMappingWithAvroSchema.java index 686456b3b16f..e3b25969fdb0 100644 --- a/core/src/test/java/org/apache/iceberg/avro/TestNameMappingWithAvroSchema.java +++ b/core/src/test/java/org/apache/iceberg/avro/TestNameMappingWithAvroSchema.java @@ -59,41 +59,39 @@ public void testNameMappingWithAvroSchema() { new Schema.Field( "complexUnion", Schema.createUnion( - new Schema[] { - Schema.create(Schema.Type.NULL), - Schema.create(Schema.Type.STRING), - Schema.createRecord( - "innerRecord1", - null, - "namespace1", - false, - Lists.newArrayList( - new Schema.Field("lat", Schema.create(Schema.Type.DOUBLE)), - new Schema.Field("long", Schema.create(Schema.Type.DOUBLE)))), - Schema.createRecord( - "innerRecord2", - null, - "namespace2", - false, - Lists.newArrayList( - new Schema.Field("lat", Schema.create(Schema.Type.DOUBLE)), - new Schema.Field("long", Schema.create(Schema.Type.DOUBLE)))), - Schema.createRecord( - "innerRecord3", - null, - "namespace3", - false, - Lists.newArrayList( - new Schema.Field( - "innerUnion", - Schema.createUnion( - Lists.newArrayList( - Schema.create(Schema.Type.STRING), - 
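Note on the repeated UUID.randomUUID().toString() removals above: dropping the explicit .toString() is safe because the string concatenation operator already applies String.valueOf to non-String operands. A small sketch of the equivalence (hypothetical path values):

    import java.util.UUID;

    class ToStringInConcat {
      public static void main(String[] args) {
        UUID id = UUID.randomUUID();
        // Both forms compile to the same String.valueOf/StringBuilder calls.
        String a = "/tmp/000001-" + id + ".metadata.json";
        String b = "/tmp/000001-" + id.toString() + ".metadata.json";
        System.out.println(a.equals(b)); // true
      }
    }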
Schema.create(Schema.Type.INT)))))), - Schema.createEnum( - "timezone", null, null, Lists.newArrayList("UTC", "PST", "EST")), - Schema.createFixed("bitmap", null, null, 1) - })))); + Schema.create(Schema.Type.NULL), + Schema.create(Schema.Type.STRING), + Schema.createRecord( + "innerRecord1", + null, + "namespace1", + false, + Lists.newArrayList( + new Schema.Field("lat", Schema.create(Schema.Type.DOUBLE)), + new Schema.Field("long", Schema.create(Schema.Type.DOUBLE)))), + Schema.createRecord( + "innerRecord2", + null, + "namespace2", + false, + Lists.newArrayList( + new Schema.Field("lat", Schema.create(Schema.Type.DOUBLE)), + new Schema.Field("long", Schema.create(Schema.Type.DOUBLE)))), + Schema.createRecord( + "innerRecord3", + null, + "namespace3", + false, + Lists.newArrayList( + new Schema.Field( + "innerUnion", + Schema.createUnion( + Lists.newArrayList( + Schema.create(Schema.Type.STRING), + Schema.create(Schema.Type.INT)))))), + Schema.createEnum( + "timezone", null, null, Lists.newArrayList("UTC", "PST", "EST")), + Schema.createFixed("bitmap", null, null, 1))))); NameMappingWithAvroSchema nameMappingWithAvroSchema = new NameMappingWithAvroSchema(); diff --git a/core/src/test/java/org/apache/iceberg/data/avro/TestDecoderResolver.java b/core/src/test/java/org/apache/iceberg/data/avro/TestDecoderResolver.java index 9b355464a9d8..0024363701a5 100644 --- a/core/src/test/java/org/apache/iceberg/data/avro/TestDecoderResolver.java +++ b/core/src/test/java/org/apache/iceberg/data/avro/TestDecoderResolver.java @@ -40,6 +40,7 @@ public void before() { DecoderResolver.DECODER_CACHES.get().clear(); } + @SuppressWarnings("UnusedAssignment") // the unused assignments are necessary for this test @Test public void testDecoderCachingReadSchemaSameAsFileSchema() throws Exception { Decoder dummyDecoder = DecoderFactory.get().binaryDecoder(new byte[] {}, null); @@ -84,6 +85,7 @@ public void testDecoderCachingReadSchemaSameAsFileSchema() throws Exception { checkCachedSize(0); } + @SuppressWarnings("UnusedAssignment") // the unused assignments are necessary for this test @Test public void testDecoderCachingReadSchemaNotSameAsFileSchema() throws Exception { Decoder dummyDecoder = DecoderFactory.get().binaryDecoder(new byte[] {}, null); diff --git a/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java b/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java index 95d0b6adbd02..109c88daeaa5 100644 --- a/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java +++ b/core/src/test/java/org/apache/iceberg/hadoop/HadoopFileIOTest.java @@ -86,7 +86,7 @@ public void testListPrefix() { @Test public void testFileExists() throws IOException { Path parent = new Path(tempDir.toURI()); - Path randomFilePath = new Path(parent, "random-file-" + UUID.randomUUID().toString()); + Path randomFilePath = new Path(parent, "random-file-" + UUID.randomUUID()); fs.createNewFile(randomFilePath); // check existence of the created file diff --git a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java index fbd6f833499e..6512b2499071 100644 --- a/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java +++ b/core/src/test/java/org/apache/iceberg/hadoop/TestHadoopCatalog.java @@ -58,7 +58,7 @@ import org.junit.jupiter.params.provider.ValueSource; public class TestHadoopCatalog extends HadoopTableTestBase { - private static ImmutableMap meta = ImmutableMap.of(); + private static final ImmutableMap meta = 
ImmutableMap.of(); @ParameterizedTest @ValueSource(ints = {1, 2}) @@ -632,7 +632,7 @@ public void testRegisterTable() throws IOException { catalog.createTable(identifier, SCHEMA); Table registeringTable = catalog.loadTable(identifier); TableOperations ops = ((HasTableOperations) registeringTable).operations(); - String metadataLocation = ((HadoopTableOperations) ops).current().metadataFileLocation(); + String metadataLocation = ops.current().metadataFileLocation(); assertThat(catalog.registerTable(identifier2, metadataLocation)).isNotNull(); assertThat(catalog.loadTable(identifier2)).isNotNull(); assertThat(catalog.dropTable(identifier)).isTrue(); @@ -646,7 +646,7 @@ public void testRegisterExistingTable() throws IOException { catalog.createTable(identifier, SCHEMA); Table registeringTable = catalog.loadTable(identifier); TableOperations ops = ((HasTableOperations) registeringTable).operations(); - String metadataLocation = ((HadoopTableOperations) ops).current().metadataFileLocation(); + String metadataLocation = ops.current().metadataFileLocation(); assertThatThrownBy(() -> catalog.registerTable(identifier, metadataLocation)) .isInstanceOf(AlreadyExistsException.class) .hasMessage("Table already exists: a.t1"); diff --git a/core/src/test/java/org/apache/iceberg/io/MockInputStream.java b/core/src/test/java/org/apache/iceberg/io/MockInputStream.java index 17a6d537b267..00ce7fb78ebb 100644 --- a/core/src/test/java/org/apache/iceberg/io/MockInputStream.java +++ b/core/src/test/java/org/apache/iceberg/io/MockInputStream.java @@ -24,7 +24,7 @@ class MockInputStream extends ByteArrayInputStream { static final byte[] TEST_ARRAY = new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - private int[] lengths; + private final int[] lengths; private int current = 0; MockInputStream(int... 
actualReadLengths) { diff --git a/core/src/test/java/org/apache/iceberg/jdbc/TestJdbcCatalog.java b/core/src/test/java/org/apache/iceberg/jdbc/TestJdbcCatalog.java index c0c9afafd9a7..148a483b3477 100644 --- a/core/src/test/java/org/apache/iceberg/jdbc/TestJdbcCatalog.java +++ b/core/src/test/java/org/apache/iceberg/jdbc/TestJdbcCatalog.java @@ -1130,7 +1130,7 @@ public void report(MetricsReport report) { private String createMetadataLocationViaJdbcCatalog(TableIdentifier identifier) throws SQLException { // temporary connection just to actually create a concrete metadata location - String jdbcUrl = null; + String jdbcUrl; try { java.nio.file.Path dbFile = Files.createTempFile("temp", "metadata"); jdbcUrl = "jdbc:sqlite:" + dbFile.toAbsolutePath(); diff --git a/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java b/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java index 9525001b6265..4e542b8455c4 100644 --- a/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java +++ b/data/src/test/java/org/apache/iceberg/data/TestMetricsRowGroupFilter.java @@ -149,7 +149,7 @@ public TestMetricsRowGroupFilter(String format) { static { StringBuilder sb = new StringBuilder(); for (int i = 0; i < 200; i += 1) { - sb.append(UUID.randomUUID().toString()); + sb.append(UUID.randomUUID()); } TOO_LONG_FOR_STATS_PARQUET = sb.toString(); } diff --git a/data/src/test/java/org/apache/iceberg/io/TestBaseTaskWriter.java b/data/src/test/java/org/apache/iceberg/io/TestBaseTaskWriter.java index 85ddeb78f473..fb93a79d32a3 100644 --- a/data/src/test/java/org/apache/iceberg/io/TestBaseTaskWriter.java +++ b/data/src/test/java/org/apache/iceberg/io/TestBaseTaskWriter.java @@ -217,8 +217,8 @@ private TestTaskWriter createTaskWriter(long targetFileSize) { private static class TestTaskWriter extends BaseTaskWriter { - private RollingFileWriter dataWriter; - private RollingEqDeleteWriter deleteWriter; + private final RollingFileWriter dataWriter; + private final RollingEqDeleteWriter deleteWriter; private TestTaskWriter( PartitionSpec spec, diff --git a/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/EcsS3MockRule.java b/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/EcsS3MockRule.java index f9db7f055751..c51241b76c4b 100644 --- a/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/EcsS3MockRule.java +++ b/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/EcsS3MockRule.java @@ -110,7 +110,7 @@ private void initialize() { private void cleanUp() { if (mock) { // clean up - TEST_RULE_FOR_MOCK_CLIENT.set(null); + TEST_RULE_FOR_MOCK_CLIENT.remove(); } else { if (bucketCreated) { deleteBucket(); diff --git a/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/ObjectData.java b/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/ObjectData.java index 08ffacb84e36..5f50dcc7d341 100644 --- a/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/ObjectData.java +++ b/dell/src/test/java/org/apache/iceberg/dell/mock/ecs/ObjectData.java @@ -72,7 +72,7 @@ public InputStream createInputStream(Range range) { public S3ObjectMetadata createFullMetadata() { S3ObjectMetadata metadata = new S3ObjectMetadata(); - MessageDigest md = null; + MessageDigest md; try { md = MessageDigest.getInstance("MD5"); } catch (NoSuchAlgorithmException e) { diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java index 06ac54617ae6..b96b47c5a785 100644 --- 
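Note on the EcsS3MockRule change above (set(null) replaced with remove()): ThreadLocal.remove() discards the thread's map entry entirely, whereas set(null) keeps an entry holding null, and for a ThreadLocal with an initializer only remove() causes the initializer to run again on the next get(). A minimal sketch of the difference, using a hypothetical ThreadLocal with an initial value:

    import java.util.concurrent.atomic.AtomicInteger;

    class ThreadLocalCleanup {
      static final AtomicInteger INITS = new AtomicInteger();
      static final ThreadLocal<String> CURRENT =
          ThreadLocal.withInitial(() -> "init-" + INITS.incrementAndGet());

      public static void main(String[] args) {
        CURRENT.get();                      // initializes: "init-1"
        CURRENT.set(null);
        System.out.println(CURRENT.get());  // null: entry still present, holding null
        CURRENT.remove();
        System.out.println(CURRENT.get());  // "init-2": entry removed, initializer runs again
      }
    }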
a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/Actions.java @@ -30,8 +30,8 @@ public class Actions { // disable classloader check as Avro may cache class/object in the serializers. .set(CoreOptions.CHECK_LEAKED_CLASSLOADER, false); - private StreamExecutionEnvironment env; - private Table table; + private final StreamExecutionEnvironment env; + private final Table table; private Actions(StreamExecutionEnvironment env, Table table) { this.env = env; diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java index 9876bb3861c4..670abebcb58a 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/actions/RewriteDataFilesAction.java @@ -31,7 +31,7 @@ public class RewriteDataFilesAction extends BaseRewriteDataFilesAction { - private StreamExecutionEnvironment env; + private final StreamExecutionEnvironment env; private int maxParallelism; public RewriteDataFilesAction(StreamExecutionEnvironment env, Table table) { diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java index 04e168385a36..f7e8e0c884cf 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/AvroGenericRecordToRowDataMapper.java @@ -55,7 +55,7 @@ public RowData map(GenericRecord genericRecord) throws Exception { public static AvroGenericRecordToRowDataMapper forAvroSchema(Schema avroSchema) { DataType dataType = AvroSchemaConverter.convertToDataType(avroSchema.toString()); LogicalType logicalType = TypeConversions.fromDataToLogicalType(dataType); - RowType rowType = RowType.of(logicalType.getChildren().stream().toArray(LogicalType[]::new)); + RowType rowType = RowType.of(logicalType.getChildren().toArray(new LogicalType[0])); return new AvroGenericRecordToRowDataMapper(rowType); } } diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java index 22dd2388cc3e..05b943f6046f 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/sink/shuffle/MapDataStatistics.java @@ -43,7 +43,7 @@ public StatisticsType type() { @Override public boolean isEmpty() { - return keyFrequency.size() == 0; + return keyFrequency.isEmpty(); } @Override diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/RowDataToAvroGenericRecordConverter.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/RowDataToAvroGenericRecordConverter.java index 8f95e3e554a0..8ef1f1fbb833 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/RowDataToAvroGenericRecordConverter.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/RowDataToAvroGenericRecordConverter.java @@ -64,7 +64,7 @@ public static RowDataToAvroGenericRecordConverter fromIcebergSchema( public static RowDataToAvroGenericRecordConverter 
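Note on the AvroGenericRecordToRowDataMapper change above: `getChildren().toArray(new LogicalType[0])` replaces `getChildren().stream().toArray(LogicalType[]::new)`; Collection.toArray with a zero-length array is the idiomatic way to get a typed array and avoids the needless Stream. A minimal sketch with a plain List (hypothetical values):

    import java.util.List;

    class ToArrayIdiom {
      public static void main(String[] args) {
        List<String> names = List.of("a", "b", "c");
        // Preferred: pass a zero-length array; the JDK allocates one of the right size.
        String[] direct = names.toArray(new String[0]);
        // Equivalent result, but routed through a Stream for no benefit.
        String[] viaStream = names.stream().toArray(String[]::new);
        System.out.println(direct.length == viaStream.length); // true
      }
    }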
fromAvroSchema(Schema avroSchema) { DataType dataType = AvroSchemaConverter.convertToDataType(avroSchema.toString()); LogicalType logicalType = TypeConversions.fromDataToLogicalType(dataType); - RowType rowType = RowType.of(logicalType.getChildren().stream().toArray(LogicalType[]::new)); + RowType rowType = RowType.of(logicalType.getChildren().toArray(new LogicalType[0])); return new RowDataToAvroGenericRecordConverter(rowType, avroSchema); } } diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/ContinuousSplitPlannerImpl.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/ContinuousSplitPlannerImpl.java index e9e3c159b07b..fef4ec45ed8a 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/ContinuousSplitPlannerImpl.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/ContinuousSplitPlannerImpl.java @@ -121,7 +121,7 @@ private ContinuousEnumerationResult discoverIncrementalSplits( LOG.info("Current table snapshot is already enumerated: {}", currentSnapshot.snapshotId()); return new ContinuousEnumerationResult(Collections.emptyList(), lastPosition, lastPosition); } else { - Long lastConsumedSnapshotId = lastPosition != null ? lastPosition.snapshotId() : null; + Long lastConsumedSnapshotId = lastPosition.snapshotId(); Snapshot toSnapshotInclusive = toSnapshotInclusive( lastConsumedSnapshotId, currentSnapshot, scanContext.maxPlanningSnapshotCount()); diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorState.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorState.java index 024d0b101165..26fbad46c128 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorState.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorState.java @@ -29,7 +29,7 @@ public class IcebergEnumeratorState implements Serializable { @Nullable private final IcebergEnumeratorPosition lastEnumeratedPosition; private final Collection pendingSplits; - private int[] enumerationSplitCountHistory; + private final int[] enumerationSplitCountHistory; public IcebergEnumeratorState(Collection pendingSplits) { this(null, pendingSplits); diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java index 95d6db2cfbc4..f76f8a69ff0e 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/enumerator/IcebergEnumeratorStateSerializer.java @@ -174,10 +174,8 @@ private static Collection deserializePendingSplits( private static void serializeEnumerationSplitCountHistory( DataOutputSerializer out, int[] enumerationSplitCountHistory) throws IOException { out.writeInt(enumerationSplitCountHistory.length); - if (enumerationSplitCountHistory.length > 0) { - for (int enumerationSplitCount : enumerationSplitCountHistory) { - out.writeInt(enumerationSplitCount); - } + for (int enumerationSplitCount : enumerationSplitCountHistory) { + out.writeInt(enumerationSplitCount); } } diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java 
b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java index a0395f29ac5b..841969666ee5 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/source/split/SerializerHelper.java @@ -129,7 +129,7 @@ public static String readLongUTF(DataInputDeserializer in) throws IOException { if (count > utflen) { throw new UTFDataFormatException("malformed input: partial character at end"); } - char2 = (int) bytearr[count - 1]; + char2 = bytearr[count - 1]; if ((char2 & 0xC0) != 0x80) { throw new UTFDataFormatException("malformed input around byte " + count); } @@ -141,8 +141,8 @@ public static String readLongUTF(DataInputDeserializer in) throws IOException { if (count > utflen) { throw new UTFDataFormatException("malformed input: partial character at end"); } - char2 = (int) bytearr[count - 2]; - char3 = (int) bytearr[count - 1]; + char2 = bytearr[count - 2]; + char3 = bytearr[count - 1]; if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80)) { throw new UTFDataFormatException("malformed input around byte " + (count - 1)); } diff --git a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java index 353cee56bebb..20b33e615e5f 100644 --- a/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java +++ b/flink/v1.19/flink/src/main/java/org/apache/iceberg/flink/util/FlinkPackage.java @@ -32,7 +32,7 @@ private FlinkPackage() {} /** Returns Flink version string like x.y.z */ public static String version() { if (null == VERSION.get()) { - String detectedVersion = null; + String detectedVersion; try { detectedVersion = versionFromJar(); // use unknown version in case exact implementation version can't be found from the jar diff --git a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java index 97ab78716b8b..e69e1ac4d713 100644 --- a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java +++ b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestFlinkCatalogTablePartitions.java @@ -39,7 +39,7 @@ public class TestFlinkCatalogTablePartitions extends CatalogTestBase { - private String tableName = "test_table"; + private final String tableName = "test_table"; @Parameter(index = 2) private FileFormat format; diff --git a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java index 4ad302dde436..a7c58e551112 100644 --- a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java +++ b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/TestTableLoader.java @@ -23,7 +23,7 @@ import org.apache.iceberg.TestTables; public class TestTableLoader implements TableLoader { - private File dir; + private final File dir; public static TableLoader of(String dir) { return new TestTableLoader(dir); diff --git a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/maintenance/operator/FlinkSqlExtension.java b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/maintenance/operator/FlinkSqlExtension.java index 90790b373d5f..91d36aa3e85d 100644 --- 
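Note on the SerializerHelper.readLongUTF hunks above: the removed `(int)` casts are no-ops because a byte operand is promoted to int before the bitwise operations; the surviving `(char2 & 0xC0) != 0x80` test is the standard check that a byte is a UTF-8 continuation byte (bit pattern 10xxxxxx). A minimal sketch of that check on arbitrary bytes:

    class Utf8Continuation {
      // A UTF-8 continuation byte has the form 10xxxxxx; the byte is promoted to int
      // before the mask, so an explicit (int) cast adds nothing.
      static boolean isContinuation(byte b) {
        return (b & 0xC0) == 0x80;
      }

      public static void main(String[] args) {
        System.out.println(isContinuation((byte) 0b10111111)); // true
        System.out.println(isContinuation((byte) 0b01111111)); // false
      }
    }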
a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/maintenance/operator/FlinkSqlExtension.java +++ b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/maintenance/operator/FlinkSqlExtension.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.api.TableResult; @@ -89,7 +90,9 @@ public void afterEach(ExtensionContext context) throws IOException { tables.forEach(t -> exec("DROP TABLE IF EXISTS %s", t.getField(0))); exec("USE CATALOG default_catalog"); exec("DROP CATALOG IF EXISTS %s", catalogName); - Files.walk(warehouse).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete); + try (Stream files = Files.walk(warehouse)) { + files.sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete); + } } /** diff --git a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java index 7033fd30e84f..187f2b0b9e31 100644 --- a/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java +++ b/flink/v1.19/flink/src/test/java/org/apache/iceberg/flink/source/reader/TestColumnStatsWatermarkExtractor.java @@ -122,7 +122,7 @@ public void testTimeUnit() throws IOException { new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MICROSECONDS); assertThat(extractor.extractWatermark(split(0))) - .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue() / 1000L); + .isEqualTo(MIN_VALUES.get(0).get(columnName) / 1000L); } @TestTemplate diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveClientPool.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveClientPool.java index b0ecb0ceffaa..c5328d8f97e9 100644 --- a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveClientPool.java +++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveClientPool.java @@ -100,8 +100,7 @@ protected IMetaStoreClient reconnect(IMetaStoreClient client) { @Override protected boolean isConnectionException(Exception e) { return super.isConnectionException(e) - || (e != null - && e instanceof MetaException + || (e instanceof MetaException && e.getMessage() .contains("Got exception: org.apache.thrift.transport.TTransportException")); } diff --git a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java index dfad78d344d9..da9ca21feea9 100644 --- a/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java +++ b/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveSchemaConverter.java @@ -42,7 +42,7 @@ class HiveSchemaConverter { private static final Logger LOG = LoggerFactory.getLogger(HiveSchemaConverter.class); private int id; - private boolean autoConvert; + private final boolean autoConvert; private HiveSchemaConverter(boolean autoConvert) { this.autoConvert = autoConvert; diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/ScriptRunner.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/ScriptRunner.java index a0b9e8fa45d7..5b318fc753cb 100644 --- a/hive-metastore/src/test/java/org/apache/iceberg/hive/ScriptRunner.java +++ 
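Note on the FlinkSqlExtension change above (the same pattern recurs in HiveIcebergTestUtils further down): Files.walk returns a Stream backed by open directory handles, so it must be closed; wrapping it in try-with-resources fixes the leak in the old one-liner. A minimal sketch of the pattern, assuming a directory tree to delete recursively:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Comparator;
    import java.util.stream.Stream;

    class RecursiveDelete {
      // Deepest paths first, so directories are already empty when they are deleted.
      static void deleteRecursively(Path root) throws IOException {
        try (Stream<Path> files = Files.walk(root)) { // closing releases directory handles
          files.sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
        }
      }
    }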
b/hive-metastore/src/test/java/org/apache/iceberg/hive/ScriptRunner.java @@ -33,10 +33,10 @@ public class ScriptRunner { private static final String DEFAULT_DELIMITER = ";"; - private Connection connection; + private final Connection connection; - private boolean stopOnError; - private boolean autoCommit; + private final boolean stopOnError; + private final boolean autoCommit; private PrintWriter logWriter = new PrintWriter(System.out); private PrintWriter errorLogWriter = new PrintWriter(System.err); @@ -110,7 +110,7 @@ private void runScript(Connection conn, Reader reader) throws IOException, SQLEx StringBuilder command = null; try { LineNumberReader lineReader = new LineNumberReader(reader); - String line = null; + String line; while ((line = lineReader.readLine()) != null) { if (command == null) { command = new StringBuilder(); @@ -118,13 +118,11 @@ private void runScript(Connection conn, Reader reader) throws IOException, SQLEx String trimmedLine = line.trim(); if (trimmedLine.startsWith("--")) { println(trimmedLine); - } else if (trimmedLine.length() < 1 || trimmedLine.startsWith("//")) { - // Do nothing - } else if (trimmedLine.length() < 1 || trimmedLine.startsWith("--")) { + } else if (trimmedLine.isEmpty() || trimmedLine.startsWith("//")) { // Do nothing } else if (!fullLineDelimiter && trimmedLine.endsWith(getDelimiter()) || fullLineDelimiter && trimmedLine.equals(getDelimiter())) { - command.append(line.substring(0, line.lastIndexOf(getDelimiter()))); + command.append(line, 0, line.lastIndexOf(getDelimiter())); command.append(" "); Statement statement = conn.createStatement(); diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java index 95bf6c697c32..ccb8fc3a88f7 100644 --- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java +++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCatalog.java @@ -93,7 +93,7 @@ * Run all the tests from abstract of {@link CatalogTests} with few specific tests related to HIVE. 
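Note on the ScriptRunner hunk above: besides collapsing the duplicated `startsWith("--")` branch, the patch switches to `command.append(line, 0, idx)`, i.e. StringBuilder.append(CharSequence, start, end), which copies the character range directly instead of allocating an intermediate substring. A small sketch of that overload, using a hypothetical statement and delimiter:

    class AppendRange {
      public static void main(String[] args) {
        String line = "SELECT 1;";
        int end = line.lastIndexOf(";");
        StringBuilder command = new StringBuilder();
        // Appends characters [0, end) without creating a temporary substring.
        command.append(line, 0, end).append(' ');
        System.out.println(command); // "SELECT 1 "
      }
    }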
*/ public class TestHiveCatalog extends CatalogTests { - private static ImmutableMap meta = + private static final ImmutableMap meta = ImmutableMap.of( "owner", "apache", "group", "iceberg", diff --git a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java index 44707f216e18..61af2c7e79f3 100644 --- a/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java +++ b/hive-metastore/src/test/java/org/apache/iceberg/hive/TestHiveCommitLocks.java @@ -86,7 +86,7 @@ public class TestHiveCommitLocks { private static HiveClientPool spyClientPool = null; private static CachedClientPool spyCachedClientPool = null; private static Configuration overriddenHiveConf; - private static AtomicReference spyClientRef = new AtomicReference<>(); + private static final AtomicReference spyClientRef = new AtomicReference<>(); private static IMetaStoreClient spyClient = null; HiveTableOperations ops = null; TableMetadata metadataV1 = null; @@ -387,7 +387,7 @@ public void testUnLockAfterInterruptedGetTable() throws TException { /** Wraps an ArgumentCaptor to provide data based on the request */ private class ShowLocksResponseElementWrapper extends ShowLocksResponseElement { - private ArgumentCaptor wrapped; + private final ArgumentCaptor wrapped; private ShowLocksResponseElementWrapper(ArgumentCaptor wrapped) { this.wrapped = wrapped; diff --git a/mr/src/main/java/org/apache/iceberg/mr/hive/Deserializer.java b/mr/src/main/java/org/apache/iceberg/mr/hive/Deserializer.java index 62ce4930cc8a..585242cd1119 100644 --- a/mr/src/main/java/org/apache/iceberg/mr/hive/Deserializer.java +++ b/mr/src/main/java/org/apache/iceberg/mr/hive/Deserializer.java @@ -41,7 +41,7 @@ import org.apache.iceberg.types.Types.StructType; class Deserializer { - private FieldDeserializer fieldDeserializer; + private final FieldDeserializer fieldDeserializer; /** * Builder to create a Deserializer instance. Requires an Iceberg Schema and the Hive @@ -279,8 +279,8 @@ String sourceName(String originalName) { * the provided writerInspector. 
*/ private static class ObjectInspectorPair { - private ObjectInspector writerInspector; - private ObjectInspector sourceInspector; + private final ObjectInspector writerInspector; + private final ObjectInspector sourceInspector; ObjectInspectorPair(ObjectInspector writerInspector, ObjectInspector sourceInspector) { this.writerInspector = writerInspector; diff --git a/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java b/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java index f206bba47605..59bb38e5b2d6 100644 --- a/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java +++ b/mr/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java @@ -54,8 +54,9 @@ public class HiveIcebergSerDe extends AbstractSerDe { private ObjectInspector inspector; private Schema tableSchema; - private Map deserializers = Maps.newHashMapWithExpectedSize(1); - private Container row = new Container<>(); + private final Map deserializers = + Maps.newHashMapWithExpectedSize(1); + private final Container row = new Container<>(); @Override public void initialize(@Nullable Configuration configuration, Properties serDeProperties) diff --git a/mr/src/test/java/org/apache/iceberg/mr/TestHelper.java b/mr/src/test/java/org/apache/iceberg/mr/TestHelper.java index 72475f70d718..f9f2334cfcf4 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/TestHelper.java +++ b/mr/src/test/java/org/apache/iceberg/mr/TestHelper.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.nio.file.Path; -import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; @@ -40,6 +39,7 @@ import org.apache.iceberg.data.Record; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.junit.rules.TemporaryFolder; public class TestHelper { @@ -152,7 +152,7 @@ private GenericAppenderHelper appender() { public static class RecordsBuilder { - private final List records = new ArrayList(); + private final List records = Lists.newArrayList(); private final Schema schema; private RecordsBuilder(Schema schema) { diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java b/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java index 43a61d83e434..4e1779411add 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergTestUtils.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.UUID; import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.serde2.io.DateWritable; @@ -279,11 +280,14 @@ public static void validateData(List expected, List actual, int */ public static void validateFiles(Table table, Configuration conf, JobID jobId, int dataFileNum) throws IOException { - List dataFiles = - Files.walk(Paths.get(table.location() + "/data")) - .filter(Files::isRegularFile) - .filter(path -> !path.getFileName().toString().startsWith(".")) - .collect(Collectors.toList()); + List dataFiles; + try (Stream files = Files.walk(Paths.get(table.location() + "/data"))) { + dataFiles = + files + .filter(Files::isRegularFile) + .filter(path -> !path.getFileName().toString().startsWith(".")) + .collect(Collectors.toList()); + } 
assertThat(dataFiles).hasSize(dataFileNum); assertThat( diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java index fd15a9a62621..9018d4518cbf 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java @@ -781,7 +781,7 @@ private void runCreateAndReadTest( expected.addAll(data.get(partition)); } - List descRows = shell.executeStatement("SELECT * FROM " + identifier.toString()); + List descRows = shell.executeStatement("SELECT * FROM " + identifier); List records = HiveIcebergTestUtils.valueForRow(icebergTable.schema(), descRows); HiveIcebergTestUtils.validateData(expected, records, 0); diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java index 18fc4f305070..ce3a6fd92441 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java @@ -1341,14 +1341,15 @@ private StringBuilder buildComplexTypeInnerQuery(Object field, Type type) { } else if (type instanceof Types.StructType) { query.append("named_struct("); ((GenericRecord) field) - .struct().fields().stream() - .forEach( - f -> - query - .append(buildComplexTypeInnerQuery(f.name(), Types.StringType.get())) - .append( - buildComplexTypeInnerQuery( - ((GenericRecord) field).getField(f.name()), f.type()))); + .struct() + .fields() + .forEach( + f -> + query + .append(buildComplexTypeInnerQuery(f.name(), Types.StringType.get())) + .append( + buildComplexTypeInnerQuery( + ((GenericRecord) field).getField(f.name()), f.type()))); query.setLength(query.length() - 1); query.append("),"); } else if (type instanceof Types.StringType) { diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java index 8c8cf894c9f1..f2710290d5c2 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java @@ -552,10 +552,9 @@ private static String tablePath(TableIdentifier identifier) { private String getStringValueForInsert(Object value, Type type) { String template = "\'%s\'"; if (type.equals(Types.TimestampType.withoutZone())) { - return String.format(template, Timestamp.valueOf((LocalDateTime) value).toString()); + return String.format(template, Timestamp.valueOf((LocalDateTime) value)); } else if (type.equals(Types.TimestampType.withZone())) { - return String.format( - template, Timestamp.from(((OffsetDateTime) value).toInstant()).toString()); + return String.format(template, Timestamp.from(((OffsetDateTime) value).toInstant())); } else if (type.equals(Types.BooleanType.get())) { // in hive2 boolean type values must not be surrounded in apostrophes. Otherwise the value is // translated to true. 
diff --git a/nessie/src/main/java/org/apache/iceberg/nessie/NessieIcebergClient.java b/nessie/src/main/java/org/apache/iceberg/nessie/NessieIcebergClient.java index 3fc38e436fb9..8bf6bc115d13 100644 --- a/nessie/src/main/java/org/apache/iceberg/nessie/NessieIcebergClient.java +++ b/nessie/src/main/java/org/apache/iceberg/nessie/NessieIcebergClient.java @@ -192,7 +192,7 @@ private Predicate namespacePredicate(Namespace ns) { private TableIdentifier toIdentifier(EntriesResponse.Entry entry) { List elements = entry.getName().getElements(); - return TableIdentifier.of(elements.toArray(new String[elements.size()])); + return TableIdentifier.of(elements.toArray(new String[0])); } public IcebergTable table(TableIdentifier tableIdentifier) { diff --git a/nessie/src/test/java/org/apache/iceberg/nessie/BaseTestIceberg.java b/nessie/src/test/java/org/apache/iceberg/nessie/BaseTestIceberg.java index d56d07f7d27a..40e33759791f 100644 --- a/nessie/src/test/java/org/apache/iceberg/nessie/BaseTestIceberg.java +++ b/nessie/src/test/java/org/apache/iceberg/nessie/BaseTestIceberg.java @@ -172,7 +172,7 @@ protected Table createTable(TableIdentifier tableIdentifier, int count) { createMissingNamespaces(tableIdentifier); return catalog.createTable(tableIdentifier, schema(count)); } catch (Throwable t) { - LOG.error("unable to do create " + tableIdentifier.toString(), t); + LOG.error("unable to do create {}", tableIdentifier, t); throw t; } } diff --git a/nessie/src/test/java/org/apache/iceberg/nessie/TestNessieIcebergClient.java b/nessie/src/test/java/org/apache/iceberg/nessie/TestNessieIcebergClient.java index 05ee755d7c68..b1f844c6d991 100644 --- a/nessie/src/test/java/org/apache/iceberg/nessie/TestNessieIcebergClient.java +++ b/nessie/src/test/java/org/apache/iceberg/nessie/TestNessieIcebergClient.java @@ -168,7 +168,7 @@ public void testCreateNamespaceConflict() .hasMessageContaining("Namespace already exists: a"); client.commitTable( - null, newTableMetadata(), "file:///tmp/iceberg", (String) null, ContentKey.of("a", "tbl")); + null, newTableMetadata(), "file:///tmp/iceberg", null, ContentKey.of("a", "tbl")); assertThatThrownBy(() -> client.createNamespace(Namespace.of("a", "tbl"), Map.of())) .isInstanceOf(AlreadyExistsException.class) @@ -284,7 +284,7 @@ public void testDropNamespaceConflict() throws NessieConflictException, NessieNo client.createNamespace(Namespace.of("a"), Map.of()); client.commitTable( - null, newTableMetadata(), "file:///tmp/iceberg", (String) null, ContentKey.of("a", "tbl")); + null, newTableMetadata(), "file:///tmp/iceberg", null, ContentKey.of("a", "tbl")); assertThatThrownBy(() -> client.dropNamespace(Namespace.of("a", "tbl"))) .isInstanceOf(NoSuchNamespaceException.class) diff --git a/orc/src/main/java/org/apache/iceberg/data/orc/GenericOrcReaders.java b/orc/src/main/java/org/apache/iceberg/data/orc/GenericOrcReaders.java index 18ce07ac8b74..2dde66cdc488 100644 --- a/orc/src/main/java/org/apache/iceberg/data/orc/GenericOrcReaders.java +++ b/orc/src/main/java/org/apache/iceberg/data/orc/GenericOrcReaders.java @@ -209,7 +209,7 @@ protected StructReader( Types.StructType structType, Map idToConstant) { super(readers, structType, idToConstant); - this.template = structType != null ? 
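Note on the BaseTestIceberg logging change above: with SLF4J, a trailing argument that is not consumed by a `{}` placeholder is treated as the Throwable, so the parameterized form still records the stack trace while deferring message formatting until the level is enabled. A minimal sketch, assuming an slf4j Logger named LOG:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

      static void logFailure(Object tableIdentifier, Throwable t) {
        // {} is filled by tableIdentifier; the extra Throwable argument supplies the stack trace.
        LOG.error("unable to do create {}", tableIdentifier, t);
      }
    }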
GenericRecord.create(structType) : null; + this.template = GenericRecord.create(structType); } @Override diff --git a/orc/src/main/java/org/apache/iceberg/orc/ExpressionToSearchArgument.java b/orc/src/main/java/org/apache/iceberg/orc/ExpressionToSearchArgument.java index 609c8aedb563..2be64044e222 100644 --- a/orc/src/main/java/org/apache/iceberg/orc/ExpressionToSearchArgument.java +++ b/orc/src/main/java/org/apache/iceberg/orc/ExpressionToSearchArgument.java @@ -60,8 +60,8 @@ static SearchArgument convert(Expression expr, TypeDescription readSchema) { ImmutableSet.of( TypeID.BINARY, TypeID.FIXED, TypeID.UUID, TypeID.STRUCT, TypeID.MAP, TypeID.LIST); - private SearchArgument.Builder builder; - private Map idToColumnName; + private final SearchArgument.Builder builder; + private final Map idToColumnName; private ExpressionToSearchArgument( SearchArgument.Builder builder, Map idToColumnName) { diff --git a/orc/src/main/java/org/apache/iceberg/orc/ORC.java b/orc/src/main/java/org/apache/iceberg/orc/ORC.java index 7d1405bbd45b..18186fe3f20e 100644 --- a/orc/src/main/java/org/apache/iceberg/orc/ORC.java +++ b/orc/src/main/java/org/apache/iceberg/orc/ORC.java @@ -115,7 +115,7 @@ public static class WriteBuilder { private final Configuration conf; private Schema schema = null; private BiFunction> createWriterFunc; - private Map metadata = Maps.newHashMap(); + private final Map metadata = Maps.newHashMap(); private MetricsConfig metricsConfig; private Function, Context> createContextFunc = Context::dataContext; private final Map config = Maps.newLinkedHashMap(); diff --git a/orc/src/main/java/org/apache/iceberg/orc/ORCSchemaUtil.java b/orc/src/main/java/org/apache/iceberg/orc/ORCSchemaUtil.java index fae1a76c3706..ba6d6eedd1d4 100644 --- a/orc/src/main/java/org/apache/iceberg/orc/ORCSchemaUtil.java +++ b/orc/src/main/java/org/apache/iceberg/orc/ORCSchemaUtil.java @@ -232,7 +232,7 @@ public static Schema convert(TypeDescription orcSchema) { .map(Optional::get) .collect(Collectors.toList()); - if (fields.size() == 0) { + if (fields.isEmpty()) { throw new IllegalArgumentException("ORC schema does not contain Iceberg IDs"); } @@ -326,9 +326,7 @@ private static TypeDescription buildOrcProjection( } else { if (isRequired) { throw new IllegalArgumentException( - String.format( - "Field %d of type %s is required and was not found.", - fieldId, type.toString())); + String.format("Field %d of type %s is required and was not found.", fieldId, type)); } orcType = convert(fieldId, type, false); diff --git a/orc/src/test/java/org/apache/iceberg/orc/TestBloomFilter.java b/orc/src/test/java/org/apache/iceberg/orc/TestBloomFilter.java index cfc29623dad6..2941fb89db62 100644 --- a/orc/src/test/java/org/apache/iceberg/orc/TestBloomFilter.java +++ b/orc/src/test/java/org/apache/iceberg/orc/TestBloomFilter.java @@ -107,7 +107,7 @@ public void testWriteOption() throws Exception { try (Reader reader = OrcFile.createReader( - new Path(outFile.location()), new OrcFile.ReaderOptions(new Configuration())); ) { + new Path(outFile.location()), new OrcFile.ReaderOptions(new Configuration()))) { boolean[] readCols = new boolean[] {false, true, true, false}; RecordReaderImpl rows = (RecordReaderImpl) reader.rows(); OrcIndex indices = rows.readRowIndex(0, null, readCols); diff --git a/parquet/src/main/java/org/apache/iceberg/data/parquet/BaseParquetReaders.java b/parquet/src/main/java/org/apache/iceberg/data/parquet/BaseParquetReaders.java index 79585b097d27..38fd69393023 100644 --- 
a/parquet/src/main/java/org/apache/iceberg/data/parquet/BaseParquetReaders.java +++ b/parquet/src/main/java/org/apache/iceberg/data/parquet/BaseParquetReaders.java @@ -362,21 +362,19 @@ public ParquetValueReader primitive( case FIXED_LEN_BYTE_ARRAY: return new FixedReader(desc); case BINARY: - if (expected != null - && expected.typeId() == org.apache.iceberg.types.Type.TypeID.STRING) { + if (expected.typeId() == org.apache.iceberg.types.Type.TypeID.STRING) { return new ParquetValueReaders.StringReader(desc); } else { return new ParquetValueReaders.BytesReader(desc); } case INT32: - if (expected != null && expected.typeId() == org.apache.iceberg.types.Type.TypeID.LONG) { + if (expected.typeId() == org.apache.iceberg.types.Type.TypeID.LONG) { return new ParquetValueReaders.IntAsLongReader(desc); } else { return new ParquetValueReaders.UnboxedReader<>(desc); } case FLOAT: - if (expected != null - && expected.typeId() == org.apache.iceberg.types.Type.TypeID.DOUBLE) { + if (expected.typeId() == org.apache.iceberg.types.Type.TypeID.DOUBLE) { return new ParquetValueReaders.FloatAsDoubleReader(desc); } else { return new ParquetValueReaders.UnboxedReader<>(desc); diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java b/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java index c97512a17d87..3421c0b86d3a 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java @@ -346,7 +346,7 @@ public FileAppender build() throws IOException { for (Map.Entry entry : columnBloomFilterEnabled.entrySet()) { String colPath = entry.getKey(); String bloomEnabled = entry.getValue(); - propsBuilder.withBloomFilterEnabled(colPath, Boolean.valueOf(bloomEnabled)); + propsBuilder.withBloomFilterEnabled(colPath, Boolean.parseBoolean(bloomEnabled)); } for (Map.Entry entry : columnBloomFilterFpp.entrySet()) { @@ -389,7 +389,7 @@ public FileAppender build() throws IOException { for (Map.Entry entry : columnBloomFilterEnabled.entrySet()) { String colPath = entry.getKey(); String bloomEnabled = entry.getValue(); - parquetWriteBuilder.withBloomFilterEnabled(colPath, Boolean.valueOf(bloomEnabled)); + parquetWriteBuilder.withBloomFilterEnabled(colPath, Boolean.parseBoolean(bloomEnabled)); } for (Map.Entry entry : columnBloomFilterFpp.entrySet()) { diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetAvro.java b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetAvro.java index 680eb1435967..65c472178d8a 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetAvro.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetAvro.java @@ -301,7 +301,7 @@ public Schema map(Schema map, Schema value) { @Override public Schema primitive(Schema primitive) { LogicalType logicalType = primitive.getLogicalType(); - if (logicalType != null && logicalType instanceof LogicalTypes.Decimal) { + if (logicalType instanceof LogicalTypes.Decimal) { LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType; if (decimal.getPrecision() <= 9) { return new ParquetDecimal(decimal.getPrecision(), decimal.getScale()) diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetBloomRowGroupFilter.java b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetBloomRowGroupFilter.java index 11201bd88e5a..553b8a0fa3f8 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetBloomRowGroupFilter.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetBloomRowGroupFilter.java 
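Note on the ParquetAvro and BaseParquetReaders hunks above: `x instanceof T` is already false for a null reference, so the `logicalType != null &&` guard was redundant. The removed `expected != null` checks in BaseParquetReaders are a stronger assumption (that `expected` is never null on those paths) rather than a pure cleanup, so that part is worth confirming during review. A minimal sketch of the instanceof/null behavior, with a hypothetical value:

    class InstanceofNull {
      public static void main(String[] args) {
        Object logicalType = null;
        // instanceof never throws; it simply evaluates to false for null.
        System.out.println(logicalType instanceof Number);                        // false
        System.out.println(logicalType != null && logicalType instanceof Number); // same result
      }
    }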
@@ -277,7 +277,7 @@ private BloomFilter loadBloomFilter(int id) { private boolean shouldRead( PrimitiveType primitiveType, T value, BloomFilter bloom, Type type) { - long hashValue = 0; + long hashValue; switch (primitiveType.getPrimitiveTypeName()) { case INT32: switch (type.typeId()) { diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetTypeVisitor.java b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetTypeVisitor.java index 1f6e8554c463..17518a175f61 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetTypeVisitor.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetTypeVisitor.java @@ -90,7 +90,7 @@ private static T visitThreeLevelList( } private static T visitListElement(Type listElement, ParquetTypeVisitor visitor) { - T elementResult = null; + T elementResult; visitor.beforeElementField(listElement); try { diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java index a335fe11cf49..62a49da25e56 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueReaders.java @@ -343,7 +343,7 @@ public BigDecimal read(BigDecimal ignored) { } public static class BinaryAsDecimalReader extends PrimitiveReader { - private int scale; + private final int scale; public BinaryAsDecimalReader(ColumnDescriptor desc, int scale) { super(desc); diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueWriters.java b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueWriters.java index 4eddf91a182f..90766983d8c8 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueWriters.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetValueWriters.java @@ -371,7 +371,7 @@ public Stream> metrics() { List> fieldMetricsFromWriter = writer.metrics().collect(Collectors.toList()); - if (fieldMetricsFromWriter.size() == 0) { + if (fieldMetricsFromWriter.isEmpty()) { // we are not tracking field metrics for this type ourselves return Stream.empty(); } else if (fieldMetricsFromWriter.size() == 1) { diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetWriteAdapter.java b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetWriteAdapter.java index 049fed0decde..a4a325527710 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/ParquetWriteAdapter.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/ParquetWriteAdapter.java @@ -38,7 +38,7 @@ @Deprecated public class ParquetWriteAdapter implements FileAppender { private ParquetWriter writer; - private MetricsConfig metricsConfig; + private final MetricsConfig metricsConfig; private ParquetMetadata footer; public ParquetWriteAdapter(ParquetWriter writer, MetricsConfig metricsConfig) { diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/VectorizedParquetReader.java b/parquet/src/main/java/org/apache/iceberg/parquet/VectorizedParquetReader.java index 773e0f7a85d0..35d94f328d60 100644 --- a/parquet/src/main/java/org/apache/iceberg/parquet/VectorizedParquetReader.java +++ b/parquet/src/main/java/org/apache/iceberg/parquet/VectorizedParquetReader.java @@ -45,7 +45,7 @@ public class VectorizedParquetReader extends CloseableGroup implements Closea private final ParquetReadOptions options; private final Function> batchReaderFunc; private final Expression filter; - private boolean reuseContainers; + private final boolean 
reuseContainers; private final boolean caseSensitive; private final int batchSize; private final NameMapping nameMapping; diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java b/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java index 09673e603338..bced36aa4570 100644 --- a/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java +++ b/parquet/src/test/java/org/apache/iceberg/parquet/ParquetWritingTestUtils.java @@ -71,7 +71,7 @@ static long write( GenericData.Record... records) throws IOException { - long len = 0; + long len; FileAppender writer = Parquet.write(localOutput(file)) diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java index 5f64c7230601..2da9b2b9deba 100644 --- a/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java +++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestBloomRowGroupFilter.java @@ -149,7 +149,7 @@ public class TestBloomRowGroupFilter { static { StringBuilder sb = new StringBuilder(); for (int i = 0; i < 200; i += 1) { - sb.append(UUID.randomUUID().toString()); + sb.append(UUID.randomUUID()); } TOO_LONG_FOR_STATS = sb.toString(); } diff --git a/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java b/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java index cf2c1188bf44..d690d3cf5175 100644 --- a/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java +++ b/parquet/src/test/java/org/apache/iceberg/parquet/TestDictionaryRowGroupFilter.java @@ -140,7 +140,7 @@ public class TestDictionaryRowGroupFilter { static { StringBuilder sb = new StringBuilder(); for (int i = 0; i < 200; i += 1) { - sb.append(UUID.randomUUID().toString()); + sb.append(UUID.randomUUID()); } TOO_LONG_FOR_STATS = sb.toString(); } diff --git a/pig/src/main/java/org/apache/iceberg/pig/IcebergStorage.java b/pig/src/main/java/org/apache/iceberg/pig/IcebergStorage.java index 51221666a02b..5f3d3ac665fd 100644 --- a/pig/src/main/java/org/apache/iceberg/pig/IcebergStorage.java +++ b/pig/src/main/java/org/apache/iceberg/pig/IcebergStorage.java @@ -70,8 +70,8 @@ public class IcebergStorage extends LoadFunc public static final String PIG_ICEBERG_TABLES_IMPL = "pig.iceberg.tables.impl"; private static Tables iceberg; - private static Map tables = Maps.newConcurrentMap(); - private static Map locations = Maps.newConcurrentMap(); + private static final Map tables = Maps.newConcurrentMap(); + private static final Map locations = Maps.newConcurrentMap(); private String signature; diff --git a/snowflake/src/main/java/org/apache/iceberg/snowflake/JdbcSnowflakeClient.java b/snowflake/src/main/java/org/apache/iceberg/snowflake/JdbcSnowflakeClient.java index d283fa906447..e947db362a3a 100644 --- a/snowflake/src/main/java/org/apache/iceberg/snowflake/JdbcSnowflakeClient.java +++ b/snowflake/src/main/java/org/apache/iceberg/snowflake/JdbcSnowflakeClient.java @@ -191,13 +191,11 @@ public boolean schemaExists(SnowflakeIdentifier schema) { final String finalQuery = "SHOW TABLES IN SCHEMA IDENTIFIER(?) 
LIMIT 1"; - List tables; try { - tables = - connectionPool.run( - conn -> - queryHarness.query( - conn, finalQuery, TABLE_RESULT_SET_HANDLER, schema.toIdentifierString())); + connectionPool.run( + conn -> + queryHarness.query( + conn, finalQuery, TABLE_RESULT_SET_HANDLER, schema.toIdentifierString())); } catch (SQLException e) { if (SCHEMA_NOT_FOUND_ERROR_CODES.contains(e.getErrorCode())) { return false; diff --git a/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeCatalog.java b/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeCatalog.java index 06dacad185c6..7f64b4ca49fd 100644 --- a/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeCatalog.java +++ b/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeCatalog.java @@ -176,7 +176,7 @@ public void createNamespace(Namespace namespace, Map metadata) { @Override public List listNamespaces(Namespace namespace) { SnowflakeIdentifier scope = NamespaceHelpers.toSnowflakeIdentifier(namespace); - List results = null; + List results; switch (scope.type()) { case ROOT: results = snowflakeClient.listDatabases(); diff --git a/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeTableMetadata.java b/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeTableMetadata.java index c550b3e13a3a..512c818eeaeb 100644 --- a/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeTableMetadata.java +++ b/snowflake/src/main/java/org/apache/iceberg/snowflake/SnowflakeTableMetadata.java @@ -93,7 +93,7 @@ public String toString() { } public String toDebugString() { - return String.format("%s, rawJsonVal: %s", toString(), rawJsonVal); + return String.format("%s, rawJsonVal: %s", this, rawJsonVal); } /** diff --git a/snowflake/src/test/java/org/apache/iceberg/snowflake/SnowflakeCatalogTest.java b/snowflake/src/test/java/org/apache/iceberg/snowflake/SnowflakeCatalogTest.java index b3d5cef431e5..ecad072de724 100644 --- a/snowflake/src/test/java/org/apache/iceberg/snowflake/SnowflakeCatalogTest.java +++ b/snowflake/src/test/java/org/apache/iceberg/snowflake/SnowflakeCatalogTest.java @@ -89,7 +89,7 @@ public void before() { "s3://tab1/metadata/v3.metadata.json", TableMetadataParser.toJson( TableMetadata.newTableMetadata( - schema, partitionSpec, "s3://tab1", ImmutableMap.of())) + schema, partitionSpec, "s3://tab1", ImmutableMap.of())) .getBytes()); fakeFileIO.addFile( "wasbs://mycontainer@myaccount.blob.core.windows.net/tab3/metadata/v334.metadata.json", @@ -98,13 +98,13 @@ public void before() { schema, partitionSpec, "wasbs://mycontainer@myaccount.blob.core.windows.net/tab1/", - ImmutableMap.of())) + ImmutableMap.of())) .getBytes()); fakeFileIO.addFile( "gs://tab5/metadata/v793.metadata.json", TableMetadataParser.toJson( TableMetadata.newTableMetadata( - schema, partitionSpec, "gs://tab5/", ImmutableMap.of())) + schema, partitionSpec, "gs://tab5/", ImmutableMap.of())) .getBytes()); fakeFileIOFactory = diff --git a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java index f00b94219078..7af9dfc58737 100644 --- a/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java +++ b/spark/v3.5/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/SparkRowLevelOperationsTestBase.java @@ -255,7 +255,7 @@ protected void createOrReplaceView(String name, List data, 
Encoder enc private Dataset toDS(String schema, String jsonData) { List jsonRows = Arrays.stream(jsonData.split("\n")) - .filter(str -> str.trim().length() > 0) + .filter(str -> !str.trim().isEmpty()) .collect(Collectors.toList()); Dataset jsonDS = spark.createDataset(jsonRows, Encoders.STRING()); diff --git a/spark/v3.5/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java b/spark/v3.5/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java index 63d24f7da553..d8f9301a7d82 100644 --- a/spark/v3.5/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java +++ b/spark/v3.5/spark/src/jmh/java/org/apache/iceberg/spark/action/RandomGeneratingUDF.java @@ -29,7 +29,7 @@ class RandomGeneratingUDF implements Serializable { private final long uniqueValues; - private Random rand = new Random(); + private final Random rand = new Random(); RandomGeneratingUDF(long uniqueValues) { this.uniqueValues = uniqueValues; @@ -43,8 +43,7 @@ UserDefinedFunction randomLongUDF() { UserDefinedFunction randomString() { return udf( - () -> (String) RandomUtil.generatePrimitive(Types.StringType.get(), rand), - DataTypes.StringType) + () -> RandomUtil.generatePrimitive(Types.StringType.get(), rand), DataTypes.StringType) .asNondeterministic() .asNonNullable(); } diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java index bbbb46e803e2..f76f12355f1f 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/PruneColumnsWithReordering.java @@ -203,7 +203,7 @@ public Type map(Types.MapType map, Supplier keyResult, Supplier valu "Cannot project a map of optional values as required values: %s", map); Preconditions.checkArgument( - StringType.class.isInstance(requestedMap.keyType()), + requestedMap.keyType() instanceof StringType, "Invalid map key type (not string): %s", requestedMap.keyType()); diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java index 39518ffbb945..af0fa84f67a1 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/Spark3Util.java @@ -888,7 +888,7 @@ public static List getPartitions( JavaConverters.collectionAsScalaIterableConverter(ImmutableList.of(rootPath)) .asScala() .toSeq(), - scala.collection.immutable.Map$.MODULE$.empty(), + scala.collection.immutable.Map$.MODULE$.empty(), userSpecifiedSchema, fileStatusCache, Option.empty(), diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java index 8447dbdcead1..7a96e97fb98a 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java @@ -183,7 +183,7 @@ public static List getPartitions( Option> scalaPartitionFilter; if (partitionFilter != null && !partitionFilter.isEmpty()) { Builder, scala.collection.immutable.Map> builder = - Map$.MODULE$.newBuilder(); + Map$.MODULE$.newBuilder(); partitionFilter.forEach((key, value) -> builder.$plus$eq(Tuple2.apply(key, value))); scalaPartitionFilter = Option.apply(builder.result()); } else { diff --git 
a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java index b00ed42008f1..5fbb4117feb8 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/DeleteOrphanFilesSparkAction.java @@ -116,7 +116,7 @@ public class DeleteOrphanFilesSparkAction extends BaseSparkAction equalSchemes = flattenMap(EQUAL_SCHEMES_DEFAULT); private Map equalAuthorities = Collections.emptyMap(); private PrefixMismatchMode prefixMismatchMode = PrefixMismatchMode.ERROR; - private String location = null; + private String location; private long olderThanTimestamp = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(3); private Dataset compareToFileList; private Consumer deleteFunc = null; diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java index e301ad512c70..d33e5e540893 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteDataFilesSparkAction.java @@ -393,7 +393,7 @@ private Result doExecuteWithPartialProgress( Stream toGroupStream( RewriteExecutionContext ctx, Map>> groupsByPartition) { return groupsByPartition.entrySet().stream() - .filter(e -> e.getValue().size() != 0) + .filter(e -> !e.getValue().isEmpty()) .flatMap( e -> { StructLike partition = e.getKey(); diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java index e9edfeb985c7..60e2b11881cb 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewriteManifestsSparkAction.java @@ -103,9 +103,9 @@ public class RewriteManifestsSparkAction private final long targetManifestSizeBytes; private final boolean shouldStageManifests; - private PartitionSpec spec = null; + private PartitionSpec spec; private Predicate predicate = manifest -> true; - private String outputLocation = null; + private String outputLocation; RewriteManifestsSparkAction(SparkSession spark, Table table) { super(spark); diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java index 1166740f441a..282222ae716f 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/actions/RewritePositionDeleteFilesSparkAction.java @@ -309,7 +309,7 @@ private Result doExecuteWithPartialProgress( // stop commit service commitService.close(); List commitResults = commitService.results(); - if (commitResults.size() == 0) { + if (commitResults.isEmpty()) { LOG.error( "{} is true but no rewrite commits succeeded. Check the logs to determine why the individual " + "commits failed. 
If this is persistent it may help to increase {} which will break the rewrite operation " @@ -331,7 +331,7 @@ private Stream toGroupStream( RewriteExecutionContext ctx, Map>> groupsByPartition) { return groupsByPartition.entrySet().stream() - .filter(e -> e.getValue().size() != 0) + .filter(e -> !e.getValue().isEmpty()) .flatMap( e -> { StructLike partition = e.getKey(); diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java index 209c06bacb3e..678ebd218d71 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/data/SparkParquetWriters.java @@ -560,7 +560,7 @@ private static class InternalRowWriter extends ParquetValueWriters.StructWriter< private InternalRowWriter(List> writers, List types) { super(writers); - this.types = types.toArray(new DataType[types.size()]); + this.types = types.toArray(new DataType[0]); } @Override diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java index 9b075b675565..50a1259c8626 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkFileWriterFactory.java @@ -48,7 +48,7 @@ class SparkFileWriterFactory extends BaseFileWriterFactory { private StructType dataSparkType; private StructType equalityDeleteSparkType; private StructType positionDeleteSparkType; - private Map writeProperties; + private final Map writeProperties; SparkFileWriterFactory( Table table, diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkMicroBatchStream.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkMicroBatchStream.java index 320d2e14adc9..49180e07c465 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkMicroBatchStream.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkMicroBatchStream.java @@ -395,7 +395,6 @@ public Offset latestOffset(Offset startOffset, ReadLimit limit) { Snapshot nextValid = nextValidSnapshot(curSnapshot); if (nextValid == null) { // nextValide implies all the remaining snapshots should be skipped. - shouldContinueReading = false; break; } // we found the next available snapshot, continue from there. 
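The hunks above and below apply the same few Java cleanups over and over: fields assigned exactly once become final, dead default initializers (= null, = 0) are dropped, size() == 0 / size() < 1 checks become isEmpty(), redundant toString() calls inside String.format are removed, raw generic types gain the diamond operator, and arrays are produced with toArray(new T[0]). As a quick reference, here is a minimal, self-contained sketch of those idioms; the CleanupIdioms class and its members are hypothetical and are not part of the Iceberg sources touched by this diff.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical example class; it only demonstrates the cleanup idioms applied in this diff.
class CleanupIdioms {
  // Assigned exactly once, so declared final; no redundant "= 0" or "= null" initializers.
  private static final AtomicInteger ID_COUNTER = new AtomicInteger(0);
  private final String name;

  CleanupIdioms(String name) {
    this.name = name;
  }

  static String[] firstBatch(List<String> values) {
    // Prefer isEmpty() over size() == 0 or size() < 1.
    if (values.isEmpty()) {
      return new String[0];
    }
    // Prefer toArray(new T[0]) over presizing the array with values.size().
    return values.toArray(new String[0]);
  }

  String describe() {
    // String.format applies toString() implicitly, so no explicit toString() call is needed.
    return String.format("CleanupIdioms(id=%s, name=%s)", ID_COUNTER.getAndIncrement(), name);
  }

  public static void main(String[] args) {
    List<String> values = new ArrayList<>(); // diamond operator instead of a raw type
    values.add("a");
    System.out.println(firstBatch(values).length); // prints 1
    System.out.println(new CleanupIdioms("demo").describe());
  }
}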
diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java index b430e6fca233..6b97e48133fd 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkScanBuilder.java @@ -95,7 +95,7 @@ public class SparkScanBuilder private final List metaColumns = Lists.newArrayList(); private final InMemoryMetricsReporter metricsReporter; - private Schema schema = null; + private Schema schema; private boolean caseSensitive; private List filterExpressions = null; private Predicate[] pushedPredicates = NO_PREDICATES; diff --git a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java index 25393888f95c..c5c86c3ebf28 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java +++ b/spark/v3.5/spark/src/main/java/org/apache/iceberg/spark/source/SparkStagedScanBuilder.java @@ -44,7 +44,7 @@ class SparkStagedScanBuilder implements ScanBuilder, SupportsPushDownRequiredCol private final SparkReadConf readConf; private final List metaColumns = Lists.newArrayList(); - private Schema schema = null; + private Schema schema; SparkStagedScanBuilder(SparkSession spark, Table table, CaseInsensitiveStringMap options) { this.spark = spark; diff --git a/spark/v3.5/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java b/spark/v3.5/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java index 075733aeab71..9ed7167c94c6 100644 --- a/spark/v3.5/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java +++ b/spark/v3.5/spark/src/main/java/org/apache/spark/sql/catalyst/analysis/NoSuchProcedureException.java @@ -32,7 +32,7 @@ public NoSuchProcedureException(Identifier ident) { Option.empty(), Option.empty(), Option.empty(), - Map$.MODULE$.empty(), + Map$.MODULE$.empty(), new QueryContext[0]); } } diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestBase.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestBase.java index e7d5a0f0398a..40bc4bbce54e 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestBase.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestBase.java @@ -118,7 +118,7 @@ protected long waitUntilAfter(long timestampMillis) { protected List sql(String query, Object... 
args) { List rows = spark.sql(String.format(query, args)).collectAsList(); - if (rows.size() < 1) { + if (rows.isEmpty()) { return ImmutableList.of(); } diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestSparkCatalogOperations.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestSparkCatalogOperations.java index d0860ff01451..8f4c09356d13 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestSparkCatalogOperations.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/TestSparkCatalogOperations.java @@ -38,7 +38,7 @@ import org.junit.jupiter.api.TestTemplate; public class TestSparkCatalogOperations extends CatalogTestBase { - private static boolean useNullableQuerySchema = ThreadLocalRandom.current().nextBoolean(); + private static final boolean useNullableQuerySchema = ThreadLocalRandom.current().nextBoolean(); @Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}") protected static Object[][] parameters() { diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java index db77da449e5d..a0f1fba0434c 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/actions/TestCreateActions.java @@ -141,7 +141,7 @@ public static Object[][] parameters() { }; } - private String baseTableName = "baseTable"; + private final String baseTableName = "baseTable"; private File tableDir; private String tableLocation; diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java index 478afcf09ae3..360b9ff20ec0 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/data/RandomData.java @@ -67,7 +67,7 @@ public static List generateList(Schema schema, int numRecords, long seed public static Iterable generateSpark(Schema schema, int numRecords, long seed) { return () -> new Iterator() { - private SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed); + private final SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed); private int count = 0; @Override @@ -114,7 +114,7 @@ private static Iterable newIterable( return () -> new Iterator() { private int count = 0; - private RandomDataGenerator generator = newGenerator.get(); + private final RandomDataGenerator generator = newGenerator.get(); @Override public boolean hasNext() { diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java index 53a35eec61ce..2627ec0d2b55 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/LogMessage.java @@ -22,7 +22,7 @@ import java.util.concurrent.atomic.AtomicInteger; public class LogMessage { - private static AtomicInteger idCounter = new AtomicInteger(0); + private static final AtomicInteger idCounter = new AtomicInteger(0); static LogMessage debug(String date, String message) { return new LogMessage(idCounter.getAndIncrement(), date, "DEBUG", message); diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java 
b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java index 27e7d7c496ef..7258344823f7 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestBaseReader.java @@ -87,7 +87,7 @@ public Integer next() { // Main reader class to test base class iteration logic. // Keeps track of iterator closure. private static class ClosureTrackingReader extends BaseReader { - private Map tracker = Maps.newHashMap(); + private final Map tracker = Maps.newHashMap(); ClosureTrackingReader(Table table, List tasks) { super(table, new BaseCombinedScanTask(tasks), null, null, false); diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java index 34315bc8a991..44885eb84551 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestCompressionSettings.java @@ -245,7 +245,7 @@ private String getCompressionType(InputFile inputFile) throws Exception { return footer.getBlocks().get(0).getColumns().get(0).getCodec().name(); default: FileContext fc = FileContext.getFileContext(CONF); - GenericDatumReader reader = new GenericDatumReader(); + GenericDatumReader reader = new GenericDatumReader<>(); DataFileReader fileReader = (DataFileReader) DataFileReader.openReader( diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java index e702f1f01eed..336ee5a8d2ea 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestDataFrameWrites.java @@ -91,7 +91,7 @@ public static Collection parameters() { private Map tableProperties; - private org.apache.spark.sql.types.StructType sparkSchema = + private final org.apache.spark.sql.types.StructType sparkSchema = new org.apache.spark.sql.types.StructType( new org.apache.spark.sql.types.StructField[] { new org.apache.spark.sql.types.StructField( @@ -106,16 +106,16 @@ public static Collection parameters() { org.apache.spark.sql.types.Metadata.empty()) }); - private Schema icebergSchema = + private final Schema icebergSchema = new Schema( Types.NestedField.optional(1, "optionalField", Types.StringType.get()), Types.NestedField.required(2, "requiredField", Types.StringType.get())); - private List data0 = + private final List data0 = Arrays.asList( "{\"optionalField\": \"a1\", \"requiredField\": \"bid_001\"}", "{\"optionalField\": \"a2\", \"requiredField\": \"bid_002\"}"); - private List data1 = + private final List data1 = Arrays.asList( "{\"optionalField\": \"d1\", \"requiredField\": \"bid_101\"}", "{\"optionalField\": \"d2\", \"requiredField\": \"bid_102\"}", @@ -220,16 +220,15 @@ private void writeDataWithFailOnPartition( final int numPartitions = 10; final int partitionToFail = new Random().nextInt(numPartitions); MapPartitionsFunction failOnFirstPartitionFunc = - (MapPartitionsFunction) - input -> { - int partitionId = TaskContext.getPartitionId(); + input -> { + int partitionId = TaskContext.getPartitionId(); - if (partitionId == partitionToFail) { - throw new SparkException( - String.format("Intended exception in partition %d !", partitionId)); - 
} - return input; - }; + if (partitionId == partitionToFail) { + throw new SparkException( + String.format("Intended exception in partition %d !", partitionId)); + } + return input; + }; Dataset df = createDataset(records, schema) @@ -287,8 +286,8 @@ public void testNullableWithWriteOption() throws IOException { .startsWith("2"); File location = temp.resolve("parquet").resolve("test").toFile(); - String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString()); - String targetPath = String.format("%s/nullable_poc/targetFolder/", location.toString()); + String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location); + String targetPath = String.format("%s/nullable_poc/targetFolder/", location); tableProperties = ImmutableMap.of(TableProperties.WRITE_DATA_LOCATION, targetPath); @@ -341,8 +340,8 @@ public void testNullableWithSparkSqlOption() throws IOException { .startsWith("2"); File location = temp.resolve("parquet").resolve("test").toFile(); - String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location.toString()); - String targetPath = String.format("%s/nullable_poc/targetFolder/", location.toString()); + String sourcePath = String.format("%s/nullable_poc/sourceFolder/", location); + String targetPath = String.format("%s/nullable_poc/targetFolder/", location); tableProperties = ImmutableMap.of(TableProperties.WRITE_DATA_LOCATION, targetPath); diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java index 0febfd687a00..35a675029c1c 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestIdentityPartitionData.java @@ -134,7 +134,7 @@ public static Object[][] parameters() { @TempDir private Path temp; - private PartitionSpec spec = + private final PartitionSpec spec = PartitionSpec.builderFor(LOG_SCHEMA).identity("date").identity("level").build(); private Table table = null; private Dataset logs = null; diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java index 2bd6dadff016..9464f687b0eb 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestPartitionPruning.java @@ -170,7 +170,7 @@ private static Instant getInstant(String timestampWithoutZone) { @TempDir private java.nio.file.Path temp; - private PartitionSpec spec = + private final PartitionSpec spec = PartitionSpec.builderFor(LOG_SCHEMA) .identity("date") .identity("level") diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java index 3d9d6eb8ebaf..17db46b85c35 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/source/TestStructuredStreaming.java @@ -115,7 +115,7 @@ public void testStreamingWriteAppendMode() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new 
File(checkpoint + "/commits/1"); assertThat(lastCommitFile.delete()).as("The commit file must be deleted").isTrue(); // restart the query from the checkpoint @@ -176,7 +176,7 @@ public void testStreamingWriteCompleteMode() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); assertThat(lastCommitFile.delete()).as("The commit file must be deleted").isTrue(); // restart the query from the checkpoint @@ -237,7 +237,7 @@ public void testStreamingWriteCompleteModeWithProjection() throws Exception { query.stop(); // remove the last commit to force Spark to reprocess batch #1 - File lastCommitFile = new File(checkpoint.toString() + "/commits/1"); + File lastCommitFile = new File(checkpoint + "/commits/1"); assertThat(lastCommitFile.delete()).as("The commit file must be deleted").isTrue(); // restart the query from the checkpoint diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java index a2a315f5625f..ae0aa2cda49b 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestCreateTable.java @@ -295,7 +295,7 @@ public void testCreateTableLocation() throws Exception { File tableLocation = Files.createTempDirectory(temp, "junit").toFile(); assertThat(tableLocation.delete()).isTrue(); - String location = "file:" + tableLocation.toString(); + String location = "file:" + tableLocation; sql( "CREATE TABLE %s " diff --git a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestNamespaceSQL.java b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestNamespaceSQL.java index ae9aee8ca416..0ba480692523 100644 --- a/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestNamespaceSQL.java +++ b/spark/v3.5/spark/src/test/java/org/apache/iceberg/spark/sql/TestNamespaceSQL.java @@ -64,7 +64,7 @@ protected static Object[][] parameters() { SparkCatalogConfig.HADOOP.catalogName(), SparkCatalogConfig.HADOOP.implementation(), SparkCatalogConfig.HADOOP.properties(), - SparkCatalogConfig.HADOOP.catalogName() + "." + NS.toString(), + SparkCatalogConfig.HADOOP.catalogName() + "." + NS, true }, {