From d1b20dfeb85706f99d74cba1dc717253a63055cb Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <adoroszlai@apache.org>
Date: Sun, 2 Mar 2025 20:03:13 +0100
Subject: [PATCH 1/4] HDDS-12456. AvoidFileStream

---
 dev-support/pmd/pmd-ruleset.xml               |  2 +
 .../hadoop/hdds/scm/net/NodeSchemaLoader.java |  4 +-
 .../certificate/utils/CertificateCodec.java   |  6 +--
 .../org/apache/hadoop/hdds/utils/IOUtils.java | 36 ++++++++++++++++++
 .../hadoop/ozone/common/StorageInfo.java      | 31 ++--------------
 .../hdds/conf/TestOzoneConfiguration.java     |  9 +++--
 .../common/helpers/ContainerUtils.java        |  5 ++-
 .../common/helpers/DatanodeIdYaml.java        |  8 ++--
 .../common/helpers/DatanodeVersionFile.java   | 23 ++----------
 .../common/impl/ContainerDataYaml.java        | 15 ++++----
 .../server/ratis/ContainerStateMachine.java   | 11 +++---
 .../container/common/utils/DiskCheckUtil.java | 17 +++++----
 .../keyvalue/KeyValueContainerCheck.java      |  3 +-
 .../replication/ContainerImporter.java        |  6 +--
 .../replication/GrpcReplicationClient.java    |  3 +-
 .../common/helpers/TestContainerUtils.java    |  7 ++--
 .../keyvalue/TestKeyValueContainer.java       | 37 +++++++++----------
 .../keyvalue/TestTarContainerPacker.java      | 23 +++++-------
 .../replication/TestContainerImporter.java    |  4 +-
 .../hadoop/hdds/fs/SaveSpaceUsageToFile.java  |  3 +-
 .../hdds/server/http/ProfileServlet.java      |  3 +-
 .../hadoop/hdds/utils/HddsServerUtil.java     |  4 +-
 .../hdds/utils/TestRDBSnapshotProvider.java   |  3 +-
 .../hadoop/hdds/utils/db/TestRDBStore.java    |  5 +--
 .../hadoop/ozone/audit/AuditLogTestUtils.java |  8 ++--
 .../TestRocksDBCheckpointDiffer.java          |  4 +-
 .../hdds/scm/ha/InterSCMGrpcClient.java       |  3 +-
 .../org/apache/ozone/test/JacocoServer.java   |  5 ++-
 .../cli/container/upgrade/UpgradeUtils.java   |  4 +-
 .../ozone/shell/keys/GetKeyHandler.java       |  7 ++--
 .../hadoop/ozone/shell/token/TokenOption.java |  9 +++--
 .../fs/ozone/TestOzoneFSInputStream.java      |  6 +--
 .../ozone/client/rpc/OzoneRpcClientTests.java |  5 ++-
 .../ozone/om/TestOMDbCheckpointServlet.java   |  7 ++--
 .../hadoop/ozone/om/TestOMRatisSnapshots.java |  7 ++--
 .../s3/awssdk/v1/AbstractS3SDKV1Tests.java    |  3 +-
 .../apache/hadoop/ozone/om/OzoneManager.java  |  6 +--
 .../ozone/om/OzoneManagerPrepareState.java    |  8 ++--
 .../OmRatisSnapshotProvider.java              |  5 ++-
 .../upgrade/TestOzoneManagerPrepareState.java | 11 +++---
 .../security/TestOzoneTokenIdentifier.java    | 27 +++++++-------
 .../apache/hadoop/ozone/recon/ReconUtils.java | 17 +++++----
 .../recon/api/TestTriggerDBSyncEndpoint.java  |  3 +-
 .../GetFailedDeletedBlocksTxnSubcommand.java  |  6 ++-
 ...ResetDeletedBlockRetryCountSubcommand.java |  5 ++-
 .../audit/parser/common/DatabaseHelper.java   |  9 +++--
 .../datanode/container/ExportSubcommand.java  |  5 ++-
 .../ozone/freon/RandomKeyGenerator.java       |  9 +++--
 .../ozone/freon/StreamingGenerator.java       |  6 +--
 49 files changed, 225 insertions(+), 228 deletions(-)

diff --git a/dev-support/pmd/pmd-ruleset.xml b/dev-support/pmd/pmd-ruleset.xml
index e4677d6b3402..761f85f0ec36 100644
--- a/dev-support/pmd/pmd-ruleset.xml
+++ b/dev-support/pmd/pmd-ruleset.xml
@@ -32,5 +32,7 @@
+  <rule ref="category/java/performance.xml/AvoidFileStream"/>
+  <exclude-pattern>.*/generated-sources/.*</exclude-pattern>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
index ed7e1a1ad853..7493909ee176 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
@@ -20,10 +20,10 @@ import static org.apache.commons.collections.EnumerationUtils.toList;
 
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -109,7 +109,7 @@ public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath)
     if (schemaFile.exists()) {
       LOG.info("Load network topology schema file {}",
           schemaFile.getAbsolutePath());
-      try (FileInputStream inputStream = new FileInputStream(schemaFile)) {
+      try (InputStream inputStream = Files.newInputStream(schemaFile.toPath())) {
         return loadSchemaFromStream(schemaFilePath, inputStream);
       }
     } else {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
index 3e1099b35f38..788bb14f1b04 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java
@@ -24,8 +24,6 @@
 import java.io.ByteArrayInputStream;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -244,7 +242,7 @@ public synchronized void writeCertificate(Path basePath, String fileName,
     File certificateFile =
         Paths.get(basePath.toString(), fileName).toFile();
 
-    try (FileOutputStream file = new FileOutputStream(certificateFile)) {
+    try (OutputStream file = Files.newOutputStream(certificateFile.toPath())) {
       file.write(pemEncodedCertificate.getBytes(DEFAULT_CHARSET));
     }
     LOG.info("Save certificate to {}", certificateFile.getAbsolutePath());
@@ -271,7 +269,7 @@ private CertPath getCertPath(Path path, String fileName) throws IOException,
       throw new IOException("Unable to find the requested certificate file. " +
           "Path: " + certFile);
     }
-    try (FileInputStream is = new FileInputStream(certFile)) {
+    try (InputStream is = Files.newInputStream(certFile.toPath())) {
       return generateCertPathFromInputStream(is);
     }
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
index c8b7634ac99b..28c9ecf323f6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
@@ -17,8 +17,21 @@
 
 package org.apache.hadoop.hdds.utils;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.nio.file.StandardOpenOption.CREATE;
+import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
+
+import jakarta.annotation.Nonnull;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.StringWriter;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Properties;
 import org.slf4j.Logger;
 
 /**
@@ -95,4 +108,27 @@ public static void closeQuietly(AutoCloseable... closeables) {
   public static void closeQuietly(Collection<? extends AutoCloseable> closeables) {
     close(null, closeables);
   }
+
+  /** Sync the file descriptor, if {@code out} is a {@code FileOutputStream}. */
+  public static void syncFD(OutputStream out) throws IOException {
+    if (out instanceof FileOutputStream) {
+      ((FileOutputStream) out).getFD().sync();
+    }
+  }
+
+  /** Write {@code properties} to the file at {@code path}, truncating any existing content. */
+  public static void writePropertiesToFile(Path path, Properties properties) throws IOException {
+    StringWriter out = new StringWriter();
+    properties.store(out, null);
+    Files.write(path, out.toString().getBytes(UTF_8), CREATE, TRUNCATE_EXISTING);
+  }
+
+  /** Read {@link Properties} from the file at {@code path}. */
+  public static @Nonnull Properties readPropertiesFromFile(Path path) throws IOException {
+    Properties props = new Properties();
+    try (InputStream in = Files.newInputStream(path)) {
+      props.load(in);
+    }
+    return props;
+  }
 }
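The three helpers above centralize the version-file handling that StorageInfo and DatanodeVersionFile previously implemented with RandomAccessFile. A minimal sketch of the intended round trip, with an illustrative temp file standing in for a real version file:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Properties;
    import org.apache.hadoop.hdds.utils.IOUtils;

    public final class PropertiesRoundTripExample {
      public static void main(String[] args) throws Exception {
        Path versionFile = Files.createTempFile("version", ".properties");

        Properties props = new Properties();
        props.setProperty("clusterID", "CID-example"); // illustrative key/value

        // store() the properties, truncating any previous file content
        IOUtils.writePropertiesToFile(versionFile, props);

        // load() the same key/value pairs back from disk
        Properties loaded = IOUtils.readPropertiesFromFile(versionFile);
        System.out.println(loaded.getProperty("clusterID")); // CID-example
      }
    }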
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index 96fc6981d452..b2390acdc75d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -21,14 +21,12 @@
 
 import com.google.common.base.Preconditions;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.util.Properties;
 import java.util.UUID;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -192,34 +190,11 @@ private void verifyCreationTime() {
 
   public void writeTo(File to)
       throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
-        FileOutputStream out = new FileOutputStream(file.getFD())) {
-      file.seek(0);
-      /*
-       * If server is interrupted before this line,
-       * the version file will remain unchanged.
-       */
-      properties.store(out, null);
-      /*
-       * Now the new fields are flushed to the head of the file, but file
-       * length can still be larger then required and therefore the file can
-       * contain whole or corrupted fields from its old contents in the end.
-       * If server is interrupted here and restarted later these extra fields
-       * either should not effect server behavior or should be handled
-       * by the server correctly.
-       */
-      file.setLength(out.getChannel().position());
-    }
+    IOUtils.writePropertiesToFile(to.toPath(), properties);
   }
 
   private Properties readFrom(File from) throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(from, "rws");
-        FileInputStream in = new FileInputStream(file.getFD())) {
-      Properties props = new Properties();
-      file.seek(0);
-      props.load(in);
-      return props;
-    }
+    return IOUtils.readPropertiesFromFile(from.toPath());
   }
 
   /**
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
index 16b663a59ebe..63370c864832 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
@@ -31,11 +31,12 @@
 
 import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Stream;
@@ -83,7 +84,7 @@ public void testGetAllPropertiesByTags(@TempDir File tempDir)
       throws Exception {
     File coreDefault = new File(tempDir, "core-default-test.xml");
     File coreSite = new File(tempDir, "core-site-test.xml");
-    FileOutputStream coreDefaultStream = new FileOutputStream(coreDefault);
+    OutputStream coreDefaultStream = Files.newOutputStream(coreDefault.toPath());
     try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
         coreDefaultStream, StandardCharsets.UTF_8))) {
       startConfig(out);
@@ -102,7 +103,7 @@ public void testGetAllPropertiesByTags(@TempDir File tempDir)
           .getProperty("dfs.random.key"));
     }
 
-    FileOutputStream coreSiteStream = new FileOutputStream(coreSite);
+    OutputStream coreSiteStream = Files.newOutputStream(coreSite.toPath());
     try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
         coreSiteStream, StandardCharsets.UTF_8))) {
       startConfig(out);
@@ -286,7 +287,7 @@ public void testInstantiationWithInputConfiguration(@TempDir File tempDir)
     Configuration configuration = new Configuration(true);
 
     File ozoneSite = new File(tempDir, "ozone-site.xml");
-    FileOutputStream ozoneSiteStream = new FileOutputStream(ozoneSite);
+    OutputStream ozoneSiteStream = Files.newOutputStream(ozoneSite.toPath());
     try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
         ozoneSiteStream, StandardCharsets.UTF_8))) {
       startConfig(out);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index bbc012d3cb98..fd0f2196f5b6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -28,8 +28,9 @@
 
 import com.google.common.base.Preconditions;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
@@ -177,7 +178,7 @@ public static synchronized DatanodeDetails readDatanodeDetailsFrom(File path)
       LOG.warn("Error loading DatanodeDetails yaml from {}",
           path.getAbsolutePath(), e);
       // Try to load as protobuf before giving up
-      try (FileInputStream in = new FileInputStream(path)) {
+      try (InputStream in = Files.newInputStream(path.toPath())) {
         return DatanodeDetails.getFromProtoBuf(
             HddsProtos.DatanodeDetailsProto.parseFrom(in));
       } catch (IOException io) {
- */ - properties.store(out, null); - } + IOUtils.writePropertiesToFile(path.toPath(), createProperties()); } - /** * Creates a property object from the specified file content. * @param versionFile @@ -84,11 +72,6 @@ public void createVersionFile(File path) throws * @throws IOException */ public static Properties readFrom(File versionFile) throws IOException { - try (RandomAccessFile file = new RandomAccessFile(versionFile, "rws"); - FileInputStream in = new FileInputStream(file.getFD())) { - Properties props = new Properties(); - props.load(in); - return props; - } + return IOUtils.readPropertiesFromFile(versionFile.toPath()); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index ac57be2e2638..64e162a958d6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -23,22 +23,22 @@ import com.google.common.base.Preconditions; import java.io.ByteArrayInputStream; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.slf4j.Logger; @@ -81,7 +81,7 @@ private ContainerDataYaml() { public static void createContainerFile(ContainerType containerType, ContainerData containerData, File containerFile) throws IOException { Writer writer = null; - FileOutputStream out = null; + OutputStream out = null; try { boolean withReplicaIndex = containerData instanceof KeyValueContainerData && @@ -93,8 +93,7 @@ public static void createContainerFile(ContainerType containerType, containerData.computeAndSetChecksum(yaml); // Write the ContainerData with checksum to Yaml file. - out = new FileOutputStream( - containerFile); + out = Files.newOutputStream(containerFile.toPath()); writer = new OutputStreamWriter(out, StandardCharsets.UTF_8); yaml.dump(containerData, writer); } finally { @@ -102,7 +101,7 @@ public static void createContainerFile(ContainerType containerType, if (writer != null) { writer.flush(); // make sure the container metadata is synced to disk. 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index ac57be2e2638..64e162a958d6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -23,22 +23,22 @@
 import com.google.common.base.Preconditions;
 import java.io.ByteArrayInputStream;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.slf4j.Logger;
@@ -81,7 +81,7 @@ private ContainerDataYaml() {
   public static void createContainerFile(ContainerType containerType,
       ContainerData containerData, File containerFile) throws IOException {
     Writer writer = null;
-    FileOutputStream out = null;
+    OutputStream out = null;
     try {
       boolean withReplicaIndex =
           containerData instanceof KeyValueContainerData &&
@@ -93,8 +93,7 @@ public static void createContainerFile(ContainerType containerType,
       containerData.computeAndSetChecksum(yaml);
 
       // Write the ContainerData with checksum to Yaml file.
-      out = new FileOutputStream(
-          containerFile);
+      out = Files.newOutputStream(containerFile.toPath());
       writer = new OutputStreamWriter(out, StandardCharsets.UTF_8);
       yaml.dump(containerData, writer);
     } finally {
@@ -102,7 +101,7 @@ public static void createContainerFile(ContainerType containerType,
         if (writer != null) {
           writer.flush();
           // make sure the container metadata is synced to disk.
-          out.getFD().sync();
+          IOUtils.syncFD(out);
           writer.close();
         }
       } catch (IOException ex) {
@@ -121,7 +120,7 @@ public static void createContainerFile(ContainerType containerType,
   public static ContainerData readContainerFile(File containerFile)
       throws IOException {
     Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
-    try (FileInputStream inputFileStream = new FileInputStream(containerFile)) {
+    try (InputStream inputFileStream = Files.newInputStream(containerFile.toPath())) {
       return readContainer(inputFileStream);
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 19a6675efd36..119cf283b22a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -21,10 +21,10 @@
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.file.Files;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -59,6 +59,7 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.utils.Cache;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.ResourceCache;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -327,7 +328,7 @@ private long loadSnapshot(SingleFileSnapshotInfo snapshot)
   public void buildMissingContainerSet(File snapshotFile) throws IOException {
     // initialize the dispatcher with snapshot so that it build the missing
    // container list
-    try (FileInputStream fin = new FileInputStream(snapshotFile)) {
+    try (InputStream fin = Files.newInputStream(snapshotFile.toPath())) {
       ContainerProtos.Container2BCSIDMapProto proto =
           ContainerProtos.Container2BCSIDMapProto
               .parseFrom(fin);
@@ -374,11 +375,11 @@ public long takeSnapshot() throws IOException {
       final File snapshotFile =
           storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
       LOG.info("{}: Taking a snapshot at:{} file {}", getGroupId(), ti, snapshotFile);
-      try (FileOutputStream fos = new FileOutputStream(snapshotFile)) {
+      try (OutputStream fos = Files.newOutputStream(snapshotFile.toPath())) {
         persistContainerSet(fos);
         fos.flush();
         // make sure the snapshot file is synced
-        fos.getFD().sync();
+        IOUtils.syncFD(fos);
       } catch (IOException ioe) {
         LOG.error("{}: Failed to write snapshot at:{} file {}", getGroupId(),
             ti, snapshotFile);
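Both sync call sites above now go through IOUtils.syncFD(out), which syncs the file descriptor only when the stream really is a FileOutputStream; a stream obtained from Files.newOutputStream exposes no FileDescriptor, so for it the call is a no-op. A small sketch of the two cases:

    import java.io.FileOutputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import org.apache.hadoop.hdds.utils.IOUtils;

    public final class SyncFDExample {
      public static void main(String[] args) throws Exception {
        Path file = Files.createTempFile("sync-demo", ".bin");

        // NIO stream: not a FileOutputStream, so syncFD() does nothing.
        try (OutputStream nio = Files.newOutputStream(file)) {
          nio.write(1);
          IOUtils.syncFD(nio);
        }

        // Legacy stream: syncFD() forces the bytes to the storage device.
        try (FileOutputStream legacy = new FileOutputStream(file.toFile())) {
          legacy.write(2);
          IOUtils.syncFD(legacy);
        }
      }
    }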
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
index ee488cefe269..89bc3f8393df 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
@@ -19,14 +19,17 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.io.SyncFailedException;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 import java.util.Random;
 import java.util.UUID;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -132,10 +135,10 @@ public boolean checkReadWrite(File storageDir,
     File testFile = new File(testFileDir, "disk-check-" + UUID.randomUUID());
     byte[] writtenBytes = new byte[numBytesToWrite];
     RANDOM.nextBytes(writtenBytes);
-    try (FileOutputStream fos = new FileOutputStream(testFile)) {
+    try (OutputStream fos = Files.newOutputStream(testFile.toPath())) {
       fos.write(writtenBytes);
-      fos.getFD().sync();
-    } catch (FileNotFoundException notFoundEx) {
+      IOUtils.syncFD(fos);
+    } catch (FileNotFoundException | NoSuchFileException notFoundEx) {
       logError(storageDir, String.format("Could not find file %s for " +
           "volume check.", testFile.getAbsolutePath()), notFoundEx);
       return false;
@@ -151,7 +154,7 @@ public boolean checkReadWrite(File storageDir,
 
     // Read data back from the test file.
     byte[] readBytes = new byte[numBytesToWrite];
-    try (FileInputStream fis = new FileInputStream(testFile)) {
+    try (InputStream fis = Files.newInputStream(testFile.toPath())) {
       int numBytesRead = fis.read(readBytes);
       if (numBytesRead != numBytesToWrite) {
         logError(storageDir, String.format("%d bytes written to file %s " +
@@ -159,7 +162,7 @@ public boolean checkReadWrite(File storageDir,
             testFile.getAbsolutePath(), numBytesRead));
         return false;
       }
-    } catch (FileNotFoundException notFoundEx) {
+    } catch (FileNotFoundException | NoSuchFileException notFoundEx) {
       logError(storageDir, String.format("Could not find file %s " +
           "for volume check.", testFile.getAbsolutePath()), notFoundEx);
       return false;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 2c5e38bb447d..21a5bf03f765 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -26,6 +26,7 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
+import java.nio.file.NoSuchFileException;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -114,7 +115,7 @@ metadataDir, new FileNotFoundException("Metadata directory " +
         .getContainerFile(metadataPath, containerID);
     try {
       loadContainerData(containerFile);
-    } catch (FileNotFoundException ex) {
+    } catch (FileNotFoundException | NoSuchFileException ex) {
       return ScanResult.unhealthy(
           ScanResult.FailureType.MISSING_CONTAINER_FILE, containerFile, ex);
     } catch (IOException ex) {
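The widened catch clauses above reflect a behavioral difference between the two APIs: new FileInputStream(file) signals a missing file with java.io.FileNotFoundException, while Files.newInputStream(path) throws java.nio.file.NoSuchFileException, which extends IOException but not FileNotFoundException, so both types must be caught. A runnable sketch of the difference:

    import java.io.FileInputStream;
    import java.io.FileNotFoundException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Paths;

    public final class MissingFileExample {
      public static void main(String[] args) throws Exception {
        try {
          new FileInputStream("/no/such/file");
        } catch (FileNotFoundException e) {
          System.out.println("java.io: " + e.getClass().getName());
        }
        try {
          Files.newInputStream(Paths.get("/no/such/file"));
        } catch (NoSuchFileException e) {
          System.out.println("java.nio: " + e.getClass().getName());
        }
      }
    }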
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
index 8e0d301d8578..46bbb6662015 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.container.replication;
 
 import jakarta.annotation.Nonnull;
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -117,7 +117,7 @@ public void importContainer(long containerID, Path tarFilePath,
 
     KeyValueContainerData containerData;
     TarContainerPacker packer = getPacker(compression);
-    try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) {
+    try (InputStream input = Files.newInputStream(tarFilePath)) {
       byte[] containerDescriptorYaml =
           packer.unpackContainerDescriptor(input);
       containerData = getKeyValueContainerData(containerDescriptorYaml);
@@ -125,7 +125,7 @@ public void importContainer(long containerID, Path tarFilePath,
     ContainerUtils.verifyChecksum(containerData, conf);
 
     containerData.setVolume(targetVolume);
-    try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) {
+    try (InputStream input = Files.newInputStream(tarFilePath)) {
       Container container = controller.importContainer(
           containerData, input, packer);
       containerSet.addContainerByOverwriteMissingContainer(container);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index b46d9a4c992d..7b9b24071f93 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.container.replication;
 
 import com.google.common.base.Preconditions;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.UncheckedIOException;
@@ -166,7 +165,7 @@ public StreamDownloader(long containerId, CompletableFuture<Path> response,
         Preconditions.checkNotNull(outputPath, "Output path cannot be null");
         Path parentPath = Preconditions.checkNotNull(outputPath.getParent());
         Files.createDirectories(parentPath);
-        stream = new FileOutputStream(outputPath.toFile());
+        stream = Files.newOutputStream(outputPath);
       } catch (IOException e) {
         throw new UncheckedIOException(
             "Output path can't be used: " + outputPath, e);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
index c6b1469083ec..02e3bd3547a2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestContainerUtils.java
@@ -27,10 +27,11 @@
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -110,7 +111,7 @@ public void testDatanodeIDPersistent(@TempDir File tempDir) throws Exception {
 
     // Test upgrade scenario - protobuf file instead of yaml
     File protoFile = new File(tempDir, "valid-proto.id");
-    try (FileOutputStream out = new FileOutputStream(protoFile)) {
+    try (OutputStream out = Files.newOutputStream(protoFile.toPath())) {
       HddsProtos.DatanodeDetailsProto proto = id1.getProtoBufMessage();
       proto.writeTo(out);
     }
@@ -137,7 +138,7 @@ private void createMalformedIDFile(File malformedFile)
     DatanodeDetails id = randomDatanodeDetails();
     ContainerUtils.writeDatanodeDetailsTo(id, malformedFile, conf);
 
-    try (FileOutputStream out = new FileOutputStream(malformedFile)) {
+    try (OutputStream out = Files.newOutputStream(malformedFile.toPath())) {
       out.write("malformed".getBytes(StandardCharsets.UTF_8));
     }
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 083afa4b0560..52478cf4b105 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -41,9 +41,8 @@
 import static org.mockito.Mockito.when;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -273,7 +272,7 @@ public void testEmptyContainerImportExport(
         folder.toPath().resolve("export.tar")).toFile();
     TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
     //export the container
-    try (FileOutputStream fos = new FileOutputStream(exportTar)) {
+    try (OutputStream fos = Files.newOutputStream(exportTar.toPath())) {
       keyValueContainer.exportContainerData(fos, packer);
     }
 
@@ -282,7 +281,7 @@ public void testEmptyContainerImportExport(
     keyValueContainer.delete();
 
     // import container.
-    try (FileInputStream fis = new FileInputStream(exportTar)) {
+    try (InputStream fis = Files.newInputStream(exportTar.toPath())) {
       keyValueContainer.importContainerData(fis, packer);
     }
 
@@ -306,7 +305,7 @@ public void testUnhealthyContainerImportExport(
     File exportTar = Files.createFile(folder.toPath().resolve("export.tar")).toFile();
     TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
     //export the container
-    try (FileOutputStream fos = new FileOutputStream(exportTar)) {
+    try (OutputStream fos = Files.newOutputStream(exportTar.toPath())) {
       keyValueContainer.exportContainerData(fos, packer);
     }
 
@@ -315,7 +314,7 @@ public void testUnhealthyContainerImportExport(
     keyValueContainer.delete();
 
     // import container.
-    try (FileInputStream fis = new FileInputStream(exportTar)) {
+    try (InputStream fis = Files.newInputStream(exportTar.toPath())) {
       keyValueContainer.importContainerData(fis, packer);
     }
 
@@ -346,7 +345,7 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
 
     TarContainerPacker packer = new TarContainerPacker(compr);
     //export the container
-    try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
+    try (OutputStream fos = Files.newOutputStream(folderToExport.toPath())) {
       keyValueContainer
           .exportContainerData(fos, packer);
     }
@@ -369,7 +368,7 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
             StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()),
             1);
     container.populatePathFields(scmId, containerVolume);
-    try (FileInputStream fis = new FileInputStream(folderToExport)) {
+    try (InputStream fis = Files.newInputStream(folderToExport.toPath())) {
       container.importContainerData(fis, packer);
     }
 
@@ -390,7 +389,7 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
     //Can't overwrite existing container
     KeyValueContainer finalContainer = container;
     assertThrows(IOException.class, () -> {
-      try (FileInputStream fis = new FileInputStream(folderToExport)) {
+      try (InputStream fis = Files.newInputStream(folderToExport.toPath())) {
         finalContainer.importContainerData(fis, packer);
       }
     }, "Container is imported twice. Previous files are overwritten");
@@ -412,7 +411,7 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
     KeyValueContainer finalContainer1 = container;
     assertThrows(IOException.class, () -> {
       try {
-        FileInputStream fis = new FileInputStream(folderToExport);
+        InputStream fis = Files.newInputStream(folderToExport.toPath());
         fis.close();
         finalContainer1.importContainerData(fis, packer);
       } finally {
@@ -855,7 +854,7 @@ void testAutoCompactionSmallSstFile(
           folder.toPath().resolve(containerId + "_exported.tar.gz")).toFile();
       TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
       //export the container
-      try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
+      try (OutputStream fos = Files.newOutputStream(folderToExport.toPath())) {
        container.exportContainerData(fos, packer);
      }
      exportFiles.add(folderToExport);
@@ -878,8 +877,8 @@ void testAutoCompactionSmallSstFile(
         containerData.setSchemaVersion(schemaVersion);
         container = new KeyValueContainer(containerData, CONF);
         container.populatePathFields(scmId, hddsVolume);
-        try (FileInputStream fis =
-                 new FileInputStream(exportFiles.get(index))) {
+        try (InputStream fis =
+                 Files.newInputStream(exportFiles.get(index).toPath())) {
           TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
           container.importContainerData(fis, packer);
           containerList.add(container);
@@ -925,7 +924,7 @@ public void testIsEmptyContainerStateWhileImport(
 
     TarContainerPacker packer = new TarContainerPacker(compr);
     //export the container
-    try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
+    try (OutputStream fos = Files.newOutputStream(folderToExport.toPath())) {
       keyValueContainer
           .exportContainerData(fos, packer);
     }
@@ -948,7 +947,7 @@ public void testIsEmptyContainerStateWhileImport(
             StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()),
             1);
     container.populatePathFields(scmId, containerVolume);
-    try (FileInputStream fis = new FileInputStream(folderToExport)) {
+    try (InputStream fis = Files.newInputStream(folderToExport.toPath())) {
       container.importContainerData(fis, packer);
     }
 
@@ -974,7 +973,7 @@ public void testIsEmptyContainerStateWhileImportWithoutBlock(
 
     TarContainerPacker packer = new TarContainerPacker(compr);
     //export the container
-    try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
+    try (OutputStream fos = Files.newOutputStream(folderToExport.toPath())) {
       keyValueContainer
           .exportContainerData(fos, packer);
     }
@@ -996,7 +995,7 @@ public void testIsEmptyContainerStateWhileImportWithoutBlock(
             StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()),
             1);
     container.populatePathFields(scmId, containerVolume);
-    try (FileInputStream fis = new FileInputStream(folderToExport)) {
+    try (InputStream fis = Files.newInputStream(folderToExport.toPath())) {
       container.importContainerData(fis, packer);
     }
 
@@ -1072,7 +1071,7 @@ private void testMixedSchemaImport(String dir,
     if (!file1.createNewFile()) {
       fail("Failed to create file " + file1.getAbsolutePath());
     }
-    try (FileOutputStream fos = new FileOutputStream(file1)) {
+    try (OutputStream fos = Files.newOutputStream(file1.toPath())) {
      container.exportContainerData(fos, packer);
    }
 
@@ -1088,7 +1087,7 @@ private void testMixedSchemaImport(String dir,
     // import container to new HddsVolume
     KeyValueContainer importedContainer = new KeyValueContainer(data, conf);
     importedContainer.populatePathFields(scmId, hddsVolume2);
-    try (FileInputStream fio = new FileInputStream(file1)) {
+    try (InputStream fio = Files.newInputStream(file1.toPath())) {
       importedContainer.importContainerData(fio, packer);
     }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index cae19d672198..86068af32b87 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -29,8 +29,6 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -193,7 +191,7 @@ public void pack(ContainerTestVersionInfo versionInfo,
 
     //THEN: check the result
     TarArchiveInputStream tarStream = null;
-    try (FileInputStream input = new FileInputStream(targetFile.toFile())) {
+    try (InputStream input = newInputStream(targetFile)) {
       InputStream uncompressed = packer.decompress(input);
       tarStream = new TarArchiveInputStream(uncompressed);
 
@@ -346,7 +344,7 @@ public void unpackContainerDataWithInvalidRelativeChunkFilePath(
 
   private KeyValueContainerData unpackContainerData(File containerFile)
       throws IOException {
-    try (FileInputStream input = new FileInputStream(containerFile)) {
+    try (InputStream input = newInputStream(containerFile.toPath())) {
       KeyValueContainerData data = createContainer(DEST_CONTAINER_ROOT, false);
       KeyValueContainer container = new KeyValueContainer(data, conf);
       packer.unpackContainerData(container, input, TEMP_DIR,
@@ -356,10 +354,8 @@ private KeyValueContainerData unpackContainerData(File containerFile)
   }
 
   private void writeDescriptor(KeyValueContainer container) throws IOException {
-    FileOutputStream fileStream = new FileOutputStream(
-        container.getContainerFile());
-    try (OutputStreamWriter writer = new OutputStreamWriter(fileStream,
-        UTF_8)) {
+    try (OutputStream fileStream = newOutputStream(container.getContainerFile().toPath());
+        OutputStreamWriter writer = new OutputStreamWriter(fileStream, UTF_8)) {
       IOUtils.write(TEST_DESCRIPTOR_FILE_CONTENT, writer);
     }
   }
@@ -385,9 +381,8 @@ private File writeSingleFile(Path parentPath, String fileName,
     assertNotNull(parent);
     Files.createDirectories(parent);
     File file = path.toFile();
-    FileOutputStream fileStream = new FileOutputStream(file);
-    try (OutputStreamWriter writer = new OutputStreamWriter(fileStream,
-        UTF_8)) {
+    try (OutputStream fileStream = newOutputStream(file.toPath());
+        OutputStreamWriter writer = new OutputStreamWriter(fileStream, UTF_8)) {
       IOUtils.write(content, writer);
     }
     return file;
@@ -396,7 +391,7 @@ private File writeSingleFile(Path parentPath, String fileName,
   private File packContainerWithSingleFile(File file, String entryName)
       throws Exception {
     File targetFile = TEMP_DIR.resolve("container.tar").toFile();
-    try (FileOutputStream output = new FileOutputStream(targetFile);
+    try (OutputStream output = newOutputStream(targetFile.toPath());
         OutputStream compressed = packer.compress(output);
         TarArchiveOutputStream archive =
             new TarArchiveOutputStream(compressed)) {
@@ -425,8 +420,8 @@ private void assertExampleFileIsGood(Path parentPath, String filename,
         "example file is missing after pack/unpackContainerData: "
             + exampleFile);
 
-    try (FileInputStream testFile =
-             new FileInputStream(exampleFile.toFile())) {
+    try (InputStream testFile =
+             newInputStream(exampleFile)) {
       List<String> strings = IOUtils.readLines(testFile, UTF_8);
       assertEquals(1, strings.size());
       assertEquals(content, strings.get(0));
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
index f03a3f407946..6d509392ba59 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
@@ -29,9 +29,9 @@
 import static org.mockito.Mockito.when;
 
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.util.HashSet;
@@ -188,7 +188,7 @@ private File containerTarFile(
         yamlFile);
     File tarFile = new File(tempDir,
         ContainerUtils.getContainerTarName(containerId));
-    try (FileOutputStream output = new FileOutputStream(tarFile)) {
+    try (OutputStream output = Files.newOutputStream(tarFile.toPath())) {
       ArchiveOutputStream archive = new TarArchiveOutputStream(output);
       TarArchiveEntry entry = archive.createArchiveEntry(yamlFile,
           "container.yaml");
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SaveSpaceUsageToFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SaveSpaceUsageToFile.java
index 80ba7c468169..1a8450b9b60e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SaveSpaceUsageToFile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SaveSpaceUsageToFile.java
@@ -22,7 +22,6 @@
 import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.OutputStreamWriter;
@@ -109,7 +108,7 @@ public void save(SpaceUsageSource source) {
     long used = source.getUsedSpace();
     if (used > 0) {
       Instant now = Instant.now();
-      try (OutputStream fileOutput = new FileOutputStream(file);
+      try (OutputStream fileOutput = Files.newOutputStream(file.toPath());
           Writer out = new OutputStreamWriter(fileOutput, UTF_8)) {
         // time is written last, so that truncated writes won't be valid.
         out.write(used + " " + now.toEpochMilli());
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
index 9ee22beb829d..ad9c7315ba40 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java
@@ -20,7 +20,6 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.lang.management.ManagementFactory;
@@ -395,7 +394,7 @@ protected void doGetDownload(String fileName, final HttpServletRequest req,
     } else if (safeFileName.endsWith(".tree")) {
       resp.setContentType("text/html");
     }
-    try (InputStream input = new FileInputStream(requestedFile)) {
+    try (InputStream input = Files.newInputStream(requestedFile.toPath())) {
       IOUtils.copy(input, resp.getOutputStream());
     }
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index f58887ad6104..eaef78d8df77 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -45,8 +45,8 @@
 import com.google.common.base.Strings;
 import com.google.protobuf.BlockingService;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.nio.file.Files;
@@ -633,7 +633,7 @@ public static void includeFile(File file, String entryName,
     ArchiveEntry archiveEntry =
         archiveOutputStream.createArchiveEntry(file, entryName);
     archiveOutputStream.putArchiveEntry(archiveEntry);
-    try (FileInputStream fis = new FileInputStream(file)) {
+    try (InputStream fis = Files.newInputStream(file.toPath())) {
       IOUtils.copy(fis, archiveOutputStream);
       archiveOutputStream.flush();
     } finally {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
index b6da7615911d..45ea49d2e3b0 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
@@ -27,7 +27,6 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.charset.StandardCharsets;
@@ -118,7 +117,7 @@ public void downloadSnapshot(String leaderNodeID, File targetFile)
         .map(a -> "".concat(a.getName()).concat(" length: ").
            concat(String.valueOf(a.length())))
         .collect(Collectors.toList()));
-    try (OutputStream outputStream = new FileOutputStream(targetFile)) {
+    try (OutputStream outputStream = Files.newOutputStream(targetFile.toPath())) {
       writeDBCheckpointToStream(dbCheckpoint, outputStream,
           HAUtils.getExistingSstFiles(
               rdbSnapshotProvider.getCandidateDir()), new ArrayList<>());
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index 165dc936b8f2..d03b61496265 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -28,7 +28,6 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FilenameFilter;
 import java.io.IOException;
 import java.io.InputStream;
@@ -430,8 +429,8 @@ private void compareSstWithSameName(File checkpoint1, File checkpoint2)
       long length2 = fileInCk2.length();
       assertEquals(length1, length2, name);
 
-      try (InputStream fileStream1 = new FileInputStream(fileInCk1);
-           InputStream fileStream2 = new FileInputStream(fileInCk2)) {
+      try (InputStream fileStream1 = Files.newInputStream(fileInCk1.toPath());
+           InputStream fileStream2 = Files.newInputStream(fileInCk2.toPath())) {
         byte[] content1 = new byte[fileStream1.available()];
         byte[] content2 = new byte[fileStream2.available()];
         fileStream1.read(content1);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java
index 11d1050ba739..0af4fb22ba9d 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java
@@ -18,11 +18,14 @@
 package org.apache.hadoop.ozone.audit;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.nio.file.StandardOpenOption.CREATE;
+import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
 import static org.apache.ozone.test.GenericTestUtils.waitFor;
 
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.concurrent.TimeoutException;
 import org.apache.commons.io.FileUtils;
 
@@ -70,8 +73,7 @@ public static boolean auditLogContains(String... strings) {
   }
 
   public static void truncateAuditLogFile() throws IOException {
-    File auditLogFile = new File(AUDITLOG_FILENAME);
-    new FileOutputStream(auditLogFile).getChannel().truncate(0).close();
+    Files.write(Paths.get(AUDITLOG_FILENAME), new byte[0], CREATE, TRUNCATE_EXISTING);
   }
 
   public static void deleteAuditLogFile() {
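The truncation above also closes a small resource gap: the old one-liner opened a FileOutputStream and only closed it if truncate(0) succeeded, whereas Files.write with CREATE and TRUNCATE_EXISTING opens, truncates, writes, and closes internally. The same idiom empties, or creates, any file; the path below is illustrative:

    import static java.nio.file.StandardOpenOption.CREATE;
    import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public final class TruncateExample {
      public static void main(String[] args) throws IOException {
        Path log = Paths.get("audit.log"); // hypothetical log location
        Files.write(log, new byte[0], CREATE, TRUNCATE_EXISTING);
      }
    }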
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index 640a6503552b..a1a57ec4189e 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -47,9 +47,9 @@
 import com.google.common.graph.GraphBuilder;
 import com.google.common.graph.MutableGraph;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -1698,7 +1698,7 @@ public void testSstFilePruning(
 
   private void createFileWithContext(String fileName, String context)
       throws IOException {
-    try (FileOutputStream fileOutputStream = new FileOutputStream(fileName)) {
+    try (OutputStream fileOutputStream = Files.newOutputStream(Paths.get(fileName))) {
       fileOutputStream.write(context.getBytes(UTF_8));
     }
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
index 691aae538dfd..aa6f4f7f0860 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/InterSCMGrpcClient.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdds.scm.ha;
 
 import com.google.common.base.Preconditions;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.UncheckedIOException;
@@ -133,7 +132,7 @@ public StreamDownloader(CompletableFuture<Path> response,
       this.outputPath = outputPath;
       try {
         Preconditions.checkNotNull(outputPath, "Output path cannot be null");
-        stream = new FileOutputStream(outputPath.toFile());
+        stream = Files.newOutputStream(outputPath);
       } catch (IOException e) {
         throw new UncheckedIOException(
             "Output path can't be used: " + outputPath, e);
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JacocoServer.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JacocoServer.java
index 5f54f27abf2c..51e746907003 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JacocoServer.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JacocoServer.java
@@ -17,10 +17,11 @@
 package org.apache.ozone.test;
 
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.ServerSocket;
 import java.net.Socket;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import org.jacoco.core.data.ExecutionDataWriter;
 import org.jacoco.core.data.IExecutionDataVisitor;
 import org.jacoco.core.data.ISessionInfoVisitor;
@@ -44,7 +45,7 @@ private JacocoServer() {
   @SuppressWarnings("checkstyle:EmptyStatement")
   public static void main(String[] args) throws IOException {
     ExecutionDataWriter destination =
-        new ExecutionDataWriter(new FileOutputStream(destinationFile));
+        new ExecutionDataWriter(Files.newOutputStream(Paths.get(destinationFile)));
 
     ServerSocket serverSocket = new ServerSocket(port);
     Runtime.getRuntime().addShutdownHook(new Thread(() -> {
       try {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java
index f33cd6af73e5..567fd6df48cc 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java
@@ -21,11 +21,11 @@
 import com.google.common.base.Preconditions;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
 import java.util.Date;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -76,7 +76,7 @@ public static File getVolumeUpgradeLockFile(HddsVolume volume) {
 
   public static boolean createFile(File file) throws IOException {
     final Date date = new Date();
-    try (Writer writer = new OutputStreamWriter(new FileOutputStream(file),
+    try (Writer writer = new OutputStreamWriter(Files.newOutputStream(file.toPath()),
         StandardCharsets.UTF_8)) {
       writer.write(date.toString());
     }
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java
index 16eb73b47f46..bb103665e940 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java
@@ -21,11 +21,10 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.file.Files;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.io.IOUtils;
@@ -81,12 +80,12 @@ protected void execute(OzoneClient client, OzoneAddress address)
     OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     OzoneBucket bucket = vol.getBucket(bucketName);
     try (InputStream input = bucket.readKey(keyName);
-        OutputStream output = new FileOutputStream(dataFile)) {
+        OutputStream output = Files.newOutputStream(dataFile.toPath())) {
       IOUtils.copyBytes(input, output, chunkSize);
     }
 
     if (isVerbose() && !"/dev/null".equals(dataFile.getAbsolutePath())) {
-      try (InputStream stream = new FileInputStream(dataFile)) {
+      try (InputStream stream = Files.newInputStream(dataFile.toPath())) {
         String hash = DigestUtils.sha256Hex(stream);
         out().printf("Downloaded file sha256 checksum : %s%n", hash);
       }
java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; @@ -50,7 +51,7 @@ public boolean exists() { public Token decode() throws IOException { Credentials creds = new Credentials(); - try (FileInputStream fis = new FileInputStream(tokenFile)) { + try (InputStream fis = Files.newInputStream(tokenFile.toPath())) { try (DataInputStream dis = new DataInputStream(fis)) { creds.readTokenStorageStream(dis); } @@ -65,7 +66,7 @@ public Token decode() throws IOException { public void persistToken(Token token) throws IOException { - try (FileOutputStream fos = new FileOutputStream(tokenFile)) { + try (OutputStream fos = Files.newOutputStream(tokenFile.toPath())) { try (DataOutputStream dos = new DataOutputStream(fos)) { Credentials ts = new Credentials(); ts.addToken(token.getService(), token); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index fd977cef3de6..d8dc1e65ede9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -27,11 +27,11 @@ import java.io.BufferedInputStream; import java.io.EOFException; import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.nio.ByteBuffer; +import java.nio.file.Files; import java.util.UUID; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FSDataInputStream; @@ -287,7 +287,7 @@ public void testO3FSByteBufferRead() throws IOException { public void testSequenceFileReaderSync() throws IOException { File srcfile = new File("src/test/resources/testSequenceFile"); Path path = new Path("/" + RandomStringUtils.randomAlphanumeric(5)); - InputStream input = new BufferedInputStream(new FileInputStream(srcfile)); + InputStream input = new BufferedInputStream(Files.newInputStream(srcfile.toPath())); // Upload test SequenceFile file FSDataOutputStream output = fs.create(path); @@ -309,7 +309,7 @@ public void testSequenceFileReaderSync() throws IOException { public void testSequenceFileReaderSyncEC() throws IOException { File srcfile = new File("src/test/resources/testSequenceFile"); Path path = new Path("/" + RandomStringUtils.randomAlphanumeric(5)); - InputStream input = new BufferedInputStream(new FileInputStream(srcfile)); + InputStream input = new BufferedInputStream(Files.newInputStream(srcfile.toPath())); // Upload test SequenceFile file FSDataOutputStream output = ecFs.create(path); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java index 92bd630efb04..06e8f91a50ff 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java @@ -61,13 +61,14 @@ 
 import static org.slf4j.event.Level.DEBUG;
 
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.io.RandomAccessFile;
 import java.io.UnsupportedEncodingException;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
@@ -4951,7 +4952,7 @@ public void testUploadWithStreamAndMemoryMappedBuffer(@TempDir Path dir) throws
     final byte[] data = new byte[8 * chunkSize];
     ThreadLocalRandom.current().nextBytes(data);
     final File file = new File(dir.toString(), "data");
-    try (FileOutputStream out = new FileOutputStream(file)) {
+    try (OutputStream out = Files.newOutputStream(file.toPath())) {
       out.write(data);
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index 1d9b74e055cd..e95c017b9a6c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -63,7 +63,6 @@
 import com.google.common.collect.Sets;
 import java.io.ByteArrayInputStream;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -526,7 +525,7 @@ private void testWriteDbDataWithoutOmSnapshot()
 
     // Get the tarball.
     Path tmpdir = folder.resolve("bootstrapData");
-    try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
+    try (OutputStream fileOutputStream = Files.newOutputStream(tempFile.toPath())) {
       omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock,
           fileOutputStream, new ArrayList<>(), new ArrayList<>(), tmpdir);
     }
@@ -558,7 +557,7 @@ private void testWriteDbDataWithToExcludeFileList()
     File dummyFile = new File(dbCheckpoint.getCheckpointLocation().toString(),
         "dummy.sst");
     try (OutputStreamWriter writer = new OutputStreamWriter(
-        new FileOutputStream(dummyFile), StandardCharsets.UTF_8)) {
+        Files.newOutputStream(dummyFile.toPath()), StandardCharsets.UTF_8)) {
       writer.write("Dummy data.");
     }
     assertTrue(dummyFile.exists());
@@ -572,7 +571,7 @@ private void testWriteDbDataWithToExcludeFileList()
 
     // Get the tarball.
     Path tmpdir = folder.resolve("bootstrapData");
-    try (FileOutputStream fileOutputStream = new FileOutputStream(tempFile)) {
+    try (OutputStream fileOutputStream = Files.newOutputStream(tempFile.toPath())) {
       omDbCheckpointServletMock.writeDbDataToStream(dbCheckpoint, requestMock,
           fileOutputStream, toExcludeList, excludedList, tmpdir);
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index db6f68a5e216..807177b60bb7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -31,9 +31,8 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -1188,7 +1187,7 @@ private long getSizeOfSstFiles(File tarball) throws IOException {
 
   private void createEmptyTarball(File dummyTarFile)
       throws IOException {
-    FileOutputStream fileOutputStream = new FileOutputStream(dummyTarFile);
+    OutputStream fileOutputStream = Files.newOutputStream(dummyTarFile.toPath());
     TarArchiveOutputStream archiveOutputStream =
         new TarArchiveOutputStream(fileOutputStream);
     archiveOutputStream.close();
@@ -1199,7 +1198,7 @@ private Set getSstFilenames(File tarball)
       throws IOException {
     Set sstFilenames = new HashSet<>();
     try (TarArchiveInputStream tarInput =
-        new TarArchiveInputStream(new FileInputStream(tarball))) {
+        new TarArchiveInputStream(Files.newInputStream(tarball.toPath()))) {
       TarArchiveEntry entry;
       while ((entry = tarInput.getNextTarEntry()) != null) {
         String name = entry.getName();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java
index 5858727d34d4..36708601681d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java
@@ -70,7 +70,6 @@
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
@@ -995,7 +994,7 @@ private List uploadParts(String bucketName, String key, String uploadI
     // Upload the file parts.
     long filePosition = 0;
     long fileLength = file.length();
-    try (FileInputStream fileInputStream = new FileInputStream(file)) {
+    try (InputStream fileInputStream = Files.newInputStream(file.toPath())) {
       for (int i = 1; filePosition < fileLength; i++) {
         // Because the last part could be less than 5 MB, adjust the part size as
         // needed.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 35d5169da97f..17793778e433 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -17,6 +17,7 @@
 
 package org.apache.hadoop.ozone.om;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
@@ -109,14 +110,12 @@
 import com.google.protobuf.ProtocolMessageEnum;
 import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.UncheckedIOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -1014,8 +1013,7 @@ private void saveOmMetrics() {
       Files.createDirectories(parent.toPath());
     }
     try (BufferedWriter writer = new BufferedWriter(
-        new OutputStreamWriter(new FileOutputStream(
-            getTempMetricsStorageFile()), StandardCharsets.UTF_8))) {
+        new OutputStreamWriter(Files.newOutputStream(getTempMetricsStorageFile().toPath()), UTF_8))) {
       OmMetricsInfo metricsInfo = new OmMetricsInfo();
       metricsInfo.setNumKeys(metrics.getNumKeys());
       WRITER.writeValue(writer, metricsInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java
index 4742ce1eecf3..076c13766719 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java
@@ -19,9 +19,9 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -181,7 +181,7 @@ public synchronized void restorePrepareFromFile(long currentIndex)
     File prepareMarkerFile = getPrepareMarkerFile();
     if (prepareMarkerFile.exists()) {
       byte[] data = new byte[(int) prepareMarkerFile.length()];
-      try (FileInputStream stream = new FileInputStream(prepareMarkerFile)) {
+      try (InputStream stream = Files.newInputStream(prepareMarkerFile.toPath())) {
        stream.read(data);
      } catch (IOException e) {
        throwPrepareException(e, "Failed to read prepare marker " +
@@ -254,7 +254,7 @@ private void writePrepareMarkerFile(long index) throws IOException {
     File parentDir = markerFile.getParentFile();
     Files.createDirectories(parentDir.toPath());
 
-    try (FileOutputStream stream = new FileOutputStream(markerFile)) {
+    try (OutputStream stream = Files.newOutputStream(markerFile.toPath())) {
       stream.write(Long.toString(index).getBytes(StandardCharsets.UTF_8));
     }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
index 36e7c80307e4..8c6cdd4a7484 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis_snapshot/OmRatisSnapshotProvider.java
@@ -30,11 +30,12 @@
 
 import java.io.DataOutputStream;
 import java.io.File;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.nio.file.Files;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -183,7 +184,7 @@ public void downloadSnapshot(String leaderNodeID, File targetFile)
    */
   public static void downloadFileWithProgress(InputStream inputStream,
       File targetFile) throws IOException {
-    try (FileOutputStream outputStream = new FileOutputStream(targetFile)) {
+    try (OutputStream outputStream = Files.newOutputStream(targetFile.toPath())) {
       byte[] buffer = new byte[8 * 1024];
       long totalBytesRead = 0;
       int bytesRead;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
index 2eb202acca56..800549ad9182 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
@@ -22,10 +22,11 @@
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.nio.charset.Charset;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Random;
 import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -205,8 +206,8 @@ private void writePrepareMarkerFile(byte[] bytes) throws IOException {
     if (!mkdirs) {
       throw new IOException("Unable to create marker file directory.");
     }
-    try (FileOutputStream stream =
-        new FileOutputStream(markerFile)) {
+    try (OutputStream stream =
+        Files.newOutputStream(markerFile.toPath())) {
       stream.write(bytes);
     }
   }
@@ -216,7 +217,7 @@ private long readPrepareMarkerFile() throws Exception {
 
     File prepareMarkerFile = prepareState.getPrepareMarkerFile();
     byte[] data = new byte[(int) prepareMarkerFile.length()];
-    try (FileInputStream stream = new FileInputStream(prepareMarkerFile)) {
+    try (InputStream stream = Files.newInputStream(prepareMarkerFile.toPath())) {
       stream.read(data);
       index = Long.parseLong(new String(data, Charset.defaultCharset()));
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
index 10f3a7b98029..85b1d24bd1d3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
@@ -22,10 +22,10 @@
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.security.GeneralSecurityException;
 import java.security.InvalidKeyException;
@@ -236,19 +236,20 @@ public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) {
 
   @Test
   public void testReadWriteInProtobuf(@TempDir Path baseDir) throws IOException {
     OzoneTokenIdentifier id = getIdentifierInst();
-    File idFile = baseDir.resolve("tokenFile").toFile();
+    Path idFile = baseDir.resolve("tokenFile");
 
-    FileOutputStream fop = new FileOutputStream(idFile);
-    DataOutputStream dataOutputStream = new DataOutputStream(fop);
-    id.write(dataOutputStream);
-    fop.close();
+    try (OutputStream fop = Files.newOutputStream(idFile)) {
+      DataOutputStream dataOutputStream = new DataOutputStream(fop);
+      id.write(dataOutputStream);
+    }
 
-    FileInputStream fis = new FileInputStream(idFile);
-    DataInputStream dis = new DataInputStream(fis);
-    OzoneTokenIdentifier id2 = new OzoneTokenIdentifier();
+    try (InputStream fis = Files.newInputStream(idFile)) {
+      DataInputStream dis = new DataInputStream(fis);
+      OzoneTokenIdentifier id2 = new OzoneTokenIdentifier();
 
-    id2.readFields(dis);
-    assertEquals(id, id2);
+      id2.readFields(dis);
+      assertEquals(id, id2);
+    }
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index afed69dcd8a6..c80bcaa1ab27 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -34,11 +34,12 @@
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.sql.Timestamp;
@@ -167,11 +168,11 @@ public File getReconDbDir(ConfigurationSource conf, String dirConfigKey) {
    */
   public static File createTarFile(Path sourcePath) throws IOException {
     TarArchiveOutputStream tarOs = null;
-    FileOutputStream fileOutputStream = null;
+    OutputStream fileOutputStream = null;
     try {
       String sourceDir = sourcePath.toString();
       String fileName = sourceDir.concat(".tar");
-      fileOutputStream = new FileOutputStream(fileName);
+      fileOutputStream = Files.newOutputStream(Paths.get(fileName));
       tarOs = new TarArchiveOutputStream(fileOutputStream);
       tarOs.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_POSIX);
       File folder = new File(sourceDir);
@@ -199,7 +200,7 @@ private static void addFilesToArchive(String source, File file,
       throws IOException {
     tarFileOutputStream.putArchiveEntry(new TarArchiveEntry(file, source));
     if (file.isFile()) {
-      try (FileInputStream fileInputStream = new FileInputStream(file)) {
+      try (InputStream fileInputStream = Files.newInputStream(file.toPath())) {
         BufferedInputStream bufferedInputStream =
             new BufferedInputStream(fileInputStream);
         org.apache.commons.compress.utils.IOUtils.copy(bufferedInputStream,
@@ -228,9 +229,9 @@ private static void addFilesToArchive(String source, File file,
 
   public void untarCheckpointFile(File tarFile, Path destPath)
       throws IOException {
-    FileInputStream fileInputStream = null;
+    InputStream fileInputStream = null;
     try {
-      fileInputStream = new FileInputStream(tarFile);
+      fileInputStream = Files.newInputStream(tarFile.toPath());
 
       //Create Destination directory if it does not exist.
       if (!destPath.toFile().exists()) {
@@ -259,7 +260,7 @@ public void untarCheckpointFile(File tarFile, Path destPath)
 
         int count;
         byte[] data = new byte[WRITE_BUFFER];
-        FileOutputStream fos = new FileOutputStream(f);
+        OutputStream fos = Files.newOutputStream(f.toPath());
         try (BufferedOutputStream dest =
             new BufferedOutputStream(fos, WRITE_BUFFER)) {
           while ((count =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java
index 513f93177998..b898dd320288 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java
@@ -31,7 +31,6 @@
 import static org.mockito.Mockito.when;
 
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.HttpURLConnection;
@@ -112,7 +111,7 @@ public void setUp() throws IOException, AuthenticationException {
         .getCheckpoint(true);
     File tarFile = createTarFile(checkpoint.getCheckpointLocation());
     HttpURLConnection httpURLConnectionMock = mock(HttpURLConnection.class);
-    try (InputStream inputStream = new FileInputStream(tarFile)) {
+    try (InputStream inputStream = Files.newInputStream(tarFile.toPath())) {
       when(httpURLConnectionMock.getInputStream()).thenReturn(inputStream);
     }
     when(reconUtilsMock.makeHttpCall(any(), anyString(), anyBoolean()))
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
index d5fa544f9396..9717a3856789 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
@@ -17,9 +17,11 @@
 
 package org.apache.hadoop.ozone.admin.scm;
 
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.List;
 import java.util.Objects;
 import java.util.stream.Collectors;
@@ -80,7 +82,7 @@ public void execute(ScmClient client) throws IOException {
 
     String result = JsonUtils.toJsonStringWithDefaultPrettyPrinter(txns);
     if (fileName != null) {
-      try (FileOutputStream f = new FileOutputStream(fileName)) {
+      try (OutputStream f = Files.newOutputStream(Paths.get(fileName))) {
         f.write(result.getBytes(StandardCharsets.UTF_8));
       }
     } else {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java
index b9f2512a0857..b93b8d50b43a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ResetDeletedBlockRetryCountSubcommand.java
@@ -17,12 +17,13 @@
 
 package org.apache.hadoop.ozone.admin.scm;
 
-import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.Reader;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -73,7 +74,7 @@ public void execute(ScmClient client) throws IOException {
       count = client.resetDeletedBlockRetryCount(new ArrayList<>());
     } else if (group.fileName != null) {
       List txIDs;
-      try (InputStream in = new FileInputStream(group.fileName);
+      try (InputStream in = Files.newInputStream(Paths.get(group.fileName));
           Reader fileReader = new InputStreamReader(in, StandardCharsets.UTF_8)) {
         DeletedBlocksTransactionInfoWrapper[] txns =
             JsonUtils.readFromReader(fileReader,
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
index 883f01984318..dbe0d252f348 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
@@ -20,11 +20,12 @@
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import java.io.BufferedReader;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -134,9 +135,9 @@ private static boolean insertAudits(String dbName, String logs)
   private static ArrayList parseAuditLogs(String filePath)
       throws IOException {
     ArrayList listResult = new ArrayList<>();
-    try (FileInputStream fis = new FileInputStream(filePath);
-        InputStreamReader isr = new InputStreamReader(fis, UTF_8);
-        BufferedReader bReader = new BufferedReader(isr)) {
+    try (InputStream fis = Files.newInputStream(Paths.get(filePath));
+        InputStreamReader isr = new InputStreamReader(fis, UTF_8);
+        BufferedReader bReader = new BufferedReader(isr)) {
       String currentLine = bReader.readLine();
       String nextLine = bReader.readLine();
       String[] entry;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ExportSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ExportSubcommand.java
index 70931d66e519..91ef00d217f0 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ExportSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ExportSubcommand.java
@@ -21,7 +21,8 @@
 import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
 
 import java.io.File;
-import java.io.FileOutputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
 import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource;
@@ -72,7 +73,7 @@ public Void call() throws Exception {
       replicationSource.prepare(containerId);
       final File destinationFile =
          new File(destination, "container-" + containerId + ".tar");
-      try (FileOutputStream fos = new FileOutputStream(destinationFile)) {
+      try (OutputStream fos = Files.newOutputStream(destinationFile.toPath())) {
        try {
          replicationSource.copyData(containerId, fos, NO_COMPRESSION);
        } catch (StorageContainerException e) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index cbd86de81e50..ddab348a131d 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -28,9 +28,12 @@
 import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.annotations.VisibleForTesting;
 import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.io.PrintStream;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Paths;
 import java.security.MessageDigest;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -544,13 +547,13 @@ void printStats(PrintStream out) {
     String jsonName = new SimpleDateFormat("yyyyMMddHHmmss").format(Time.now())
        + ".json";
     String jsonPath = jsonDir + "/" + jsonName;
-    try (FileOutputStream os = new FileOutputStream(jsonPath)) {
+    try (OutputStream os = Files.newOutputStream(Paths.get(jsonPath))) {
       ObjectMapper mapper = new ObjectMapper();
       mapper.setVisibility(PropertyAccessor.FIELD,
           JsonAutoDetect.Visibility.ANY);
       ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
       writer.writeValue(os, jobInfo);
-    } catch (FileNotFoundException e) {
+    } catch (FileNotFoundException | NoSuchFileException e) {
       out.println("Json File could not be created for the path: " + jsonPath);
       out.println(e);
     } catch (IOException e) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
index dd5e387b0e1f..4e6e5f584628 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.freon;
 
 import com.codahale.metrics.Timer;
-import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.concurrent.Callable;
@@ -100,9 +100,7 @@ private void generateBaseData() {
         new ContentGenerator(fileSize.toBytes(), 1024);
 
     for (int i = 0; i < numberOfFiles; i++) {
-      try (FileOutputStream out = new FileOutputStream(
-          subDir.resolve("file-" + i).toFile())
-      ) {
+      try (OutputStream out = Files.newOutputStream(subDir.resolve("file-" + i))) {
         contentGenerator.write(out);
       }
     }

From 9960ed2d347c946a31ca0c4b9130a35b54fd216b Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Wed, 5 Mar 2025 21:21:06 +0100
Subject: [PATCH 2/4] rely on DEFAULT_OPEN_OPTIONS

---
 .../src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java   | 4 +---
 .../java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java | 4 +---
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
index 28c9ecf323f6..b668809615bc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdds.utils;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.nio.file.StandardOpenOption.CREATE;
-import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
 
 import jakarta.annotation.Nonnull;
 import java.io.FileOutputStream;
@@ -120,7 +118,7 @@ public static void syncFD(OutputStream out) throws IOException {
   public static void writePropertiesToFile(Path path, Properties properties) throws IOException {
     StringWriter out = new StringWriter();
     properties.store(out, null);
-    Files.write(path, out.toString().getBytes(UTF_8), CREATE, TRUNCATE_EXISTING);
+    Files.write(path, out.toString().getBytes(UTF_8));
   }
 
   /** Read {@link Properties} from the file at {@code path}. */
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java
index 0af4fb22ba9d..0b05d4d01f0e 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/ozone/audit/AuditLogTestUtils.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.ozone.audit;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.nio.file.StandardOpenOption.CREATE;
-import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
 import static org.apache.ozone.test.GenericTestUtils.waitFor;
 
 import java.io.File;
@@ -73,7 +71,7 @@ public static boolean auditLogContains(String... strings) {
   }
 
   public static void truncateAuditLogFile() throws IOException {
-    Files.write(Paths.get(AUDITLOG_FILENAME), new byte[0], CREATE, TRUNCATE_EXISTING);
+    Files.write(Paths.get(AUDITLOG_FILENAME), new byte[0]);
   }
 
   public static void deleteAuditLogFile() {

From 1b2e19fa9d9118792333ae4a2d8bd2b7a3e17759 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Wed, 5 Mar 2025 21:39:50 +0100
Subject: [PATCH 3/4] replace useless syncFD

---
 .../java/org/apache/hadoop/hdds/utils/IOUtils.java      |  9 ---------
 .../ozone/container/common/impl/ContainerDataYaml.java  |  8 +++++---
 .../transport/server/ratis/ContainerStateMachine.java   | 10 ++++++----
 .../ozone/container/common/utils/DiskCheckUtil.java     |  9 ++++++---
 4 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
index b668809615bc..f6c4907d8437 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
@@ -20,10 +20,8 @@
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import jakarta.annotation.Nonnull;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.OutputStream;
 import java.io.StringWriter;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -107,13 +105,6 @@ public static void closeQuietly(Collection closeables)
     close(null, closeables);
   }
 
-  /** Sync the file descriptor, if {@code out} is a {@code FileOutputStream}. */
-  public static void syncFD(OutputStream out) throws IOException {
-    if (out instanceof FileOutputStream) {
-      ((FileOutputStream) out).getFD().sync();
-    }
-  }
-
   /** Write {@code properties} to the file at {@code path}, truncating any existing content. */
   public static void writePropertiesToFile(Path path, Properties properties) throws IOException {
     StringWriter out = new StringWriter();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 64e162a958d6..3e9876515feb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -17,6 +17,9 @@
 
 package org.apache.hadoop.ozone.container.common.impl;
 
+import static java.nio.file.StandardOpenOption.CREATE;
+import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
+import static java.nio.file.StandardOpenOption.WRITE;
 import static org.apache.hadoop.ozone.OzoneConsts.REPLICA_INDEX;
 import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.KEYVALUE_YAML_TAG;
 
@@ -41,6 +44,7 @@
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.ratis.util.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.yaml.snakeyaml.DumperOptions;
@@ -93,15 +97,13 @@ public static void createContainerFile(ContainerType containerType,
       containerData.computeAndSetChecksum(yaml);
 
       // Write the ContainerData with checksum to Yaml file.
-      out = Files.newOutputStream(containerFile.toPath());
+      out = FileUtils.newOutputStreamForceAtClose(containerFile, CREATE, TRUNCATE_EXISTING, WRITE);
       writer = new OutputStreamWriter(out, StandardCharsets.UTF_8);
       yaml.dump(containerData, writer);
     } finally {
       try {
         if (writer != null) {
           writer.flush();
-          // make sure the container metadata is synced to disk.
-          IOUtils.syncFD(out);
           writer.close();
         }
       } catch (IOException ex) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 119cf283b22a..e3bc8eba6ce4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -17,6 +17,10 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server.ratis;
 
+import static java.nio.file.StandardOpenOption.CREATE;
+import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
+import static java.nio.file.StandardOpenOption.WRITE;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -59,7 +63,6 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.utils.Cache;
-import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.ResourceCache;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -97,6 +100,7 @@
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat;
+import org.apache.ratis.util.FileUtils;
 import org.apache.ratis.util.JavaUtils;
 import org.apache.ratis.util.LifeCycle;
 import org.apache.ratis.util.TaskQueue;
@@ -375,11 +379,9 @@ public long takeSnapshot() throws IOException {
     final File snapshotFile =
         storage.getSnapshotFile(ti.getTerm(), ti.getIndex());
     LOG.info("{}: Taking a snapshot at:{} file {}", getGroupId(), ti, snapshotFile);
-    try (OutputStream fos = Files.newOutputStream(snapshotFile.toPath())) {
+    try (OutputStream fos = FileUtils.newOutputStreamForceAtClose(snapshotFile, CREATE, TRUNCATE_EXISTING, WRITE)) {
       persistContainerSet(fos);
       fos.flush();
-      // make sure the snapshot file is synced
-      IOUtils.syncFD(fos);
     } catch (IOException ioe) {
       LOG.error("{}: Failed to write snapshot at:{} file {}", getGroupId(),
           ti, snapshotFile);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
index 89bc3f8393df..2ca34d83fa90 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
@@ -17,6 +17,10 @@
 
 package org.apache.hadoop.ozone.container.common.utils;
 
+import static java.nio.file.StandardOpenOption.CREATE;
+import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
+import static java.nio.file.StandardOpenOption.WRITE;
+
 import com.google.common.annotations.VisibleForTesting;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -29,7 +33,7 @@
 import java.util.Arrays;
 import java.util.Random;
 import java.util.UUID;
-import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.ratis.util.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -135,9 +139,8 @@ public boolean checkReadWrite(File storageDir,
     File testFile = new File(testFileDir, "disk-check-" + UUID.randomUUID());
     byte[] writtenBytes = new byte[numBytesToWrite];
     RANDOM.nextBytes(writtenBytes);
-    try (OutputStream fos = Files.newOutputStream(testFile.toPath())) {
+    try (OutputStream fos = FileUtils.newOutputStreamForceAtClose(testFile, CREATE, TRUNCATE_EXISTING, WRITE)) {
       fos.write(writtenBytes);
-      IOUtils.syncFD(fos);
     } catch (FileNotFoundException | NoSuchFileException notFoundEx) {
       logError(storageDir, String.format("Could not find file %s for " +
           "volume check.", testFile.getAbsolutePath()), notFoundEx);

From 65313dfd9dd7e5144506e389dacc8a52f13a838f Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Thu, 6 Mar 2025 19:39:19 +0100
Subject: [PATCH 4/4] use AtomicFileOutputStream

---
 .../org/apache/hadoop/hdds/utils/IOUtils.java     | 19 +++++++++----------
 .../hadoop/ozone/common/StorageInfo.java          |  4 ++--
 .../common/helpers/DatanodeVersionFile.java       |  4 ++--
 3 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
index f6c4907d8437..ce42c9660e45 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
@@ -17,17 +17,16 @@
 
 package org.apache.hadoop.hdds.utils;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
 import jakarta.annotation.Nonnull;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
-import java.io.StringWriter;
+import java.io.OutputStream;
 import java.nio.file.Files;
-import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Properties;
+import org.apache.ratis.util.AtomicFileOutputStream;
 import org.slf4j.Logger;
 
 /**
@@ -106,16 +105,16 @@ public static void closeQuietly(Collection closeables)
   }
 
-  /** Write {@code properties} to the file at {@code path}, truncating any existing content. */
-  public static void writePropertiesToFile(Path path, Properties properties) throws IOException {
-    StringWriter out = new StringWriter();
-    properties.store(out, null);
-    Files.write(path, out.toString().getBytes(UTF_8));
+  /** Write {@code properties} to {@code file}, truncating any existing content. */
+  public static void writePropertiesToFile(File file, Properties properties) throws IOException {
+    try (OutputStream out = new AtomicFileOutputStream(file)) {
+      properties.store(out, null);
+    }
   }
 
-  /** Read {@link Properties} from the file at {@code path}. */
-  public static @Nonnull Properties readPropertiesFromFile(Path path) throws IOException {
+  /** Read {@link Properties} from {@code file}. */
+  public static @Nonnull Properties readPropertiesFromFile(File file) throws IOException {
     Properties props = new Properties();
-    try (InputStream in = Files.newInputStream(path)) {
+    try (InputStream in = Files.newInputStream(file.toPath())) {
       props.load(in);
     }
     return props;
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index b2390acdc75d..c8b527c25697 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -190,11 +190,11 @@ private void verifyCreationTime() {
 
   public void writeTo(File to)
       throws IOException {
-    IOUtils.writePropertiesToFile(to.toPath(), properties);
+    IOUtils.writePropertiesToFile(to, properties);
   }
 
   private Properties readFrom(File from) throws IOException {
-    return IOUtils.readPropertiesFromFile(from.toPath());
+    return IOUtils.readPropertiesFromFile(from);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
index 8c5d0adff2a2..84400c4f2405 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
@@ -62,7 +62,7 @@ private Properties createProperties() {
    */
   public void createVersionFile(File path) throws
       IOException {
-    IOUtils.writePropertiesToFile(path.toPath(), createProperties());
+    IOUtils.writePropertiesToFile(path, createProperties());
   }
 
   /**
@@ -72,6 +72,6 @@ public void createVersionFile(File path) throws
    * @throws IOException
    */
   public static Properties readFrom(File versionFile) throws IOException {
-    return IOUtils.readPropertiesFromFile(versionFile.toPath());
+    return IOUtils.readPropertiesFromFile(versionFile);
  }
 }
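
Patches 1 and 2 amount to one mechanical rewrite: every `new FileInputStream(f)` / `new FileOutputStream(f)` becomes `Files.newInputStream(f.toPath())` / `Files.newOutputStream(f.toPath())`. Two properties of the NIO factories make this safe and explain the follow-up tweaks: `Files.newOutputStream` defaults to CREATE + TRUNCATE_EXISTING + WRITE, matching `FileOutputStream` semantics (which is why patch 2 can drop the explicit open options), and a missing file surfaces as `NoSuchFileException`, an `IOException` subclass carrying the offending path (which is why RandomKeyGenerator's catch is widened to `FileNotFoundException | NoSuchFileException`). A minimal standalone sketch of the pattern, with an illustrative class and file name that are not part of the patch:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public final class NioStreamSketch {
  private NioStreamSketch() { }

  public static void main(String[] args) throws IOException {
    Path file = Paths.get("demo.txt");

    // Same effect as `new FileOutputStream(file)`: with no options given,
    // Files.newOutputStream opens with CREATE + TRUNCATE_EXISTING + WRITE.
    try (OutputStream out = Files.newOutputStream(file)) {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
    }

    // Unlike FileInputStream, a missing file fails with NoSuchFileException
    // (still an IOException), and no finalizer is attached to the stream,
    // which is the motivation behind PMD's AvoidFileStream rule.
    try (InputStream in = Files.newInputStream(file)) {
      System.out.println(in.available() + " bytes readable");
    }
  }
}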
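Patches 3 and 4 then replace the hand-rolled `syncFD` with two Ratis utilities used elsewhere in the codebase: `FileUtils.newOutputStreamForceAtClose` (the file descriptor is forced to disk when the stream closes) and `AtomicFileOutputStream` (content is staged in a temporary file and moved into place on a successful close). The combined durability-plus-atomicity idea can be approximated in plain NIO roughly as follows; this is a sketch of the write/force/rename pattern under those assumptions, not the Ratis implementation, and the class and file names are hypothetical:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public final class AtomicWriteSketch {
  private AtomicWriteSketch() { }

  /** Durably replace the content of {@code target}: write a temp file, force it, rename it into place. */
  static void writeAtomically(Path target, byte[] data) throws IOException {
    Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
    try (FileChannel ch = FileChannel.open(tmp, StandardOpenOption.CREATE,
        StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE)) {
      ch.write(ByteBuffer.wrap(data));
      ch.force(true); // the "force at close" part: data reaches disk before the rename
    }
    // ATOMIC_MOVE asks the filesystem for an all-or-nothing rename, so readers
    // observe either the old content or the new content, never a torn write.
    Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE,
        StandardCopyOption.REPLACE_EXISTING);
  }

  public static void main(String[] args) throws IOException {
    writeAtomically(Paths.get("VERSION"),
        "storageVersion=1\n".getBytes(StandardCharsets.UTF_8));
  }
}

The design tradeoff matches what the series converges on: a plain forced stream (patch 3) is enough for files that are rewritten from scratch and re-validated on read, while the temp-file-plus-rename variant (patch 4) additionally protects metadata files such as VERSION from being left half-written after a crash.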