diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
index 8a4b5fa3bc21..55abc2630178 100644
--- a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
@@ -16,107 +16,4 @@
limitations under the License.
-->
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java
index 83dfa746e22e..c27319d0ec12 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java
@@ -46,9 +46,7 @@ public abstract class AbstractContractCopyFromLocalTest extends
@Override
public void teardown() throws Exception {
super.teardown();
- if (file != null) {
- file.delete();
- }
+ FileUtils.deleteQuietly(file);
}
@Test
@@ -99,7 +97,7 @@ public void testCopyFileOverwrite() throws Throwable {
public void testCopyMissingFile() throws Throwable {
describe("Copying a file that's not there must fail.");
file = createTempFile("test");
- file.delete();
+ FileUtils.deleteQuietly(file);
// first upload to create
intercept(FileNotFoundException.class, "",
() -> copyFromLocal(file, true));
@@ -262,7 +260,9 @@ public void testCopyDirectoryWithDelete() throws Throwable {
Files.createTempFile(srcDir, "test1", ".txt");
Path src = new Path(srcDir.toUri());
- Path dst = path(srcDir.getFileName().toString());
+ java.nio.file.Path fileName = srcDir.getFileName();
+ Assertions.assertThat(fileName).isNotNull();
+ Path dst = path(fileName.toString());
getFileSystem().copyFromLocalFile(true, true, src, dst);
Assertions.assertThat(srcDir)
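
A note on the teardown change above: File.delete() reports failure only through its boolean result, which SpotBugs flags when dropped, while commons-io's FileUtils.deleteQuietly additionally tolerates a null argument, so the null guard disappears. A minimal before/after sketch (the helper names are hypothetical):

import java.io.File;
import org.apache.commons.io.FileUtils;

class TeardownSketch {
  // Before: the boolean result of delete() is silently dropped
  // (SpotBugs RV_RETURN_VALUE_IGNORED_BAD_PRACTICE) and a possibly
  // unassigned field needs an explicit null guard.
  static void cleanupOld(File file) {
    if (file != null) {
      file.delete();
    }
  }

  // After: deleteQuietly accepts null and swallows failures, which is
  // exactly the behaviour a best-effort teardown wants.
  static void cleanupNew(File file) {
    FileUtils.deleteQuietly(file);
  }
}
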
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
index fd86e8983aed..d2136db4d139 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
+import org.apache.ratis.util.Preconditions;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
@@ -83,7 +84,7 @@ String etagFromStatus(FileStatus st) {
Assertions.assertThat(st)
.describedAs("FileStatus %s", st)
.isInstanceOf(EtagSource.class);
- final String etag = ((EtagSource) st).getEtag();
+ final String etag = Preconditions.assertInstanceOf(st, EtagSource.class).getEtag();
Assertions.assertThat(etag)
.describedAs("Etag of %s", st)
.isNotBlank();
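
The cast change above uses Ratis' Preconditions.assertInstanceOf, which checks the runtime type and returns the cast value in one call, so the unchecked (EtagSource) cast goes away. A minimal sketch of the pattern, assuming EtagSource is org.apache.hadoop.fs.EtagSource (the import is not shown in the hunk):

import org.apache.hadoop.fs.EtagSource;
import org.apache.hadoop.fs.FileStatus;
import org.apache.ratis.util.Preconditions;

class AssertInstanceOfSketch {
  static String etagOf(FileStatus st) {
    // before: final String etag = ((EtagSource) st).getEtag();
    // assertInstanceOf fails with a descriptive error instead of an
    // unchecked ClassCastException if st is not an EtagSource.
    return Preconditions.assertInstanceOf(st, EtagSource.class).getEtag();
  }
}
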
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index 26f06f9abc7d..d7354c7e3042 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -31,6 +31,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InputStream;
import java.net.URI;
import java.security.MessageDigest;
import java.util.ArrayList;
@@ -41,20 +42,19 @@
import java.util.List;
import java.util.Map;
import javax.xml.bind.DatatypeConverter;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OmConfig;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -445,19 +445,18 @@ private void createAndAssertKey(OzoneBucket ozoneBucket, String key, int length)
private void readKey(OzoneBucket ozoneBucket, String key, int length, byte[] input)
throws Exception {
- OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
byte[] read = new byte[length];
- ozoneInputStream.read(read, 0, length);
- ozoneInputStream.close();
+ try (InputStream in = ozoneBucket.readKey(key)) {
+ IOUtils.readFully(in, read);
+ }
String inputString = new String(input, UTF_8);
assertEquals(inputString, new String(read, UTF_8));
// Read using filesystem.
- FSDataInputStream fsDataInputStream = o3fs.open(new Path(key));
- read = new byte[length];
- fsDataInputStream.read(read, 0, length);
- fsDataInputStream.close();
+ try (InputStream in = o3fs.open(new Path(key))) {
+ IOUtils.readFully(in, read);
+ }
assertEquals(inputString, new String(read, UTF_8));
}
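
The readKey rewrite above addresses two findings at once: the stream is now closed via try-with-resources, and the ignored return value of InputStream.read is replaced by commons-io's IOUtils.readFully, which loops until the buffer is full. A minimal, runnable sketch of the difference:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;

class ReadFullySketch {
  public static void main(String[] args) throws Exception {
    byte[] data = "hello ozone".getBytes(StandardCharsets.UTF_8);
    byte[] buffer = new byte[data.length];
    // A single read() may legally return fewer bytes than requested;
    // readFully keeps reading until the buffer is full and throws
    // EOFException if the stream ends first.
    try (InputStream in = new ByteArrayInputStream(data)) {
      IOUtils.readFully(in, buffer);
    }
    System.out.println(new String(buffer, StandardCharsets.UTF_8));
  }
}
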
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
index 8c70d18473cc..a917f7453ea3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
@@ -126,8 +126,12 @@ public void testInstallCheckPoint() throws Exception {
DBCheckpoint checkpoint = downloadSnapshot();
StorageContainerManager scm = cluster.getStorageContainerManager();
final Path location = checkpoint.getCheckpointLocation();
- final DBStore db = HAUtils.loadDB(conf, location.getParent().toFile(),
- location.getFileName().toString(), SCMDBDefinition.get());
+ Path parent = location.getParent();
+ assertNotNull(parent);
+ Path fileName = location.getFileName();
+ assertNotNull(fileName);
+ final DBStore db = HAUtils.loadDB(conf, parent.toFile(),
+ fileName.toString(), SCMDBDefinition.get());
// Hack the transaction index in the checkpoint to ensure the checkpointed
// transaction index is higher than it was when the checkpoint was
// downloaded.
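
The extra locals and assertNotNull calls above exist because Path.getParent() and Path.getFileName() are nullable, which SpotBugs reports when the results are dereferenced in a chain. A small sketch of the corner case, with a hypothetical path:

import java.nio.file.Path;
import java.nio.file.Paths;

class PathNullabilitySketch {
  public static void main(String[] args) {
    // A root path has neither a parent nor a file name.
    Path root = Paths.get("/");
    System.out.println(root.getParent());   // null
    System.out.println(root.getFileName()); // null

    // For a real checkpoint location both are non-null, but the static
    // analyzer cannot prove it, hence the assertNotNull in the test.
    Path checkpoint = Paths.get("/tmp/scm/checkpoint.db"); // hypothetical
    Path parent = checkpoint.getParent();
    Path name = checkpoint.getFileName();
    if (parent != null && name != null) {
      System.out.println(parent + " -> " + name);
    }
  }
}
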
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
index 71193a17bab0..11baa144476c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
@@ -240,11 +240,12 @@ public void testInstallCorruptedCheckpointFailure() throws Exception {
// Corrupt the leader checkpoint and install that on the follower. The
// operation should fail and the follower should shut down.
boolean delete = true;
- for (File file : leaderCheckpointLocation.toFile()
- .listFiles()) {
+ File[] files = leaderCheckpointLocation.toFile().listFiles();
+ assertNotNull(files);
+ for (File file : files) {
if (file.getName().contains(".sst")) {
if (delete) {
- file.delete();
+ FileUtils.deleteQuietly(file);
delete = false;
} else {
delete = true;
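
File.listFiles() returns null rather than an empty array when the path is not a directory or an I/O error occurs, so iterating its raw result is a potential NPE; the hunk above binds it to a local and asserts non-null first. A minimal sketch:

import java.io.File;

class ListFilesSketch {
  public static void main(String[] args) {
    File[] files = new File("/no/such/dir").listFiles(); // hypothetical path
    // null here means "not a directory or I/O error", not "empty".
    if (files == null) {
      System.out.println("directory missing or unreadable");
      return;
    }
    for (File f : files) {
      System.out.println(f.getName());
    }
  }
}
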
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
index a73c94d2ba4a..590818ecc493 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
@@ -29,7 +29,6 @@
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
-import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.client.BlockID;
@@ -86,7 +85,6 @@ public class TestCommitWatcher {
private long blockSize;
private String volumeName;
private String bucketName;
- private String keyString;
private StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
@@ -147,7 +145,6 @@ public void init() throws Exception {
//the easiest way to create an open container is to create a key
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
- keyString = UUID.randomUUID().toString();
volumeName = "testblockoutputstream";
bucketName = volumeName;
objectStore.createVolume(volumeName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java
index 16bbad7be695..79864db35eb0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java
@@ -24,6 +24,7 @@
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
+import okhttp3.ResponseBody;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.Test;
@@ -51,7 +52,9 @@ public void testCpuMetrics() throws IOException {
// when
Response metricsResponse = httpClient.newCall(prometheusMetricsRequest)
.execute();
- String metricsResponseBodyContent = metricsResponse.body().string();
+ ResponseBody body = metricsResponse.body();
+ assertThat(body).isNotNull();
+ String metricsResponseBodyContent = body.string();
// then
assertThat(metricsResponseBodyContent)
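
okhttp declares Response.body() as nullable, so calling .string() on it directly trips a SpotBugs NP warning; the hunk above binds the body and checks it first. A minimal sketch of the same pattern outside a test (the endpoint URL is hypothetical):

import java.io.IOException;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import okhttp3.ResponseBody;

class OkHttpBodySketch {
  public static void main(String[] args) throws IOException {
    OkHttpClient client = new OkHttpClient();
    Request request = new Request.Builder()
        .url("http://localhost:9876/prom") // hypothetical endpoint
        .build();
    try (Response response = client.newCall(request).execute()) {
      ResponseBody body = response.body(); // declared @Nullable
      if (body == null) {
        throw new IOException("response had no body");
      }
      System.out.println(body.string());
    }
  }
}
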
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
index ab6e7de9c735..0f4b9850e97a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
@@ -20,6 +20,7 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT_DEFAULT;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import com.google.common.collect.Maps;
import java.io.IOException;
@@ -30,6 +31,7 @@
import java.util.List;
import java.util.Map;
import java.util.Scanner;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -134,6 +136,13 @@ public static void createKey(OzoneBucket bucket, String keyName,
}
}
+ public static void readFully(OzoneBucket bucket, String keyName) throws IOException {
+ int len = Math.toIntExact(bucket.getKey(keyName).getDataSize());
+ try (InputStream inputStream = bucket.readKey(keyName)) {
+ assertDoesNotThrow(() -> IOUtils.readFully(inputStream, len));
+ }
+ }
+
public static String getKey(OzoneBucket bucket, String keyName)
throws IOException {
try (InputStream stream = bucket.readKey(keyName)) {
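
The new readFully helper sizes its buffer from the key's own metadata instead of a caller-supplied magic length. A hypothetical caller might use it like this (bucket setup assumed):

import static java.nio.charset.StandardCharsets.UTF_8;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;

class ReadFullyUsageSketch {
  // Sketch only: write a key, then assert it is readable end to end.
  // The helper derives the length from getKey(keyName).getDataSize(),
  // so no "new byte[100]" constants leak into call sites.
  static void verifyKeyRoundTrip(OzoneBucket bucket) throws Exception {
    TestDataUtil.createKey(bucket, "key1", "value".getBytes(UTF_8));
    TestDataUtil.readFully(bucket, "key1");
  }
}
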
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index 292723bfc6f2..d6f57a6ce77e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -49,6 +49,7 @@
import static org.apache.ozone.test.GenericTestUtils.getTestStartTime;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -199,6 +200,7 @@
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.function.Executable;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
@@ -1190,7 +1192,7 @@ public void testPutKeyWithReplicationConfig(String replicationValue,
assertEquals(keyName, key.getName());
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
assertEquals(value, new String(fileContent, UTF_8));
}
} else {
@@ -1222,7 +1224,7 @@ public void testPutKey() throws IOException {
assertEquals(keyName, key.getName());
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
verifyReplication(volumeName, bucketName, keyName,
RatisReplicationConfig.getInstance(
HddsProtos.ReplicationFactor.ONE));
@@ -2032,7 +2034,7 @@ public void testPutKeyRatisOneNode() throws IOException {
assertEquals(keyName, key.getName());
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
verifyReplication(volumeName, bucketName, keyName,
RatisReplicationConfig.getInstance(
HddsProtos.ReplicationFactor.ONE));
@@ -2065,7 +2067,7 @@ public void testPutKeyRatisThreeNodes() throws IOException {
assertEquals(keyName, key.getName());
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
verifyReplication(volumeName, bucketName, keyName,
RatisReplicationConfig.getInstance(
HddsProtos.ReplicationFactor.THREE));
@@ -2105,7 +2107,7 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException,
assertEquals(keyName, key.getName());
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] fileContent = new byte[data.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
verifyReplication(volumeName, bucketName, keyName,
RatisReplicationConfig.getInstance(
HddsProtos.ReplicationFactor.THREE));
@@ -2204,20 +2206,21 @@ private void readCorruptedKey(String volumeName, String bucketName,
configuration.setFromObject(clientConfig);
RpcClient client = new RpcClient(configuration, null);
- try (InputStream is = client.getKey(volumeName, bucketName, keyName)) {
- is.read(new byte[100]);
+ try {
+ int len = Math.toIntExact(client.getKeyDetails(volumeName, bucketName, keyName).getDataSize());
+ try (InputStream is = client.getKey(volumeName, bucketName, keyName)) {
+ Executable read = () -> IOUtils.readFully(is, len);
+ if (verifyChecksum) {
+ assertThrows(IOException.class, read);
+ } else {
+ assertDoesNotThrow(read);
+ }
+ }
} finally {
client.close();
}
- if (verifyChecksum) {
- fail("Reading corrupted data should fail, as verify checksum is " +
- "enabled");
- }
- } catch (IOException e) {
- if (!verifyChecksum) {
- fail("Reading corrupted data should not fail, as verify checksum is " +
- "disabled");
- }
+ } catch (IOException ignored) {
+ // ignore
}
}
@@ -2306,7 +2309,7 @@ public void testGetKeyDetails() throws IOException {
private void assertInputStreamContent(String expected, InputStream is)
throws IOException {
byte[] fileContent = new byte[expected.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
assertEquals(expected, new String(fileContent, UTF_8));
}
@@ -2354,7 +2357,7 @@ public void testReadKeyWithCorruptedData() throws IOException {
// throw a checksum mismatch exception.
IOException ioException = assertThrows(IOException.class, () -> {
try (OzoneInputStream is = bucket.readKey(keyName)) {
- is.read(new byte[100]);
+ IOUtils.readFully(is, new byte[100]);
}
});
@@ -2445,8 +2448,7 @@ void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
// Try reading keyName2
GenericTestUtils.setLogLevel(XceiverClientGrpc.class, DEBUG);
try (OzoneInputStream is = bucket.readKey(keyName2)) {
- byte[] content = new byte[100];
- is.read(content);
+ byte[] content = IOUtils.readFully(is, Math.toIntExact(bucket.getKey(keyName2).getDataSize()));
String retValue = new String(content, UTF_8);
assertEquals(value, retValue.trim());
}
@@ -2502,7 +2504,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
// failover to next replica
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[data.length];
- is.read(b);
+ IOUtils.readFully(is, b);
assertArrayEquals(b, data);
}
corruptData(cluster, containerList.get(1), key);
@@ -2510,7 +2512,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
// failover to next replica
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[data.length];
- is.read(b);
+ IOUtils.readFully(is, b);
assertArrayEquals(b, data);
}
corruptData(cluster, containerList.get(2), key);
@@ -2519,7 +2521,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
IOException ioException = assertThrows(IOException.class, () -> {
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[data.length];
- is.read(b);
+ IOUtils.readFully(is, b);
}
});
assertThat(ioException).hasMessageContaining("Checksum mismatch");
@@ -3671,7 +3673,7 @@ void testCommitPartAfterCompleteUpload() throws Exception {
byte[] fileContent = new byte[data.length];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
- inputStream.read(fileContent);
+ IOUtils.readFully(inputStream, fileContent);
}
StringBuilder sb = new StringBuilder(data.length);
@@ -4247,7 +4249,7 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val,
byte[] fileContent = new byte[data.length + data.length + part3.getBytes(
UTF_8).length];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
- inputStream.read(fileContent);
+ IOUtils.readFully(inputStream, fileContent);
}
verifyReplication(bucket.getVolumeName(), bucket.getName(), keyName,
@@ -4454,7 +4456,7 @@ public void testKeyReadWriteForGDPR() throws Exception {
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] fileContent = new byte[text.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
//Step 6
assertNotEquals(text, new String(fileContent, UTF_8));
@@ -4852,14 +4854,14 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException {
// read key with topology aware read enabled
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
- is.read(b);
+ IOUtils.readFully(is, b);
assertArrayEquals(b, value.getBytes(UTF_8));
}
// read file with topology aware read enabled
try (OzoneInputStream is = bucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
- is.read(b);
+ IOUtils.readFully(is, b);
assertArrayEquals(b, value.getBytes(UTF_8));
}
@@ -4873,14 +4875,14 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException {
newStore.getVolume(volumeName).getBucket(bucketName);
try (OzoneInputStream is = newBucket.readKey(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
- is.read(b);
+ IOUtils.readFully(is, b);
assertArrayEquals(b, value.getBytes(UTF_8));
}
// read file with topology aware read disabled
try (OzoneInputStream is = newBucket.readFile(keyName)) {
byte[] b = new byte[value.getBytes(UTF_8).length];
- is.read(b);
+ IOUtils.readFully(is, b);
assertArrayEquals(b, value.getBytes(UTF_8));
}
}
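
The readCorruptedKey rewrite above collapses the old fail()-based control flow into a single Executable that is handed to either assertThrows or assertDoesNotThrow depending on the checksum setting. A minimal sketch of that branching pattern:

import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.IOUtils;
import org.junit.jupiter.api.function.Executable;

class ExecutableBranchSketch {
  // One Executable, two expectations: the read must fail when checksums
  // are verified against corrupted data, and must succeed otherwise.
  static void expectRead(InputStream in, int len, boolean shouldFail) {
    Executable read = () -> IOUtils.readFully(in, new byte[len]);
    if (shouldFail) {
      assertThrows(IOException.class, read);
    } else {
      assertDoesNotThrow(read);
    }
  }
}
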
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 26f596c44335..df661d4f3478 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -29,11 +29,11 @@
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -298,7 +298,7 @@ public void testMultiBlockWrites3() throws Exception {
OzoneBucket bucket = volume.getBucket(bucketName);
byte[] readData = new byte[keyLen];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
- inputStream.read(readData);
+ IOUtils.readFully(inputStream, readData);
}
assertArrayEquals(writtenData, readData);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
index 4ffc1d4f6529..91f4d4ba7ce1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
@@ -199,11 +199,11 @@ public void testRatisSnapshotRetention() throws Exception {
RatisServerConfiguration ratisServerConfiguration =
conf.getObject(RatisServerConfiguration.class);
- stateMachine =
- (ContainerStateMachine) TestHelper.getStateMachine(cluster);
- storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
- Path parentPath = getSnapshotPath(storage);
- int numSnapshots = parentPath.getParent().toFile().listFiles().length;
+ Path parentPath = getSnapshotPath(storage).getParent();
+ assertThat(parentPath).isNotNull();
+ File[] files = parentPath.toFile().listFiles();
+ assertThat(files).isNotNull();
+ int numSnapshots = files.length;
assertThat(Math.abs(ratisServerConfiguration.getNumSnapshotsRetained() - numSnapshots))
.isLessThanOrEqualTo(1);
@@ -220,11 +220,9 @@ public void testRatisSnapshotRetention() throws Exception {
key.write(("ratis" + i).getBytes(UTF_8));
key.close();
}
- stateMachine =
- (ContainerStateMachine) TestHelper.getStateMachine(cluster);
- storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
- parentPath = getSnapshotPath(storage);
- numSnapshots = parentPath.getParent().toFile().listFiles().length;
+ files = parentPath.toFile().listFiles();
+ assertThat(files).isNotNull();
+ numSnapshots = files.length;
assertThat(Math.abs(ratisServerConfiguration.getNumSnapshotsRetained() - numSnapshots))
.isLessThanOrEqualTo(1);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 2ad7391edf81..94114825fa61 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -52,7 +52,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.ArrayUtils;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -72,7 +72,6 @@
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -726,7 +725,7 @@ void testWriteStateMachineDataIdempotencyWithClosedContainer()
Thread closeContainerThread = new Thread(r1);
closeContainerThread.start();
threadList.add(closeContainerThread);
- latch.await(600, TimeUnit.SECONDS);
+ assertTrue(latch.await(600, TimeUnit.SECONDS));
for (int i = 0; i < 101; i++) {
threadList.get(i).join();
}
@@ -858,13 +857,12 @@ private void validateData(String key, int locationCount, String payload) throws
assertEquals(locationCount,
keyInfo.getLatestVersionLocations().getLocationListCount());
- byte[] buffer = new byte[1024];
+ byte[] buffer = new byte[Math.toIntExact(keyInfo.getDataSize())];
try (OzoneInputStream o = objectStore.getVolume(volumeName)
.getBucket(bucketName).readKey(key)) {
- o.read(buffer, 0, 1024);
+ IOUtils.readFully(o, buffer);
}
- int end = ArrayUtils.indexOf(buffer, (byte) 0);
- String response = new String(buffer, 0, end, StandardCharsets.UTF_8);
+ String response = new String(buffer, StandardCharsets.UTF_8);
assertEquals(payload, response);
}
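
The validateData change above replaces a fixed 1 KB buffer, trimmed at the first zero byte, with a buffer sized exactly from keyInfo.getDataSize(); the old scan misreads any payload that legitimately contains 0x00 or exceeds 1 KB. A small sketch of the narrowing guard:

import java.nio.charset.StandardCharsets;

class BufferSizingSketch {
  public static void main(String[] args) {
    byte[] payload = "ratis0".getBytes(StandardCharsets.UTF_8); // hypothetical
    long dataSize = payload.length; // in the test: keyInfo.getDataSize()
    // toIntExact throws ArithmeticException instead of silently
    // truncating if the key exceeds Integer.MAX_VALUE bytes.
    byte[] buffer = new byte[Math.toIntExact(dataSize)];
    System.out.println(buffer.length == payload.length); // true
  }
}
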
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
index 08e8ddba47c8..db7fcc5e3d43 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
@@ -26,8 +26,10 @@
import static org.junit.jupiter.api.Assertions.assertSame;
import java.io.IOException;
+import java.io.InputStream;
import java.util.List;
import java.util.UUID;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
@@ -36,7 +38,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -46,7 +47,6 @@
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -156,14 +156,13 @@ public void testHybridPipelineOnDatanode() throws IOException {
byte[] b1 = new byte[data.length];
byte[] b2 = new byte[data.length];
// now try to read both the keys
- OzoneInputStream is = bucket.readKey(keyName1);
- is.read(b1);
- is.close();
+ try (InputStream is = bucket.readKey(keyName1)) {
+ IOUtils.readFully(is, b1);
+ }
- // now try to read both the keys
- is = bucket.readKey(keyName2);
- is.read(b2);
- is.close();
+ try (InputStream is = bucket.readKey(keyName2)) {
+ IOUtils.readFully(is, b2);
+ }
assertArrayEquals(b1, data);
assertArrayEquals(b1, b2);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 8e6a74f838b2..7fe57f8477ba 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -48,6 +48,7 @@
import java.util.stream.Collectors;
import javax.xml.bind.DatatypeConverter;
import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
@@ -56,7 +57,6 @@
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneTestUtils;
@@ -475,7 +475,7 @@ public void testCommitPartAfterCompleteUpload() throws Exception {
byte[] fileContent = new byte[data.length];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
- inputStream.read(fileContent);
+ IOUtils.readFully(inputStream, fileContent);
}
StringBuilder sb = new StringBuilder(data.length);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index 6645687e8d1c..2753d833419e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -39,6 +39,7 @@
import java.util.HashMap;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -170,7 +171,7 @@ private void testPutKeySuccessWithBlockTokenWithBucketLayout(
byte[] fileContent;
try (OzoneInputStream is = bucket.readKey(keyName)) {
fileContent = new byte[value.getBytes(UTF_8).length];
- is.read(fileContent);
+ IOUtils.readFully(is, fileContent);
}
String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java
index 68a5504b1a0e..972eae13d7cb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java
@@ -24,6 +24,7 @@
import java.io.IOException;
import java.nio.ByteBuffer;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
import org.apache.hadoop.ozone.client.OzoneClient;
@@ -76,7 +77,7 @@ private void testChunkReadBuffers(TestBucket bucket) throws Exception {
// To read 1 byte of chunk data, ChunkInputStream should get one full
// checksum boundary worth of data from Container and store it in buffers.
- chunk0Stream.read(new byte[1]);
+ IOUtils.readFully(chunk0Stream, new byte[1]);
checkBufferSizeAndCapacity(chunk0Stream.getCachedBuffers(), 1, 0,
BYTES_PER_CHECKSUM);
@@ -117,7 +118,7 @@ private void testChunkReadBuffers(TestBucket bucket) throws Exception {
expectedNumBuffers, expectedNumBuffers - 1, BYTES_PER_CHECKSUM);
// Read the last byte of chunk and verify that the buffers are released.
- chunk0Stream.read(new byte[1]);
+ IOUtils.readFully(chunk0Stream, new byte[1]);
assertNull(chunk0Stream.getCachedBuffers(),
"ChunkInputStream did not release buffers after reaching EOF.");
}
@@ -208,14 +209,14 @@ private byte[] readDataFromChunk(ChunkInputStream chunkInputStream,
int offset, int readDataLength) throws IOException {
byte[] readData = new byte[readDataLength];
chunkInputStream.seek(offset);
- chunkInputStream.read(readData, 0, readDataLength);
+ IOUtils.readFully(chunkInputStream, readData);
return readData;
}
private byte[] readDataFromChunk(ChunkInputStream chunkInputStream,
int readDataLength) throws IOException {
byte[] readData = new byte[readDataLength];
- chunkInputStream.read(readData, 0, readDataLength);
+ IOUtils.readFully(chunkInputStream, readData);
return readData;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
index 6daa0235a6c3..7fd87d47cf7d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java
@@ -30,6 +30,7 @@
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -111,7 +112,7 @@ private void validate(TestBucket bucket, KeyInputStream keyInputStream,
keyInputStream.seek(seek);
byte[] readData = new byte[readLength];
- keyInputStream.read(readData, 0, readLength);
+ IOUtils.readFully(keyInputStream, readData);
bucket.validateData(inputData, (int) seek, readData);
}
@@ -267,7 +268,7 @@ public void testSeek(TestBucket bucket) throws Exception {
.getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk));
byte[] readData = new byte[CHUNK_SIZE];
- keyInputStream.read(readData, 0, CHUNK_SIZE);
+ IOUtils.readFully(keyInputStream, readData);
// Since we read data from index 150 to 250 and the chunk boundary is
// 100 bytes, we need to read 2 chunks.
@@ -360,7 +361,7 @@ private void testSkip(TestBucket bucket) throws Exception {
.getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk));
byte[] readData = new byte[CHUNK_SIZE];
- keyInputStream.read(readData, 0, CHUNK_SIZE);
+ IOUtils.readFully(keyInputStream, readData);
// Since we read data from index 150 to 250 and the chunk boundary is
// 100 bytes, we need to read 2 chunks.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 635993cc8732..c976c59a8a61 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -50,6 +50,7 @@
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import java.util.stream.Collectors;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -364,7 +365,7 @@ public void testECContainerReplication() throws Exception {
//Reading through file and comparing with input data.
byte[] readData = new byte[size];
try (OzoneInputStream inputStream = createInputStream(client)) {
- inputStream.read(readData);
+ IOUtils.readFully(inputStream, readData);
Assertions.assertArrayEquals(readData, originalData);
}
Assertions.assertEquals(0, failedReadChunkCountMap.size());
@@ -373,7 +374,7 @@ public void testECContainerReplication() throws Exception {
int firstReadLen = 1024 * 3;
Arrays.fill(readData, (byte) 0);
//Reading first stripe.
- inputStream.read(readData, 0, firstReadLen);
+ IOUtils.readFully(inputStream, readData, 0, firstReadLen);
Assertions.assertEquals(0, failedReadChunkCountMap.size());
//Checking the initial state as per the latest location.
assertState(cluster, ImmutableMap.of(1, replicaIndexMap.get(1), 2, replicaIndexMap.get(2),
@@ -402,7 +403,7 @@ public void testECContainerReplication() throws Exception {
assertState(cluster, ImmutableMap.of(1, replicaIndexMap.get(3), 2, replicaIndexMap.get(2),
3, replicaIndexMap.get(1), 4, replicaIndexMap.get(4), 5, replicaIndexMap.get(5)));
// Reading the Stripe 2 from the pre initialized inputStream
- inputStream.read(readData, firstReadLen, size - firstReadLen);
+ IOUtils.readFully(inputStream, readData, firstReadLen, size - firstReadLen);
// Asserting there was a failure in the first read chunk.
Assertions.assertEquals(ImmutableMap.of(1, 1, 3, 1), failedReadChunkCountMap);
Assertions.assertArrayEquals(readData, originalData);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index c454facea070..1f9b470d6379 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -35,6 +35,7 @@
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeoutException;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -186,7 +187,7 @@ public static void validateData(String keyName, byte[] data,
objectStore.getVolume(volumeName).getBucket(bucketName)
.readKey(keyName)) {
byte[] readData = new byte[data.length];
- is.read(readData);
+ IOUtils.readFully(is, readData);
MessageDigest sha1 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
sha1.update(data);
MessageDigest sha2 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 0a8f8092f536..ce6ce28f34c2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -215,7 +215,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
File lingeringBlock =
new File(containerInternalObj.
getContainerData().getChunksPath() + "/1.block");
- lingeringBlock.createNewFile();
+ FileUtils.touch(lingeringBlock);
// Check container exists before sending delete container command
assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
@@ -333,7 +333,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
File lingeringBlock =
new File(containerInternalObj.
getContainerData().getChunksPath() + "/1.block");
- lingeringBlock.createNewFile();
+ FileUtils.touch(lingeringBlock);
// Check container exists before sending delete container command
assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
@@ -401,7 +401,7 @@ public void testDeleteNonEmptyContainerBlockTable()
File lingeringBlock =
new File(containerInternalObj.
getContainerData().getChunksPath() + "/1.block");
- lingeringBlock.createNewFile();
+ FileUtils.touch(lingeringBlock);
ContainerMetrics metrics =
hddsDatanodeService
.getDatanodeStateMachine().getContainer().getMetrics();
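
File.createNewFile() signals failure only through its ignored boolean result (and returns false if the file already exists), whereas commons-io's FileUtils.touch throws IOException on failure and creates missing parent directories. A minimal sketch, with a hypothetical path:

import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;

class TouchSketch {
  public static void main(String[] args) throws IOException {
    File lingeringBlock = new File("/tmp/chunks/1.block"); // hypothetical
    // touch() creates an empty file (and its parents) or updates the
    // timestamp if it exists, failing loudly instead of returning false.
    FileUtils.touch(lingeringBlock);
    System.out.println(lingeringBlock.exists()); // true
  }
}
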
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/DatanodeTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/DatanodeTestUtils.java
index 7c72627a7c14..90de2d6a00fa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/DatanodeTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/DatanodeTestUtils.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.ozone.dn;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
@@ -156,7 +158,7 @@ public static void restoreDataFileFromFailure(File... files)
public static void injectContainerMetaDirFailure(File... dirs) {
for (File dir : dirs) {
if (dir.exists()) {
- dir.setWritable(false, false);
+ assertTrue(dir.setWritable(false, false));
}
}
}
@@ -169,7 +171,7 @@ public static void injectContainerMetaDirFailure(File... dirs) {
public static void restoreContainerMetaDirFromFailure(File... dirs) {
for (File dir : dirs) {
if (dir.exists()) {
- dir.setWritable(true, true);
+ assertTrue(dir.setWritable(true, true));
}
}
}
@@ -182,7 +184,7 @@ public static void restoreContainerMetaDirFromFailure(File... dirs) {
*/
public static void simulateBadRootDir(File rootDir) {
if (rootDir.exists()) {
- rootDir.setWritable(false);
+ assertTrue(rootDir.setWritable(false));
}
}
@@ -203,7 +205,7 @@ public static void simulateBadVolume(StorageVolume vol) {
*/
public static void restoreBadRootDir(File rootDir) {
if (rootDir.exists()) {
- rootDir.setWritable(true);
+ assertTrue(rootDir.setWritable(true));
}
}
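
File.setWritable also reports failure only via its boolean result, e.g. on filesystems without permission support or when running as root, so the fault-injection helpers above now assert it instead of possibly injecting nothing. A minimal sketch:

import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;

class SetWritableSketch {
  // Sketch only: if setWritable(false) quietly failed, the "bad volume"
  // the test depends on would never actually become read-only.
  static void makeReadOnly(File dir) {
    if (dir.exists()) {
      assertTrue(dir.setWritable(false, false));
    }
  }
}
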
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index d52adf41a2d9..7cbec0e8f059 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -1664,13 +1664,12 @@ private void verifyFileStatus(String directory,
for (OzoneFileStatus fileStatus : fileStatuses) {
String normalizedKeyName = fileStatus.getTrimmedName();
- String parent =
- Paths.get(fileStatus.getKeyInfo().getKeyName()).getParent()
- .toString();
if (!recursive) {
+ Path parent = Paths.get(fileStatus.getKeyInfo().getKeyName()).getParent();
// if recursive is false, verify all the statuses have the input
// directory as parent
- assertEquals(parent, directory);
+ assertNotNull(parent);
+ assertEquals(directory, parent.toString());
}
// verify filestatus is present in directory or file set accordingly
if (fileStatus.isDirectory()) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
index 7956222bbe4c..c8ffbd2602bf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
@@ -23,6 +23,7 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.params.provider.Arguments.of;
+import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
@@ -32,15 +33,14 @@
import java.util.List;
import java.util.Optional;
import java.util.stream.Stream;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
@@ -379,10 +379,10 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> ke
private static void readkey(OzoneBucket ozoneBucket, String key, int length, byte[] input)
throws Exception {
- OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
byte[] read = new byte[length];
- ozoneInputStream.read(read, 0, length);
- ozoneInputStream.close();
+ try (InputStream in = ozoneBucket.readKey(key)) {
+ IOUtils.readFully(in, read);
+ }
assertEquals(new String(input, StandardCharsets.UTF_8), new String(read, StandardCharsets.UTF_8));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
index 73b064720e9b..e09249319d39 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
@@ -21,17 +21,18 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -39,7 +40,6 @@
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
@@ -650,10 +650,10 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> ke
private static void readkey(OzoneBucket ozoneBucket, String key, int length, byte[] input)
throws Exception {
- OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
byte[] read = new byte[length];
- ozoneInputStream.read(read, 0, length);
- ozoneInputStream.close();
+ try (InputStream ozoneInputStream = ozoneBucket.readKey(key)) {
+ IOUtils.readFully(ozoneInputStream, read);
+ }
assertEquals(new String(input, StandardCharsets.UTF_8), new String(read, StandardCharsets.UTF_8));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index ca8ede0a6a61..084dbded41de 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -694,7 +694,7 @@ private void prepSnapshotData() throws Exception {
Path fabricatedSnapshot = Paths.get(
new File(snapshotDirName).getParent(),
"fabricatedSnapshot");
- fabricatedSnapshot.toFile().mkdirs();
+ assertTrue(fabricatedSnapshot.toFile().mkdirs());
assertTrue(Paths.get(fabricatedSnapshot.toString(),
FABRICATED_FILE_NAME).toFile().createNewFile());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index ee59372d80b8..20def61f70f9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om;
import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.apache.hadoop.ozone.TestDataUtil.readFully;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE;
@@ -40,7 +41,6 @@
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
-import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -51,6 +51,8 @@
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.ExitManager;
@@ -59,7 +61,6 @@
import org.apache.hadoop.hdds.utils.DBCheckpointMetrics;
import org.apache.hadoop.hdds.utils.FaultInjector;
import org.apache.hadoop.hdds.utils.HAUtils;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils;
@@ -74,7 +75,6 @@
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -352,7 +352,9 @@ private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM,
int hardLinkCount = 0;
try (Stream<Path> list = Files.list(leaderSnapshotDir)) {
for (Path leaderSnapshotSST: list.collect(Collectors.toList())) {
- String fileName = leaderSnapshotSST.getFileName().toString();
+ Path path = leaderSnapshotSST.getFileName();
+ assertNotNull(path);
+ String fileName = path.toString();
if (fileName.toLowerCase().endsWith(".sst")) {
Path leaderActiveSST =
@@ -1008,11 +1010,12 @@ public void testInstallCorruptedCheckpointFailure() throws Exception {
// Corrupt the leader checkpoint and install that on the OM. The
// operation should fail and the OM should shut down.
boolean delete = true;
- for (File file : leaderCheckpointLocation.toFile()
- .listFiles()) {
+ File[] files = leaderCheckpointLocation.toFile().listFiles();
+ assertNotNull(files);
+ for (File file : files) {
if (file.getName().contains(".sst")) {
if (delete) {
- file.delete();
+ FileUtils.deleteQuietly(file);
delete = false;
} else {
delete = true;
@@ -1090,10 +1093,7 @@ private void getKeys(List<String> keys, int round) throws IOException {
private void readKeys(List<String> keys) throws IOException {
for (String keyName : keys) {
- OzoneInputStream inputStream = ozoneBucket.readKey(keyName);
- byte[] data = new byte[100];
- inputStream.read(data, 0, 100);
- inputStream.close();
+ readFully(ozoneBucket, keyName);
}
}
@@ -1110,7 +1110,9 @@ private void unTarLatestTarBall(OzoneManager followerOm, Path tempDir)
throws IOException {
File snapshotDir = followerOm.getOmSnapshotProvider().getSnapshotDir();
// Find the latest tarball.
- String tarBall = Arrays.stream(Objects.requireNonNull(snapshotDir.list())).
+ String[] list = snapshotDir.list();
+ assertNotNull(list);
+ String tarBall = Arrays.stream(list).
filter(s -> s.toLowerCase().endsWith(".tar")).
reduce("", (s1, s2) -> s1.compareToIgnoreCase(s2) > 0 ? s1 : s2);
FileUtil.unTar(new File(snapshotDir, tarBall), tempDir.toFile());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
index eaaedda5cffd..a26634a69276 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
@@ -33,6 +33,7 @@
import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
+import java.io.InputStream;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
@@ -42,9 +43,9 @@
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -52,7 +53,6 @@
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OmUtils;
@@ -66,7 +66,6 @@
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -568,10 +567,10 @@ private void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> keys)
private void readKey(OzoneBucket ozoneBucket, String key, int length, byte[] input)
throws Exception {
- OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
byte[] read = new byte[length];
- ozoneInputStream.read(read, 0, length);
- ozoneInputStream.close();
+ try (InputStream ozoneInputStream = ozoneBucket.readKey(key)) {
+ IOUtils.readFully(ozoneInputStream, read);
+ }
String inputString = new String(input, StandardCharsets.UTF_8);
assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
@@ -581,10 +580,9 @@ private void readKey(OzoneBucket ozoneBucket, String key, int length, byte[] inp
bucketName, volumeName, StandardCharsets.UTF_8);
OzoneFileSystem o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath),
conf);
- FSDataInputStream fsDataInputStream = o3fs.open(new Path(key));
- read = new byte[length];
- fsDataInputStream.read(read, 0, length);
- fsDataInputStream.close();
+ try (InputStream fsDataInputStream = o3fs.open(new Path(key))) {
+ IOUtils.readFully(fsDataInputStream, read);
+ }
assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
index ae3f3c239400..043b9f90d108 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
@@ -24,6 +24,7 @@
import static org.apache.hadoop.ozone.audit.AuditLogTestUtils.verifyAuditLog;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.IOException;
@@ -58,10 +59,7 @@
@Timeout(300)
public class TestOmAcls {
- private static boolean volumeAclAllow = true;
- private static boolean bucketAclAllow = true;
- private static boolean keyAclAllow = true;
- private static boolean prefixAclAllow = true;
+ private static OzoneAccessAuthorizerTest authorizer;
private static MiniOzoneCluster cluster = null;
private static OzoneClient client;
private static LogCapturer logCapturer;
@@ -87,6 +85,7 @@ public static void init() throws Exception {
cluster.waitForClusterToBeReady();
client = cluster.newClient();
logCapturer = LogCapturer.captureLogs(OzoneManager.class);
+ authorizer = assertInstanceOf(OzoneAccessAuthorizerTest.class, cluster.getOzoneManager().getAccessAuthorizer());
}
@AfterAll
@@ -103,15 +102,15 @@ public void setup() throws IOException {
logCapturer.clearOutput();
AuditLogTestUtils.truncateAuditLogFile();
- TestOmAcls.volumeAclAllow = true;
- TestOmAcls.bucketAclAllow = true;
- TestOmAcls.keyAclAllow = true;
- TestOmAcls.prefixAclAllow = true;
+ authorizer.volumeAclAllow = true;
+ authorizer.bucketAclAllow = true;
+ authorizer.keyAclAllow = true;
+ authorizer.prefixAclAllow = true;
}
@Test
public void testCreateVolumePermissionDenied() throws Exception {
- TestOmAcls.volumeAclAllow = false;
+ authorizer.volumeAclAllow = false;
OMException exception = assertThrows(OMException.class,
() -> TestDataUtil.createVolumeAndBucket(client));
@@ -125,7 +124,7 @@ public void testCreateVolumePermissionDenied() throws Exception {
@Test
public void testReadVolumePermissionDenied() throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
- TestOmAcls.volumeAclAllow = false;
+ authorizer.volumeAclAllow = false;
ObjectStore objectStore = client.getObjectStore();
OMException exception = assertThrows(OMException.class, () ->
objectStore.getVolume(bucket.getVolumeName()));
@@ -138,7 +137,7 @@ public void testReadVolumePermissionDenied() throws Exception {
@Test
public void testCreateBucketPermissionDenied() throws Exception {
- TestOmAcls.bucketAclAllow = false;
+ authorizer.bucketAclAllow = false;
OMException exception = assertThrows(OMException.class,
() -> TestDataUtil.createVolumeAndBucket(client));
@@ -152,7 +151,7 @@ public void testCreateBucketPermissionDenied() throws Exception {
@Test
public void testReadBucketPermissionDenied() throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
- TestOmAcls.bucketAclAllow = false;
+ authorizer.bucketAclAllow = false;
ObjectStore objectStore = client.getObjectStore();
OMException exception = assertThrows(OMException.class,
() -> objectStore.getVolume(
@@ -167,7 +166,7 @@ public void testReadBucketPermissionDenied() throws Exception {
@Test
public void testCreateKeyPermissionDenied() throws Exception {
- TestOmAcls.keyAclAllow = false;
+ authorizer.keyAclAllow = false;
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
@@ -183,7 +182,7 @@ public void testReadKeyPermissionDenied() throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
TestDataUtil.createKey(bucket, "testKey", "testcontent".getBytes(StandardCharsets.UTF_8));
- TestOmAcls.keyAclAllow = false;
+ authorizer.keyAclAllow = false;
OMException exception = assertThrows(OMException.class,
() -> TestDataUtil.getKey(bucket, "testKey"));
@@ -197,7 +196,7 @@ public void testReadKeyPermissionDenied() throws Exception {
public void testSetACLPermissionDenied() throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
- TestOmAcls.bucketAclAllow = false;
+ authorizer.bucketAclAllow = false;
OMException exception = assertThrows(OMException.class,
() -> bucket.setAcl(new ArrayList<>()));
@@ -212,17 +211,22 @@ public void testSetACLPermissionDenied() throws Exception {
*/
static class OzoneAccessAuthorizerTest implements IAccessAuthorizer {
+ private boolean volumeAclAllow = true;
+ private boolean bucketAclAllow = true;
+ private boolean keyAclAllow = true;
+ private boolean prefixAclAllow = true;
+
@Override
public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) {
switch (((OzoneObjInfo) ozoneObject).getResourceType()) {
case VOLUME:
- return TestOmAcls.volumeAclAllow;
+ return volumeAclAllow;
case BUCKET:
- return TestOmAcls.bucketAclAllow;
+ return bucketAclAllow;
case KEY:
- return TestOmAcls.keyAclAllow;
+ return keyAclAllow;
case PREFIX:
- return TestOmAcls.prefixAclAllow;
+ return prefixAclAllow;
default:
return false;
}
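
Reviewer note: moving the allow/deny flags from static fields on TestOmAcls into the authorizer instance ties the toggles to the object the OzoneManager actually consults, and JUnit 5's assertInstanceOf both asserts the type and returns the already-cast value, so a misconfigured cluster fails with a clear message instead of a ClassCastException. A hedged sketch of the pattern (types simplified, names illustrative):

    import static org.junit.jupiter.api.Assertions.assertInstanceOf;

    class AssertInstanceOfExample {
      interface Authorizer { }

      static class TestAuthorizer implements Authorizer {
        boolean allow = true;
      }

      void toggle(Authorizer configured) {
        // Fails the test with a clear message if the type is wrong,
        // and returns the value already typed as TestAuthorizer.
        TestAuthorizer authorizer =
            assertInstanceOf(TestAuthorizer.class, configured);
        authorizer.allow = false; // per-test toggling without static state
      }
    }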
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
index cdcd38c6648b..7dec6a385268 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -32,16 +32,17 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyInt;
-import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -199,7 +200,7 @@ public void testVolumeOps() throws Exception {
"volumeManager", mockVm);
// inject exception to test for Failure Metrics on the write path
- OMMetadataManager metadataManager = mockWritePathExceptions(OmVolumeArgs.class);
+ OMMetadataManager metadataManager = mockWritePathExceptions(TestOmMetrics::mockVolumeTable);
volumeArgs = createVolumeArgs();
doVolumeOps(volumeArgs);
@@ -291,7 +292,7 @@ public void testBucketOps() throws Exception {
ozoneManager, "bucketManager", mockBm);
// inject exception to test for Failure Metrics on the write path
- OMMetadataManager metadataManager = mockWritePathExceptions(OmBucketInfo.class);
+ OMMetadataManager metadataManager = mockWritePathExceptions(TestOmMetrics::mockBucketTable);
doBucketOps(bucketInfo);
ecBucketInfo = createBucketInfo(true);
@@ -425,7 +426,7 @@ public void testKeyOps() throws Exception {
omMetadataReader, "keyManager", mockKm);
// inject exception to test for Failure Metrics on the write path
- OMMetadataManager metadataManager = mockWritePathExceptions(OmBucketInfo.class);
+ OMMetadataManager metadataManager = mockWritePathExceptions(TestOmMetrics::mockBucketTable);
keyArgs = createKeyArgs(volumeName, bucketName,
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
doKeyOps(keyArgs);
@@ -646,33 +647,34 @@ public void testSnapshotOps() throws Exception {
assertEquals(initialNumSnapshotListFails + 1, getLongCounter("NumSnapshotListFails", omMetrics));
}
- private <T> OMMetadataManager mockWritePathExceptions(Class<T> klass) throws Exception {
- String tableName;
- if (klass == OmBucketInfo.class) {
- tableName = "bucketTable";
- } else {
- tableName = "volumeTable";
- }
- OMMetadataManager metadataManager = (OMMetadataManager)
- HddsWhiteboxTestUtils.getInternalState(ozoneManager, "metadataManager");
- OMMetadataManager mockMm = spy(metadataManager);
- @SuppressWarnings("unchecked")
- Table<String, T> table = (Table<String, T>)
- HddsWhiteboxTestUtils.getInternalState(metadataManager, tableName);
- Table<String, T> mockTable = spy(table);
- doThrow(exception).when(mockTable).isExist(any());
- if (klass == OmBucketInfo.class) {
- doReturn(mockTable).when(mockMm).getBucketTable();
- } else {
- doReturn(mockTable).when(mockMm).getVolumeTable();
- }
+ private OMMetadataManager mockWritePathExceptions(
+ Function<OMMetadataManager, Table<String, ?>> getTable
+ ) throws Exception {
+ OMMetadataManager metadataManager = ozoneManager.getMetadataManager();
+ OMMetadataManager spy = spy(metadataManager);
+ Table<String, ?> table = getTable.apply(spy);
+ doThrow(exception).when(table).isExist(any());
HddsWhiteboxTestUtils.setInternalState(
- ozoneManager, "metadataManager", mockMm);
+ ozoneManager, "metadataManager", spy);
// Return the original metadataManager so it can be restored later
return metadataManager;
}
+ private static Table<String, OmVolumeArgs> mockVolumeTable(OMMetadataManager spy) {
+ Table<String, OmVolumeArgs> table = spy(spy.getVolumeTable());
+ when(spy.getVolumeTable())
+ .thenReturn(table);
+ return table;
+ }
+
+ private static Table<String, OmBucketInfo> mockBucketTable(OMMetadataManager spy) {
+ Table<String, OmBucketInfo> table = spy(spy.getBucketTable());
+ when(spy.getBucketTable())
+ .thenReturn(table);
+ return table;
+ }
+
@Test
public void testAclOperations() throws Exception {
// get initial values for metrics
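
Reviewer note: the refactored mockWritePathExceptions above also swaps doReturn(...).when(spy) for when(spy.getVolumeTable()).thenReturn(...). On a Mockito spy, when(...) invokes the real method once while the stub is being recorded; that is safe here because the table getters are side-effect-free, but doReturn remains the safer form when the real call could throw or mutate state. A minimal illustration (types simplified, not the Ozone API):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;
    import static org.mockito.Mockito.when;

    class SpyStubbingExample {
      static class Repo {
        String load() { return "real"; }
      }

      void example() {
        Repo spied = spy(new Repo());

        // Calls the real load() once while recording the stub -- fine for
        // harmless getters like getVolumeTable()/getBucketTable() above.
        when(spied.load()).thenReturn("stubbed");

        // Never touches the real method; preferred if load() had side effects.
        doReturn("stubbed").when(spied).load();
      }
    }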
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
index f6c819e50521..5933c4d7446e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
@@ -42,11 +42,11 @@
import java.util.Iterator;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
@@ -378,7 +378,7 @@ protected void testCreateFile(OzoneBucket ozoneBucket, String keyName,
try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName)) {
byte[] fileContent = new byte[data.getBytes(UTF_8).length];
- ozoneInputStream.read(fileContent);
+ IOUtils.readFully(ozoneInputStream, fileContent);
assertEquals(data, new String(fileContent, UTF_8));
}
@@ -430,7 +430,7 @@ protected void createKeyTest(boolean checkSuccess) throws Exception {
try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName)) {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
- ozoneInputStream.read(fileContent);
+ IOUtils.readFully(ozoneInputStream, fileContent);
assertEquals(value, new String(fileContent, UTF_8));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
index 50a17cc57270..0f2c43a86d85 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
@@ -40,6 +40,7 @@
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
@@ -220,7 +221,7 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket,
try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName)) {
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
- ozoneInputStream.read(fileContent);
+ IOUtils.readFully(ozoneInputStream, fileContent);
assertEquals(value, new String(fileContent, UTF_8));
}
}
@@ -272,11 +273,9 @@ void testOMRestart() throws Exception {
OzoneManager leaderOM = getCluster().getOzoneManager(leaderOMNodeId);
- // Get follower OMs
+ // Get follower OM
OzoneManager followerOM1 = getCluster().getOzoneManager(
leaderOM.getPeerNodes().get(0).getNodeId());
- OzoneManager followerOM2 = getCluster().getOzoneManager(
- leaderOM.getPeerNodes().get(1).getNodeId());
// Do some transactions so that the log index increases
String userName = "user" + RandomStringUtils.randomNumeric(5);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index d8b05683ff4d..165d6dc8a548 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -79,6 +79,7 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -90,7 +91,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.RDBStore;
@@ -2307,7 +2307,7 @@ private void checkDayWeekMonthSnapshotData(OzoneBucket ozoneBucketClient,
try (OzoneInputStream ozoneInputStream =
ozoneBucketClient.readKey(keyName)) {
byte[] fileContent = new byte[keyName.length()];
- ozoneInputStream.read(fileContent);
+ IOUtils.readFully(ozoneInputStream, fileContent);
assertEquals(keyName, new String(fileContent, UTF_8));
}
}
@@ -2346,7 +2346,7 @@ private void validateSnapshotDataIntegrity(String snapshotPrefix,
if (snapKeyNameMatcher.matches()) {
String truncatedSnapshotKeyName = snapKeyNameMatcher.group(3);
byte[] fileContent = new byte[truncatedSnapshotKeyName.length()];
- ozoneInputStream.read(fileContent);
+ IOUtils.readFully(ozoneInputStream, fileContent);
assertEquals(truncatedSnapshotKeyName,
new String(fileContent, UTF_8));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
index 30444b9aac64..6ed4542a27bd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java
@@ -37,6 +37,7 @@
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InputStream;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
@@ -52,6 +53,7 @@
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
+import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -64,7 +66,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -75,7 +76,6 @@
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneSnapshot;
import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.KeyManagerImpl;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
@@ -89,7 +89,7 @@
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
@@ -163,7 +163,7 @@ private void init() throws Exception {
keyManager.stop();
}
- @BeforeEach
+ @BeforeAll
public void setupFsClient() throws IOException {
String rootPath = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, bucketName, VOLUME_NAME);
@@ -178,6 +178,7 @@ public void setupFsClient() throws IOException {
@AfterAll
void tearDown() {
IOUtils.closeQuietly(client);
+ IOUtils.closeQuietly(fs);
if (cluster != null) {
cluster.shutdown();
}
@@ -210,9 +211,6 @@ public void deleteRootDir()
return false;
}
}, 1000, 120000);
-
- IOUtils.closeQuietly(fs);
- IOUtils.closeQuietly(o3fs);
}
@Test
@@ -364,10 +362,10 @@ private void createKey(OzoneBucket ozoneBucket, String key, int length)
private void readkey(OzoneBucket ozoneBucket, String key, int length, byte[] input)
throws Exception {
- OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
byte[] read = new byte[length];
- ozoneInputStream.read(read, 0, length);
- ozoneInputStream.close();
+ try (InputStream ozoneInputStream = ozoneBucket.readKey(key)) {
+ IOUtils.readFully(ozoneInputStream, read);
+ }
String inputString = new String(input, StandardCharsets.UTF_8);
assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
@@ -377,10 +375,9 @@ private void readkey(OzoneBucket ozoneBucket, String key, int length, byte[] inp
bucketName, VOLUME_NAME);
OzoneFileSystem o3fsNew = (OzoneFileSystem) FileSystem
.get(new URI(rootPath), conf);
- FSDataInputStream fsDataInputStream = o3fsNew.open(new Path(key));
- read = new byte[length];
- fsDataInputStream.read(read, 0, length);
- fsDataInputStream.close();
+ try (InputStream fsDataInputStream = o3fsNew.open(new Path(key))) {
+ IOUtils.readFully(fsDataInputStream, read);
+ }
assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
}
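
Reviewer note: promoting setupFsClient from @BeforeEach to @BeforeAll works here because the class uses @TestInstance (visible in the imports above), whose PER_CLASS lifecycle allows non-static lifecycle methods; the FileSystem is then created once and closed once in the @AfterAll tearDown instead of being re-created per test and closed inside deleteRootDir. A hedged sketch of that lifecycle shape (simplified, names illustrative):

    import java.io.Closeable;
    import java.io.IOException;
    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.TestInstance;

    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    class PerClassLifecycleExample {
      private Closeable sharedClient;

      @BeforeAll // non-static is legal under PER_CLASS
      void setUp() throws IOException {
        sharedClient = () -> { }; // stand-in for an expensive FileSystem/client
      }

      @AfterAll
      void tearDown() throws IOException {
        sharedClient.close(); // closed exactly once, matching the single setup
      }
    }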
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java
index e2bd219753ad..33f963b59a32 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java
@@ -23,6 +23,7 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.TestDataUtil.readFully;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithStoppedNodes.createKey;
@@ -42,9 +43,9 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
-import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -59,7 +60,6 @@
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OmFailoverProxyUtil;
import org.apache.hadoop.ozone.om.OmSnapshot;
@@ -658,10 +658,7 @@ private List<String> writeKeys(long keyCount) throws IOException {
private void readKeys(List<String> keys) throws IOException {
for (String keyName : keys) {
- OzoneInputStream inputStream = ozoneBucket.readKey(keyName);
- byte[] data = new byte[100];
- inputStream.read(data, 0, 100);
- inputStream.close();
+ readFully(ozoneBucket, keyName);
}
}
}
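
Reviewer note: TestDataUtil.readFully(ozoneBucket, keyName) replaces the hand-rolled 100-byte read above, but its body is not shown in this diff; the following is only a plausible shape under that assumption, not the actual helper:

    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.commons.io.IOUtils;
    import org.apache.hadoop.ozone.client.OzoneBucket;

    final class ReadFullyHelperSketch {
      // Hypothetical equivalent of the TestDataUtil.readFully used above:
      // drain the whole key so short reads cannot mask data problems,
      // and close the stream even on failure.
      static byte[] readFully(OzoneBucket bucket, String keyName)
          throws IOException {
        try (InputStream in = bucket.readKey(keyName)) {
          return IOUtils.toByteArray(in);
        }
      }
    }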
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index d7fbce84f1ee..5ab9061f6f04 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -57,6 +57,7 @@
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
+import org.apache.commons.io.FileUtils;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
@@ -182,8 +183,7 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception {
testFilePathString = path + OZONE_URI_DELIMITER + "testFile";
testFile = new File(testFilePathString);
- testFile.getParentFile().mkdirs();
- testFile.createNewFile();
+ FileUtils.touch(testFile);
// Init HA cluster
omServiceId = "om-service-test1";
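
Reviewer note: FileUtils.touch collapses the mkdirs()/createNewFile() pair and, unlike those boolean-returning calls whose results were being ignored, it creates missing parent directories and throws IOException when the file cannot be created, so setup failures surface immediately; the deleteQuietly calls in the next file are the matching cleanup-side choice, deliberately swallowing failures during teardown. Usage is exactly as in the patch:

    import java.io.File;
    import java.io.IOException;
    import org.apache.commons.io.FileUtils;

    class TouchExample {
      void example(File testFile) throws IOException {
        // Creates parent directories as needed, then the file itself;
        // throws IOException instead of silently returning false.
        FileUtils.touch(testFile);
      }
    }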
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
index 201881af972d..594f5cd47e25 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
@@ -115,9 +115,7 @@ public class TestOzoneTenantShell {
@BeforeAll
public static void init() throws Exception {
// Remove audit log output if it exists
- if (AUDIT_LOG_FILE.exists()) {
- AUDIT_LOG_FILE.delete();
- }
+ FileUtils.deleteQuietly(AUDIT_LOG_FILE);
conf = new OzoneConfiguration();
conf.setBoolean(OZONE_OM_TENANT_DEV_SKIP_RANGER, true);
@@ -134,8 +132,7 @@ public static void init() throws Exception {
}
testFile = new File(path + OzoneConsts.OZONE_URI_DELIMITER + "testFile");
- testFile.getParentFile().mkdirs();
- testFile.createNewFile();
+ FileUtils.touch(testFile);
ozoneSh = new OzoneShell();
tenantShell = new TenantShell();
@@ -160,9 +157,7 @@ public static void shutdown() {
cluster.shutdown();
}
- if (AUDIT_LOG_FILE.exists()) {
- AUDIT_LOG_FILE.delete();
- }
+ FileUtils.deleteQuietly(AUDIT_LOG_FILE);
}
@BeforeEach