reverseOrder = new ArrayList<>(pipeline.getNodes());
- Collections.reverse(reverseOrder);
- return MockPipeline.createPipeline(reverseOrder);
- }
-
@ParameterizedTest
@MethodSource("exceptionsTriggersRefresh")
public void testRefreshOnReadFailureAfterUnbuffer(IOException ex)
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
index 3d2ff00d64f..9b061f5392d 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
@@ -20,10 +20,10 @@
import java.io.IOException;
import java.util.Map;
-import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
@@ -42,12 +42,13 @@
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -56,37 +57,28 @@
*
* Compares bytes written to the stream and received in the ChunkWriteRequests.
*/
-public class TestBlockOutputStreamCorrectness {
-
- private static final long SEED = 18480315L;
+class TestBlockOutputStreamCorrectness {
- private int writeUnitSize = 1;
+ private static final int DATA_SIZE = 256 * (int) OzoneConsts.MB;
+ private static final byte[] DATA = RandomUtils.nextBytes(DATA_SIZE);
- @Test
- public void test() throws IOException {
+ @ParameterizedTest
+ @ValueSource(ints = { 1, 1024, 1024 * 1024 })
+ void test(final int writeSize) throws IOException {
+ assertEquals(0, DATA_SIZE % writeSize);
final BufferPool bufferPool = new BufferPool(4 * 1024 * 1024, 32 / 4);
for (int block = 0; block < 10; block++) {
- BlockOutputStream outputStream =
- createBlockOutputStream(bufferPool);
-
- Random random = new Random(SEED);
-
- int max = 256 * 1024 * 1024 / writeUnitSize;
-
- byte[] writeBuffer = new byte[writeUnitSize];
- for (int t = 0; t < max; t++) {
- if (writeUnitSize > 1) {
- for (int i = 0; i < writeBuffer.length; i++) {
- writeBuffer[i] = (byte) random.nextInt();
+ try (BlockOutputStream outputStream = createBlockOutputStream(bufferPool)) {
+ for (int i = 0; i < DATA_SIZE / writeSize; i++) {
+ if (writeSize > 1) {
+ outputStream.write(DATA, i * writeSize, writeSize);
+ } else {
+ outputStream.write(DATA[i]);
}
- outputStream.write(writeBuffer, 0, writeBuffer.length);
- } else {
- outputStream.write((byte) random.nextInt());
}
}
- outputStream.close();
}
}
@@ -126,9 +118,8 @@ private static class MockXceiverClientSpi extends XceiverClientSpi {
private final Pipeline pipeline;
- private final Random expectedRandomStream = new Random(SEED);
-
private final AtomicInteger counter = new AtomicInteger();
+ private int i;
MockXceiverClientSpi(Pipeline pipeline) {
super();
@@ -175,8 +166,8 @@ public XceiverClientReply sendCommandAsync(
ByteString data = request.getWriteChunk().getData();
final byte[] writePayload = data.toByteArray();
for (byte b : writePayload) {
- byte expectedByte = (byte) expectedRandomStream.nextInt();
- assertEquals(expectedByte, b);
+ assertEquals(DATA[i], b);
+ ++i;
}
break;
default:
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
index aabbbb3eedc..b56c503df9b 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
@@ -26,8 +26,8 @@
import java.util.LinkedList;
import java.util.concurrent.ThreadLocalRandom;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -79,8 +79,8 @@ private static Deque assertAllocate(BufferPool pool) {
assertEmpty(allocated, size);
fill(allocated); // make buffer contents unique, for equals check
- assertFalse(buffers.contains(allocated),
- () -> "buffer " + n + ": " + allocated + " already in: " + buffers);
+ assertThat(buffers).withFailMessage("buffer " + n + ": " + allocated + " already in: " + buffers)
+ .doesNotContain(allocated);
buffers.addLast(allocated);
}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
index a5de86a84f6..acd8a613ab9 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
@@ -49,7 +49,7 @@
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java
index 8db662cee07..41bf46a8ea2 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java
@@ -281,7 +281,6 @@ public synchronized BlockExtendedInputStream create(
public static class TestBlockInputStream extends BlockExtendedInputStream {
private ByteBuffer data;
- private boolean closed = false;
private BlockID blockID;
private long length;
private boolean shouldError = false;
@@ -304,10 +303,6 @@ public static class TestBlockInputStream extends BlockExtendedInputStream {
data.position(0);
}
- public boolean isClosed() {
- return closed;
- }
-
public void setShouldErrorOnSeek(boolean val) {
this.shouldErrorOnSeek = val;
}
@@ -377,9 +372,7 @@ protected int readWithStrategy(ByteReaderStrategy strategy) throws
}
@Override
- public void close() {
- closed = true;
- }
+ public void close() { }
@Override
public void unbuffer() {
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java
index e8ada43b08a..97bf71c204a 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java
@@ -169,7 +169,7 @@ public void testCorrectStreamCreatedDependingOnDataLocations()
BlockLocationInfo blockInfo =
ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap);
- try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) {
+ try (ECBlockInputStreamProxy ignored = createBISProxy(repConfig, blockInfo)) {
      // Not all locations present, so we expect only the "missing=true" stream
// to be present.
assertThat(streamFactory.getStreams()).containsKey(false);
@@ -181,7 +181,7 @@ public void testCorrectStreamCreatedDependingOnDataLocations()
dnMap = ECStreamTestUtil.createIndexMap(2, 3, 4, 5);
blockInfo = ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap);
- try (ECBlockInputStreamProxy bis = createBISProxy(repConfig, blockInfo)) {
+ try (ECBlockInputStreamProxy ignored = createBISProxy(repConfig, blockInfo)) {
      // Not all locations present, so we expect only the "missing=true" stream
// to be present.
assertThat(streamFactory.getStreams()).doesNotContainKey(false);
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
index c708fc28ddb..f7a4bb0643e 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.client.io;
import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -41,7 +42,6 @@
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.Set;
import java.util.SplittableRandom;
import java.util.concurrent.ExecutorService;
@@ -52,6 +52,7 @@
import static java.util.Collections.singleton;
import static java.util.stream.Collectors.toSet;
import static org.apache.hadoop.ozone.client.io.ECStreamTestUtil.generateParity;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -645,7 +646,7 @@ public void testSeekToPartialOffsetFails() {
}
  private Integer getRandomStreamIndex(Set<Integer> set) {
- return set.stream().skip(new Random().nextInt(set.size()))
+ return set.stream().skip(RandomUtils.nextInt(0, set.size()))
.findFirst().orElse(null);
}
@@ -800,7 +801,7 @@ public void testFailedLocationsAreNotRead() throws IOException {
// created in the stream factory, indicating we did not read them.
    List<TestBlockInputStream> streams = streamFactory.getBlockStreams();
for (TestBlockInputStream stream : streams) {
- assertTrue(stream.getEcReplicaIndex() > 2);
+ assertThat(stream.getEcReplicaIndex()).isGreaterThan(2);
}
}
}
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index b3cf683ec80..20dce15d4d1 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
@@ -86,6 +85,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <groupId>javax.annotation</groupId>
      <artifactId>javax.annotation-api</artifactId>
+    <dependency>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
+    </dependency>
      <groupId>io.dropwizard.metrics</groupId>
@@ -102,7 +105,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <groupId>org.bouncycastle</groupId>
-      <artifactId>bcprov-jdk15on</artifactId>
+      <artifactId>bcprov-jdk18on</artifactId>
@@ -138,7 +141,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <groupId>org.bouncycastle</groupId>
-      <artifactId>bcpkix-jdk15on</artifactId>
+      <artifactId>bcpkix-jdk18on</artifactId>
      <version>${bouncycastle.version}</version>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 7b007fdca1f..787f023df2e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -392,4 +392,9 @@ private HddsConfigKeys() {
public static final String OZONE_AUDIT_LOG_DEBUG_CMD_LIST_DNAUDIT =
"ozone.audit.log.debug.cmd.list.dnaudit";
+
+ public static final String HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_KEY =
+ "hdds.datanode.slow.op.warning.threshold";
+ public static final String HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT =
+ "500ms";
}
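The two keys added above make the datanode slow-operation warning threshold configurable, with a 500ms default. A minimal sketch of how a datanode-side component might resolve the value, assuming the String-default overload of getTimeDuration on OzoneConfiguration; the class and method names below are illustrative, not part of this patch:

```java
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

final class SlowOpThresholdSketch {
  private SlowOpThresholdSketch() { }

  // Resolves the warning threshold ("500ms" by default) to milliseconds.
  static long slowOpThresholdMs(OzoneConfiguration conf) {
    return conf.getTimeDuration(
        HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_KEY,
        HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT,
        TimeUnit.MILLISECONDS);
  }
}
```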
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
index b244b8cf75d..7a94d77c770 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdds;
-import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
/**
@@ -41,13 +40,4 @@ public static long getLongId() {
return LONG_COUNTER.incrementAndGet();
}
- /**
- * Returns a uuid.
- *
- * @return UUID.
- */
- public static UUID getUUId() {
- return UUID.randomUUID();
- }
-
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 06885ed3dc6..ee1c9669a1b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -20,8 +20,8 @@
import com.google.protobuf.ServiceException;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
import javax.management.ObjectName;
import java.io.File;
import java.io.IOException;
@@ -105,17 +105,6 @@ public final class HddsUtils {
private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class);
- /**
- * The service ID of the solitary Ozone SCM service.
- */
- public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService";
- public static final String OZONE_SCM_SERVICE_INSTANCE_ID =
- "OzoneScmServiceInstance";
-
- private static final String MULTIPLE_SCM_NOT_YET_SUPPORTED =
- ScmConfigKeys.OZONE_SCM_NAMES + " must contain a single hostname."
- + " Multiple SCM hosts are currently unsupported";
-
public static final ByteString REDACTED =
ByteString.copyFromUtf8("");
@@ -654,30 +643,6 @@ public static File createDir(String dirPath) {
return dirFile;
}
- /**
- * Leverages the Configuration.getPassword method to attempt to get
- * passwords from the CredentialProvider API before falling back to
- * clear text in config - if falling back is allowed.
- * @param conf Configuration instance
- * @param alias name of the credential to retrieve
- * @return String credential value or null
- */
- static String getPassword(ConfigurationSource conf, String alias) {
- String password = null;
- try {
- char[] passchars = conf.getPassword(alias);
- if (passchars != null) {
- password = new String(passchars);
- }
- } catch (IOException ioe) {
- LOG.warn("Setting password to null since IOException is caught"
- + " when getting password", ioe);
-
- password = null;
- }
- return password;
- }
-
/**
* Utility string formatter method to display SCM roles.
*
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java
index 8349b12e6bf..fc4e796ffff 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/NodeDetails.java
@@ -111,15 +111,6 @@ public String getRatisHostPortStr() {
return hostPort.toString();
}
- public String getRatisAddressPortStr() {
- StringBuilder hostPort = new StringBuilder();
- hostPort.append(getInetAddress().getHostAddress())
- .append(":")
- .append(ratisPort);
- return hostPort.toString();
- }
-
-
public int getRatisPort() {
return ratisPort;
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
index 8a4d75a31fb..b3a762e2eda 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
@@ -19,8 +19,6 @@
package org.apache.hadoop.hdds.client;
import com.google.common.base.Strings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import static org.apache.hadoop.ozone.OzoneConsts.GB;
import static org.apache.hadoop.ozone.OzoneConsts.KB;
@@ -33,8 +31,6 @@
* a storage volume.
*/
public final class OzoneQuota {
- public static final Logger LOG =
- LoggerFactory.getLogger(OzoneQuota.class);
public static final String OZONE_QUOTA_B = "B";
public static final String OZONE_QUOTA_KB = "KB";
@@ -144,16 +140,6 @@ private OzoneQuota(long quotaInNamespace, RawQuotaInBytes rawQuotaInBytes) {
this.quotaInBytes = rawQuotaInBytes.sizeInBytes();
}
- /**
- * Formats a quota as a string.
- *
- * @param quota the quota to format
- * @return string representation of quota
- */
- public static String formatQuota(OzoneQuota quota) {
- return String.valueOf(quota.getRawSize()) + quota.getUnit();
- }
-
/**
* Parses a user provided string space quota and returns the
* Quota Object.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index bb5ff0067f4..69cce8db6d6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -101,16 +101,6 @@ public static T newInstanceOf(Class configurationClass) {
return conf.getObject(configurationClass);
}
- /**
- * @return a new {@code OzoneConfiguration} instance set from the given
- * {@code configObject}
- */
- public static <T> OzoneConfiguration fromObject(T configObject) {
- OzoneConfiguration conf = new OzoneConfiguration();
- conf.setFromObject(configObject);
- return conf;
- }
-
public OzoneConfiguration() {
OzoneConfiguration.activate();
loadDefaults();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java
index ddecf1f0607..2d29dc8565c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/freon/FakeClusterTopology.java
@@ -52,7 +52,7 @@ public class FakeClusterTopology {
public FakeClusterTopology() {
try {
for (int i = 0; i < 9; i++) {
- datanodes.add(createDatanode(i));
+ datanodes.add(createDatanode());
if ((i + 1) % 3 == 0) {
pipelines.add(Pipeline.newBuilder()
.setId(PipelineID.randomId().getProtobuf())
@@ -69,7 +69,7 @@ public FakeClusterTopology() {
}
}
- private DatanodeDetailsProto createDatanode(int index) {
+ private DatanodeDetailsProto createDatanode() {
return DatanodeDetailsProto.newBuilder()
.setUuid(UUID.randomUUID().toString())
.setHostName("localhost")
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
index 7808dccaf5d..6f776072d9c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
@@ -24,7 +24,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import java.time.Duration;
import java.util.OptionalLong;
import java.util.concurrent.Executors;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
deleted file mode 100644
index 915fe3557e2..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional interfaces for ozone, similar to java.util.function.
- */
-package org.apache.hadoop.hdds.function;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index 739f6ebd656..5b6fb6fe9b8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -21,7 +21,6 @@
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
-import java.util.Objects;
import java.util.Set;
import java.util.UUID;
@@ -563,14 +562,6 @@ public int hashCode() {
return uuid.hashCode();
}
- // Skip The OpStates which may change in Runtime.
- public int getSignature() {
- return Objects
- .hash(uuid, uuidString, ipAddress, hostName, ports,
- certSerialId, version, setupTime, revision, buildDate,
- initialVersion, currentVersion);
- }
-
/**
* Returns DatanodeDetails.Builder instance.
*
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
index 5a1e2864b5d..89e6a05b6bb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ServerNotLeaderException.java
@@ -28,7 +28,6 @@
* Exception thrown when a server is not a leader for Ratis group.
*/
public class ServerNotLeaderException extends IOException {
- private final String currentPeerId;
private final String leader;
private static final Pattern CURRENT_PEER_ID_PATTERN =
Pattern.compile("Server:(.*) is not the leader[.]+.*", Pattern.DOTALL);
@@ -39,7 +38,6 @@ public class ServerNotLeaderException extends IOException {
public ServerNotLeaderException(RaftPeerId currentPeerId) {
super("Server:" + currentPeerId + " is not the leader. Could not " +
"determine the leader node.");
- this.currentPeerId = currentPeerId.toString();
this.leader = null;
}
@@ -47,7 +45,6 @@ public ServerNotLeaderException(RaftPeerId currentPeerId,
String suggestedLeader) {
super("Server:" + currentPeerId + " is not the leader. Suggested leader is"
+ " Server:" + suggestedLeader + ".");
- this.currentPeerId = currentPeerId.toString();
this.leader = suggestedLeader;
}
@@ -57,7 +54,6 @@ public ServerNotLeaderException(String message) {
Matcher currentLeaderMatcher = CURRENT_PEER_ID_PATTERN.matcher(message);
if (currentLeaderMatcher.matches()) {
- this.currentPeerId = currentLeaderMatcher.group(1);
Matcher suggestedLeaderMatcher =
SUGGESTED_LEADER_PATTERN.matcher(message);
@@ -77,7 +73,6 @@ public ServerNotLeaderException(String message) {
this.leader = null;
}
} else {
- this.currentPeerId = null;
this.leader = null;
}
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java
index e47411f3f33..014f62343c6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/RemoveSCMRequest.java
@@ -35,59 +35,10 @@ public RemoveSCMRequest(String clusterId, String scmId, String addr) {
this.ratisAddr = addr;
}
- public static RemoveSCMRequest getFromProtobuf(
- HddsProtos.RemoveScmRequestProto proto) {
- return new Builder().setClusterId(proto.getClusterId())
- .setScmId(proto.getScmId()).setRatisAddr(proto.getRatisAddr()).build();
- }
-
public HddsProtos.RemoveScmRequestProto getProtobuf() {
return HddsProtos.RemoveScmRequestProto.newBuilder().setClusterId(clusterId)
.setScmId(scmId).setRatisAddr(ratisAddr).build();
}
- /**
- * Builder for RemoveSCMRequest.
- */
- public static class Builder {
- private String clusterId;
- private String scmId;
- private String ratisAddr;
-
-
- /**
- * sets the cluster id.
- * @param cid clusterId to be set
- * @return Builder for RemoveSCMRequest
- */
- public RemoveSCMRequest.Builder setClusterId(String cid) {
- this.clusterId = cid;
- return this;
- }
-
- /**
- * sets the scmId.
- * @param id scmId
- * @return Builder for RemoveSCMRequest
- */
- public RemoveSCMRequest.Builder setScmId(String id) {
- this.scmId = id;
- return this;
- }
-
- /**
- * Set ratis address in Scm HA.
- * @param addr address in the format of [ip|hostname]:port
- * @return Builder for RemoveSCMRequest
- */
- public RemoveSCMRequest.Builder setRatisAddr(String addr) {
- this.ratisAddr = addr;
- return this;
- }
-
- public RemoveSCMRequest build() {
- return new RemoveSCMRequest(clusterId, scmId, ratisAddr);
- }
- }
/**
* Gets the clusterId from the Version file.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index 46816a63d34..2fc04e00f23 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -73,8 +73,17 @@ public class ScmConfig extends ReconfigurableConfig {
+ "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. "
+ "The class decides which pipeline will be used to find or "
+ "allocate Ratis containers. If not set, "
- + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms. "
- + "RandomPipelineChoosePolicy will be used as default value."
+ + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "RandomPipelineChoosePolicy will be used as default value. "
+ + "The following values can be used, "
+ + "(1) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "RandomPipelineChoosePolicy : random choose one pipeline. "
+ + "(2) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "HealthyPipelineChoosePolicy : random choose one healthy pipeline. "
+ + "(3) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "CapacityPipelineChoosePolicy : choose the pipeline with lower "
+ + "utilization from the two pipelines. Note that random choose "
+ + "method will be executed twice in this policy."
)
private String pipelineChoosePolicyName;
@@ -85,11 +94,20 @@ public class ScmConfig extends ReconfigurableConfig {
tags = { ConfigTag.SCM, ConfigTag.PIPELINE },
description =
"The full name of class which implements "
- + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. "
- + "The class decides which pipeline will be used when "
- + "selecting an EC Pipeline. If not set, "
- + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms. "
- + "RandomPipelineChoosePolicy will be used as default value."
+ + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. "
+ + "The class decides which pipeline will be used when "
+ + "selecting an EC Pipeline. If not set, "
+ + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "RandomPipelineChoosePolicy will be used as default value. "
+ + "The following values can be used, "
+ + "(1) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "RandomPipelineChoosePolicy : random choose one pipeline. "
+ + "(2) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "HealthyPipelineChoosePolicy : random choose one healthy pipeline. "
+ + "(3) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
+ + "CapacityPipelineChoosePolicy : choose the pipeline with lower "
+ + "utilization from the two pipelines. Note that random choose "
+ + "method will be executed twice in this policy."
)
private String ecPipelineChoosePolicyName;
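The reworded descriptions above enumerate the three selectable policies for both the Ratis and EC pipeline settings. A hedged sketch of choosing the capacity-based policy programmatically; the config key string is an assumption (derived from ScmConfig's "ozone.scm" prefix) and should be checked against the generated ozone-default.xml:

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

final class PipelinePolicySketch {
  private PipelinePolicySketch() { }

  static OzoneConfiguration withCapacityPolicy() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Assumed key name; the value is one of the three classes listed in the
    // description above.
    conf.set("ozone.scm.pipeline.choose.policy.impl",
        "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms."
            + "CapacityPipelineChoosePolicy");
    return conf;
  }
}
```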
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index b03cead27e7..402398e36c3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -20,10 +20,12 @@
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
import org.apache.hadoop.hdds.scm.DatanodeAdminError;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -39,6 +41,7 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.UUID;
/**
* The interface to call into underlying container layer.
@@ -178,6 +181,14 @@ ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
HddsProtos.ReplicationFactor replicationFactor,
String owner) throws IOException;
+ /**
+ * Gets the list of underReplicated and unClosed containers on a decommissioning node.
+ *
+ * @param dn - Datanode detail
+ * @return Lists of underReplicated and Unclosed containers
+ */
+  Map<String, List<ContainerID>> getContainersOnDecomNode(DatanodeDetails dn) throws IOException;
+
/**
* Returns a set of Nodes that meet a query criteria. Passing null for opState
* or nodeState acts like a wild card, returning all nodes in that state.
@@ -194,6 +205,14 @@ List queryNode(HddsProtos.NodeOperationalState opState,
HddsProtos.NodeState nodeState, HddsProtos.QueryScope queryScope,
String poolName) throws IOException;
+ /**
+ * Returns a node with the given UUID.
+ * @param uuid - datanode uuid string
   * @return A node that matches the requested UUID.
+ * @throws IOException
+ */
+ HddsProtos.Node queryNode(UUID uuid) throws IOException;
+
/**
* Allows a list of hosts to be decommissioned. The hosts are identified
* by their hostname and optionally port in the format foo.com:port.
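For context, a hedged sketch of how a caller might use the two methods added to ScmClient in this hunk; the client construction is omitted, and the map keys are assumed to be short category strings (e.g. "UnderReplicated", "UnClosed"), which this hunk does not spell out:

```java
import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.ContainerID;

final class DecommissionQuerySketch {
  private DecommissionQuerySketch() { }

  static void printDecomInfo(ScmClient client, DatanodeDetails dn)
      throws IOException {
    // Containers still blocking the decommission of this node.
    Map<String, List<ContainerID>> containers =
        client.getContainersOnDecomNode(dn);
    containers.forEach((category, ids) ->
        System.out.println(category + ": " + ids.size() + " containers"));

    // Single-node lookup by UUID.
    HddsProtos.Node node = client.queryNode(dn.getUuid());
    System.out.println(node);
  }
}
```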
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
index 1dbbc738432..df8e9d45e13 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
@@ -74,7 +74,10 @@ public enum HealthState {
"OpenUnhealthyContainers"),
QUASI_CLOSED_STUCK(
"Containers QuasiClosed with insufficient datanode origins",
- "StuckQuasiClosedContainers");
+ "StuckQuasiClosedContainers"),
+ OPEN_WITHOUT_PIPELINE(
+ "Containers in OPEN state without any healthy Pipeline",
+ "OpenContainersWithoutPipeline");
private String description;
private String metricName;
@@ -129,11 +132,6 @@ public void incrementAndSample(HealthState stat, ContainerID container) {
incrementAndSample(stat.toString(), container);
}
- public void incrementAndSample(HddsProtos.LifeCycleState stat,
- ContainerID container) {
- incrementAndSample(stat.toString(), container);
- }
-
public void setComplete() {
reportTimeStamp = System.currentTimeMillis();
}
@@ -238,10 +236,6 @@ protected void setSample(String stat, List sample) {
containerSample.put(stat, sample);
}
- public List getSample(HddsProtos.LifeCycleState stat) {
- return getSample(stat.toString());
- }
-
public List getSample(HealthState stat) {
return getSample(stat.toString());
}
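The new OPEN_WITHOUT_PIPELINE health state slots into the existing incrementAndSample(HealthState, ContainerID) API shown in this hunk. A minimal sketch, with the report and container id supplied by the caller:

```java
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;

final class OpenWithoutPipelineSketch {
  private OpenWithoutPipelineSketch() { }

  // Records an OPEN container that has no healthy pipeline.
  static void record(ReplicationManagerReport report, ContainerID id) {
    report.incrementAndSample(
        ReplicationManagerReport.HealthState.OPEN_WITHOUT_PIPELINE, id);
  }
}
```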
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
index 9ba766bc941..80e09af172b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
@@ -115,10 +115,6 @@ public static Collection getSCMNodeIds(ConfigurationSource conf,
return conf.getTrimmedStringCollection(key);
}
- public static String getLocalSCMNodeId(String scmServiceId) {
- return addSuffix(ScmConfigKeys.OZONE_SCM_NODES_KEY, scmServiceId);
- }
-
/**
* Add non empty and non null suffix to a key.
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java
index 633ffba9e96..8ee6decc9c4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java
@@ -36,7 +36,6 @@ private NetConstants() {
public static final int NODE_COST_DEFAULT = 0;
public static final int ANCESTOR_GENERATION_DEFAULT = 0;
public static final int ROOT_LEVEL = 1;
- public static final String NODE_COST_PREFIX = "$";
public static final String DEFAULT_RACK = "/default-rack";
public static final String DEFAULT_NODEGROUP = "/default-nodegroup";
public static final String DEFAULT_DATACENTER = "/default-datacenter";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
index 7463c52e953..18c530140e8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
@@ -26,7 +26,6 @@
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
-import java.util.regex.Pattern;
/**
* Utility class to facilitate network topology functions.
@@ -35,9 +34,6 @@ public final class NetUtils {
private static final Logger LOG = LoggerFactory.getLogger(NetUtils.class);
- private static final Pattern TRAILING_PATH_SEPARATOR =
- Pattern.compile(NetConstants.PATH_SEPARATOR_STR + "+$");
-
private NetUtils() {
// Prevent instantiation
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 5e0697eaafd..2dc86c1b685 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -657,7 +657,7 @@ private Node chooseNodeInternal(String scope, int leafIndex,
ancestorGen);
if (availableNodes <= 0) {
- LOG.warn("No available node in (scope=\"{}\" excludedScope=\"{}\" " +
+ LOG.info("No available node in (scope=\"{}\" excludedScope=\"{}\" " +
"excludedNodes=\"{}\" ancestorGen=\"{}\").",
scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes,
ancestorGen);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index b587cc924b0..e8bddb42cfb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -19,6 +19,7 @@
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
@@ -44,6 +45,7 @@
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.UUID;
/**
* ContainerLocationProtocol is used by an HDFS node to find the set of nodes
@@ -53,7 +55,8 @@
.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerLocationProtocol extends Closeable {
- @SuppressWarnings("checkstyle:ConstantName")
+ // Accessed and checked via reflection in Hadoop RPC - changing it is incompatible
+ @SuppressWarnings({"checkstyle:ConstantName", "unused"})
/**
* Version 1: Initial version.
*/
@@ -219,6 +222,14 @@ List listContainer(long startContainerID,
*/
void deleteContainer(long containerID) throws IOException;
+ /**
+ * Gets the list of underReplicated and unClosed containers on a decommissioning node.
+ *
+ * @param dn - Datanode detail
+ * @return Lists of underReplicated and unClosed containers
+ */
+  Map<String, List<ContainerID>> getContainersOnDecomNode(DatanodeDetails dn) throws IOException;
+
/**
* Queries a list of Node Statuses. Passing a null for either opState or
* state acts like a wildcard returning all nodes in that state.
@@ -232,6 +243,8 @@ List queryNode(HddsProtos.NodeOperationalState opState,
HddsProtos.NodeState state, HddsProtos.QueryScope queryScope,
String poolName, int clientVersion) throws IOException;
+ HddsProtos.Node queryNode(UUID uuid) throws IOException;
+
List decommissionNodes(List nodes)
throws IOException;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java
index e0458f03472..9acb0e5c33a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java
@@ -144,17 +144,6 @@ public static ContainerCommandResponseProto putBlockResponseSuccess(
.build();
}
- /**
- * Returns successful blockResponse.
- * @param msg - Request.
- * @return Response.
- */
- public static ContainerCommandResponseProto getBlockResponseSuccess(
- ContainerCommandRequestProto msg) {
-
- return getSuccessResponse(msg);
- }
-
public static ContainerCommandResponseProto getBlockDataResponse(
ContainerCommandRequestProto msg, BlockData data) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java
index 25bec2145f0..9a9002195c8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/PemFileBasedKeyStoresFactory.java
@@ -19,7 +19,6 @@
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateNotification;
import org.slf4j.Logger;
@@ -58,8 +57,7 @@ public class PemFileBasedKeyStoresFactory implements KeyStoresFactory,
private TrustManager[] trustManagers;
private final CertificateClient caClient;
- public PemFileBasedKeyStoresFactory(SecurityConfig securityConfig,
- CertificateClient client) {
+ public PemFileBasedKeyStoresFactory(CertificateClient client) {
this.caClient = client;
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java
index 16cbaf9be7d..91a98b799c2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java
@@ -24,7 +24,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.token.Token.TrivialRenewer;
import org.apache.hadoop.util.ProtobufUtils;
import java.io.DataInput;
@@ -185,17 +184,5 @@ public byte[] getBytes() {
}
return builder.build().toByteArray();
}
-
- /**
- * Default TrivialRenewer.
- */
- @InterfaceAudience.Private
- public static class Renewer extends TrivialRenewer {
-
- @Override
- protected Text getKind() {
- return KIND_NAME;
- }
- }
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java
index 989c8df0fb3..6f13ac34a29 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java
@@ -55,25 +55,4 @@ public Token selectToken(Text service,
}
return null;
}
-
- /**
- * Static method to avoid instantiation.
- * */
- @SuppressWarnings("unchecked")
-  public static Token<OzoneBlockTokenIdentifier> selectBlockToken(Text service,
-      Collection<Token<? extends TokenIdentifier>> tokens) {
- if (service == null) {
- return null;
- }
-    for (Token<? extends TokenIdentifier> token : tokens) {
- if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind())
- && token.getService().equals(service)) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("Getting token for service:{}", service);
- }
-        return (Token<OzoneBlockTokenIdentifier>) token;
- }
- }
- return null;
- }
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
index 47982449853..e196d0df9d7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java
@@ -21,10 +21,8 @@
import org.apache.hadoop.hdds.security.exception.OzoneSecurityException;
import org.apache.hadoop.hdds.security.ssl.KeyStoresFactory;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType;
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest;
import org.apache.hadoop.hdds.security.x509.exception.CertificateException;
-import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import java.io.Closeable;
import java.io.IOException;
@@ -138,17 +136,6 @@ X509Certificate getCertificate(String certSerialId)
*/
List getCAList();
- /**
- * Return the pem encoded CA certificate list.
- *
- * If list is null, fetch the list from SCM and returns the list.
- * If list is not null, return the pem encoded CA certificate list.
- *
- * @return list of pem encoded CA certificates.
- * @throws IOException
- */
- List listCA() throws IOException;
-
/**
* Update and returns the pem encoded CA certificate list.
* @return list of pem encoded CA certificates.
@@ -156,16 +143,6 @@ X509Certificate getCertificate(String certSerialId)
*/
List updateCAList() throws IOException;
- /**
- * Creates digital signature over the data stream using the components private
- * key.
- *
- * @param data data to be signed
- * @return byte array - containing the signature
- * @throws CertificateException - on Error
- */
- byte[] signData(byte[] data) throws CertificateException;
-
/**
* Verifies a digital Signature, given the signature and the certificate of
* the signer.
@@ -186,26 +163,6 @@ boolean verifySignature(byte[] data, byte[] signature,
CertificateSignRequest.Builder getCSRBuilder()
throws CertificateException;
- /**
- * Send request to SCM to sign the certificate and save certificates returned
- * by SCM to PEM files on disk.
- *
- * @return the serial ID of the new certificate
- */
- String signAndStoreCertificate(PKCS10CertificationRequest request)
- throws CertificateException;
-
- /**
- * Stores the Certificate for this client. Don't use this api to add
- * trusted certificates of others.
- *
- * @param pemEncodedCert - pem encoded X509 Certificate
- * @param caType - Is CA certificate.
- * @throws CertificateException - on Error.
- */
- void storeCertificate(String pemEncodedCert, CAType caType)
- throws CertificateException;
-
default void assertValidKeysAndCertificate() throws OzoneSecurityException {
try {
Objects.requireNonNull(getPublicKey());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
index 802c3ff07ee..87834cdb456 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
@@ -262,15 +262,6 @@ public Builder addIpAddress(String ip) {
return this;
}
- public Builder addServiceName(
- String serviceName) {
- Preconditions.checkNotNull(
- serviceName, "Service Name cannot be null");
-
- this.addAltName(GeneralName.otherName, serviceName);
- return this;
- }
-
private Builder addAltName(int tag, String name) {
if (altNames == null) {
altNames = new ArrayList<>();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java
index df7cdde0473..208cff7c816 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exception/CertificateException.java
@@ -88,7 +88,6 @@ public enum ErrorCode {
BOOTSTRAP_ERROR,
CSR_ERROR,
CRYPTO_SIGNATURE_VERIFICATION_ERROR,
- CERTIFICATE_NOT_FOUND_ERROR,
RENEW_ERROR,
ROLLBACK_ERROR
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java
index cad0f7ffc28..d14129972c6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java
@@ -31,7 +31,6 @@
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
-import com.fasterxml.jackson.databind.type.CollectionType;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
/**
@@ -74,22 +73,6 @@ public static ObjectNode createObjectNode(Object next) {
return MAPPER.valueToTree(next);
}
- /**
- * Deserialize a list of elements from a given string,
- * each element in the list is in the given type.
- *
- * @param str json string.
- * @param elementType element type.
- * @return List of elements of type elementType
- * @throws IOException
- */
- public static List> toJsonList(String str, Class> elementType)
- throws IOException {
- CollectionType type = MAPPER.getTypeFactory()
- .constructCollectionType(List.class, elementType);
- return MAPPER.readValue(str, type);
- }
-
/**
* Utility to sequentially write a large collection of items to a file.
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
index 6c0272e256e..31aaca568e4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.http.client.methods.HttpRequestBase;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -127,17 +126,6 @@ public static InetSocketAddress updateListenAddress(OzoneConfiguration conf,
return updatedAddr;
}
-
- /**
- * Releases a http connection if the request is not null.
- * @param request
- */
- public static void releaseConnection(HttpRequestBase request) {
- if (request != null) {
- request.releaseConnection();
- }
- }
-
/**
* Get the location where SCM should store its metadata directories.
* Fall back to OZONE_METADATA_DIRS if not defined.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java
index bd35d56c77b..802c1531230 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java
@@ -45,7 +45,7 @@ public void onMessage(ReqT message) {
.importAndCreateSpan(
call.getMethodDescriptor().getFullMethodName(),
headers.get(GrpcClientInterceptor.TRACING_HEADER));
- try (Scope scope = GlobalTracer.get().activateSpan(span)) {
+ try (Scope ignored = GlobalTracer.get().activateSpan(span)) {
super.onMessage(message);
} finally {
span.finish();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
index d30e50f8034..b968d407232 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
@@ -149,16 +149,6 @@ public static R executeInNewSpan(String spanName,
return executeInSpan(span, supplier);
}
- /**
- * Execute a function inside an activated new span.
- */
- public static void executeInNewSpan(String spanName,
- CheckedRunnable runnable) throws E {
- Span span = GlobalTracer.get()
- .buildSpan(spanName).start();
- executeInSpan(span, runnable);
- }
-
/**
* Execute {@code supplier} in the given {@code span}.
* @return the value returned by {@code supplier}
@@ -190,15 +180,6 @@ private static void executeInSpan(Span span,
}
}
- /**
- * Execute a new function as a child span of the parent.
- */
- public static R executeAsChildSpan(String spanName,
- String parentName, CheckedSupplier supplier) throws E {
- Span span = TracingUtil.importAndCreateSpan(spanName, parentName);
- return executeInSpan(span, supplier);
- }
-
/**
* Execute a new function as a child span of the parent.
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
index 109f4b3df05..4620a483385 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java
@@ -20,7 +20,6 @@
import org.slf4j.Logger;
-import java.io.Closeable;
import java.util.Arrays;
import java.util.Collection;
@@ -40,11 +39,11 @@ private IOUtils() {
* null.
* @param closeables the objects to close
*/
- public static void cleanupWithLogger(Logger logger, Closeable... closeables) {
+ public static void cleanupWithLogger(Logger logger, AutoCloseable... closeables) {
if (closeables == null) {
return;
}
- for (Closeable c : closeables) {
+ for (AutoCloseable c : closeables) {
if (c != null) {
try {
c.close();
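Widening the varargs type from Closeable to AutoCloseable lets the same helper release resources that only implement AutoCloseable (for example RocksDB handles). A small self-contained sketch; the lambda stands in for such a resource:

```java
import org.apache.hadoop.hdds.utils.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class CleanupSketch {
  private static final Logger LOG = LoggerFactory.getLogger(CleanupSketch.class);

  private CleanupSketch() { }

  public static void main(String[] args) {
    // AutoCloseable is a functional interface, so a lambda can model a
    // resource that is AutoCloseable but not Closeable.
    AutoCloseable resource = () -> LOG.info("released");
    IOUtils.cleanupWithLogger(LOG, resource, null); // null entries are skipped
  }
}
```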
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java
index 53058c96ff3..2ec396c0ffa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BooleanCodec.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdds.utils.db;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
/**
* Codec to serialize/deserialize {@link Boolean}.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java
index 1675905a674..46779648e67 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdds.utils.db;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
/**
@@ -42,14 +42,6 @@ default boolean supportCodecBuffer() {
return false;
}
- /**
- * @return an upper bound, which should be obtained without serialization,
- * of the serialized size of the given object.
- */
- default int getSerializedSizeUpperBound(T object) {
- throw new UnsupportedOperationException();
- }
-
/**
* Serialize the given object to bytes.
*
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java
index 97311b921c8..64e494a5af1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java
@@ -420,17 +420,6 @@ public CodecBuffer put(byte val) {
return this;
}
- /**
- * Similar to {@link ByteBuffer#put(byte[])}.
- *
- * @return this object.
- */
- public CodecBuffer put(byte[] array) {
- assertRefCnt(1);
- buf.writeBytes(array);
- return this;
- }
-
/**
* Similar to {@link ByteBuffer#put(ByteBuffer)}.
*
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java
index 0ab907dfb3c..dff0b015ed5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DelegatedCodec.java
@@ -19,7 +19,7 @@
import org.apache.ratis.util.function.CheckedFunction;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java
index cd082d30e16..50488053159 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdds.utils.db;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.nio.ByteBuffer;
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java
index 912485052f3..9e776cc18f7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.nio.ByteBuffer;
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java
index e6b8338d5d0..96d12d1ebe5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto2Codec.java
@@ -22,7 +22,7 @@
import com.google.protobuf.Parser;
import org.apache.ratis.util.function.CheckedFunction;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java
index d353a489d9f..30245e033e0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Proto3Codec.java
@@ -22,7 +22,7 @@
import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite;
import org.apache.ratis.thirdparty.com.google.protobuf.Parser;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentHashMap;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java
index 8f2e9d322ad..f6482e5712c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ShortCodec.java
@@ -20,7 +20,7 @@
import java.nio.ByteBuffer;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
/**
* Codec to serialize/deserialize {@link Short}.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java
index 58d2edec762..1df55237937 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodecBase.java
@@ -23,7 +23,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
@@ -88,8 +88,7 @@ public boolean isFixedLength() {
* When {@link #isFixedLength()} is true,
* the upper bound equals to the serialized size.
*/
- @Override
- public int getSerializedSizeUpperBound(String s) {
+ private int getSerializedSizeUpperBound(String s) {
return maxBytesPerChar * s.length();
}
@@ -177,8 +176,7 @@ public CodecBuffer toCodecBuffer(@Nonnull String object,
}
@Override
- public String fromCodecBuffer(@Nonnull CodecBuffer buffer)
- throws IOException {
+ public String fromCodecBuffer(@Nonnull CodecBuffer buffer) {
return decode(buffer.asReadOnlyByteBuffer());
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java
index 33ef2895fae..dfccaa0ab75 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/UuidCodec.java
@@ -18,7 +18,7 @@
*/
package org.apache.hadoop.hdds.utils.db;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.nio.ByteBuffer;
import java.util.UUID;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java
index d2301bed2cb..94e762dea96 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/ByteBufferInputStream.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hdds.utils.io;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java
index 3f8fcd9f56c..1ffae958952 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/io/LengthOutputStream.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdds.utils.io;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index c7fb230119e..a0d4b59db16 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -145,9 +145,6 @@ public final class OzoneConfigKeys {
"dfs.container.ratis.ipc.random.port";
public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
false;
- public static final String OZONE_TRACE_ENABLED_KEY =
- "ozone.trace.enabled";
- public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
"ozone.metastore.rocksdb.statistics";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 37741f8cff9..9069c425e7d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -148,17 +148,6 @@ public final class OzoneConsts {
public static final String MULTIPART_FORM_DATA_BOUNDARY = "---XXX";
- /**
- * Supports Bucket Versioning.
- */
- public enum Versioning {
- NOT_DEFINED, ENABLED, DISABLED;
-
- public static Versioning getVersioning(boolean versioning) {
- return versioning ? ENABLED : DISABLED;
- }
- }
-
// Block ID prefixes used in datanode containers.
public static final String DELETING_KEY_PREFIX = "#deleting#";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
index 00f2e55f97f..985c238fd77 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java
@@ -44,7 +44,6 @@ public enum OzoneManagerVersion implements ComponentVersion {
+ " newer and an unknown server version has arrived to the client.");
public static final OzoneManagerVersion CURRENT = latest();
- public static final int CURRENT_VERSION = CURRENT.version;
private static final Map<Integer, OzoneManagerVersion> BY_PROTO_VALUE =
Arrays.stream(values())
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
index 059ed650f37..f8b3febfeca 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
@@ -33,8 +33,6 @@
import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Class to compute and verify checksums for chunks.
@@ -42,8 +40,6 @@
* This class is not thread safe.
*/
public class Checksum {
- public static final Logger LOG = LoggerFactory.getLogger(Checksum.class);
-
private static Function newMessageDigestFunction(
String algorithm) {
final MessageDigest md;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/MonotonicClock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/MonotonicClock.java
deleted file mode 100644
index 62a323d2538..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/MonotonicClock.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.common;
-
-import org.apache.hadoop.util.Time;
-
-import java.time.Clock;
-import java.time.Instant;
-import java.time.ZoneId;
-
-/**
- * This is a class which implements the Clock interface. It is a copy of the
- * Java Clock.SystemClock only it uses MonotonicNow (nanotime) rather than
- * System.currentTimeMills.
- */
-
-public final class MonotonicClock extends Clock {
-
- private final ZoneId zoneId;
-
- public MonotonicClock(ZoneId zone) {
- this.zoneId = zone;
- }
-
- @Override
- public ZoneId getZone() {
- return zoneId;
- }
-
- @Override
- public Clock withZone(ZoneId zone) {
- if (zone.equals(this.zoneId)) { // intentional NPE
- return this;
- }
- return new MonotonicClock(zone);
- }
-
- @Override
- public long millis() {
- return Time.monotonicNow();
- }
-
- @Override
- public Instant instant() {
- return Instant.ofEpochMilli(millis());
- }
-
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof MonotonicClock) {
- return zoneId.equals(((MonotonicClock) obj).zoneId);
- }
- return false;
- }
-
- @Override
- public int hashCode() {
- return zoneId.hashCode() + 1;
- }
-
- @Override
- public String toString() {
- return "MonotonicClock[" + zoneId + "]";
- }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java
index 8ba7b6da1eb..7bc7f618a3f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.common;
import java.io.IOException;
-import java.security.NoSuchAlgorithmException;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.annotation.InterfaceStability;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -46,17 +45,6 @@ public OzoneChecksumException(
unrecognizedChecksumType));
}
- /**
- * OzoneChecksumException to wrap around NoSuchAlgorithmException.
- * @param algorithm name of algorithm
- * @param ex original exception thrown
- */
- public OzoneChecksumException(
- String algorithm, NoSuchAlgorithmException ex) {
- super(String.format("NoSuchAlgorithmException thrown while computing " +
- "SHA-256 checksum using algorithm %s", algorithm), ex);
- }
-
/**
* OzoneChecksumException to throw with custom message.
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java
index 8bfb7490c4d..c6ad754f19b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/utils/BufferUtils.java
@@ -136,10 +136,4 @@ public static int getNumberOfBins(long numElements, int maxElementsPerBin) {
}
return Math.toIntExact(n);
}
-
- public static void clearBuffers(ByteBuffer[] byteBuffers) {
- for (ByteBuffer buffer : byteBuffers) {
- buffer.clear();
- }
- }
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
index 8f89be3c118..4bd170df8e8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -146,22 +146,6 @@ public synchronized Map<String, String> getMetadata() {
return Collections.unmodifiableMap(this.metadata);
}
- /**
- * Returns value of a key.
- */
- public synchronized String getValue(String key) {
- return metadata.get(key);
- }
-
- /**
- * Deletes a metadata entry from the map.
- *
- * @param key - Key
- */
- public synchronized void deleteKey(String key) {
- metadata.remove(key);
- }
-
@SuppressWarnings("unchecked")
private List<ContainerProtos.ChunkInfo> castChunkList() {
return (List<ContainerProtos.ChunkInfo>) chunkList;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
index 6f31ee40c4b..fdf40af9e09 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
@@ -48,10 +48,6 @@ public ChunkInfoList(List<ContainerProtos.ChunkInfo> chunks) {
this.chunks = Collections.unmodifiableList(chunks);
}
-  public List<ContainerProtos.ChunkInfo> asList() {
- return chunks;
- }
-
/**
* @return A new {@link ChunkInfoList} created from protobuf data.
*/
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
index f2bad543a9e..7dfcf3eb8c8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
@@ -282,7 +282,7 @@ public void run() {
try {
// ignore return value, just used for wait
- boolean b = semaphore.tryAcquire(sleepTime, TimeUnit.MILLISECONDS);
+ boolean ignored = semaphore.tryAcquire(sleepTime, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
LOG.warn("Lease manager is interrupted. Shutting down...", e);
Thread.currentThread().interrupt();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
deleted file mode 100644
index 2740c177901..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-/**
- * Lock implementation which also maintains counter.
- */
-public final class ActiveLock {
-
- private ReentrantReadWriteLock lock;
- private AtomicInteger count;
-
- /**
- * Use ActiveLock#newInstance to create instance.
- *
- * @param fairness - if true the lock uses a fair ordering policy, else
- * non-fair ordering.
- */
- private ActiveLock(boolean fairness) {
- this.lock = new ReentrantReadWriteLock(fairness);
- this.count = new AtomicInteger(0);
- }
-
- /**
- * Creates a new instance of ActiveLock.
- *
- * @return new ActiveLock
- */
- public static ActiveLock newInstance(boolean fairness) {
- return new ActiveLock(fairness);
- }
-
- /**
- * Acquires read lock.
- *
- * <p>Acquires the read lock if the write lock is not held by
- * another thread and returns immediately.
- *
- * <p>If the write lock is held by another thread then
- * the current thread becomes disabled for thread scheduling
- * purposes and lies dormant until the read lock has been acquired.
- */
- void readLock() {
- lock.readLock().lock();
- }
-
- /**
- * Attempts to release the read lock.
- *
- * <p>If the number of readers is now zero then the lock
- * is made available for write lock attempts.
- */
- void readUnlock() {
- lock.readLock().unlock();
- }
-
- /**
- * Acquires write lock.
- *
- * <p>Acquires the write lock if neither the read nor write lock
- * are held by another thread
- * and returns immediately, setting the write lock hold count to
- * one.
- *
- * <p>If the current thread already holds the write lock then the
- * hold count is incremented by one and the method returns
- * immediately.
- *
- * <p>If the lock is held by another thread then the current
- * thread becomes disabled for thread scheduling purposes and
- * lies dormant until the write lock has been acquired.
- */
- void writeLock() {
- lock.writeLock().lock();
- }
-
- /**
- * Attempts to release the write lock.
- *
- * <p>If the current thread is the holder of this lock then
- * the hold count is decremented. If the hold count is now
- * zero then the lock is released.
- */
- void writeUnlock() {
- lock.writeLock().unlock();
- }
-
- /**
- * Increment the active count of the lock.
- */
- void incrementActiveCount() {
- count.incrementAndGet();
- }
-
- /**
- * Decrement the active count of the lock.
- */
- void decrementActiveCount() {
- count.decrementAndGet();
- }
-
- /**
- * Returns the active count on the lock.
- *
- * @return Number of active leases on the lock.
- */
- int getActiveLockCount() {
- return count.get();
- }
-
- /**
- * Returns the number of reentrant read holds on this lock by the current
- * thread.
- *
- * @return the number of holds on the read lock by the current thread,
- * or zero if the read lock is not held by the current thread
- */
- int getReadHoldCount() {
- return lock.getReadHoldCount();
- }
-
- /**
- * Returns the number of reentrant write holds on this lock by the current
- * thread.
- *
- * @return the number of holds on the write lock by the current thread,
- * or zero if the write lock is not held by the current thread
- */
- int getWriteHoldCount() {
- return lock.getWriteHoldCount();
- }
-
- /**
- * Queries if the write lock is held by the current thread.
- *
- * @return {@code true} if the current thread holds the write lock and
- * {@code false} otherwise
- */
- boolean isWriteLockedByCurrentThread() {
- return lock.isWriteLockedByCurrentThread();
- }
-
- /**
- * Resets the active count on the lock.
- */
- void resetCounter() {
- count.set(0);
- }
-
- @Override
- public String toString() {
- return lock.toString();
- }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
deleted file mode 100644
index 1cbe758736a..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.commons.pool2.impl.GenericObjectPool;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.Consumer;
-
-/**
- * Manages the locks on a given resource. A new lock is created for each
- * and every unique resource. Uniqueness of resource depends on the
- * {@code equals} implementation of it.
- */
-public class LockManager<R> {
-
- private static final Logger LOG = LoggerFactory.getLogger(LockManager.class);
-
- private final Map<R, ActiveLock> activeLocks = new ConcurrentHashMap<>();
- private final GenericObjectPool<ActiveLock> lockPool;
-
- /**
- * Creates new LockManager instance with the given Configuration.and uses
- * non-fair mode for locks.
- *
- * @param conf Configuration object
- */
- public LockManager(final ConfigurationSource conf) {
- this(conf, false);
- }
-
-
- /**
- * Creates new LockManager instance with the given Configuration.
- *
- * @param conf Configuration object
- * @param fair - true to use fair lock ordering, else non-fair lock ordering.
- */
- public LockManager(final ConfigurationSource conf, boolean fair) {
- lockPool =
- new GenericObjectPool<>(new PooledLockFactory(fair));
- lockPool.setMaxTotal(-1);
- }
-
- /**
- * Acquires the lock on given resource.
- *
- * If the lock is not available then the current thread becomes
- * disabled for thread scheduling purposes and lies dormant until the
- * lock has been acquired.
- *
- * @param resource on which the lock has to be acquired
- * @deprecated Use {@link LockManager#writeLock} instead
- */
- public void lock(final R resource) {
- writeLock(resource);
- }
-
- /**
- * Releases the lock on given resource.
- *
- * @param resource for which the lock has to be released
- * @deprecated Use {@link LockManager#writeUnlock} instead
- */
- public void unlock(final R resource) {
- writeUnlock(resource);
- }
-
- /**
- * Acquires the read lock on given resource.
- *
- * <p>Acquires the read lock on resource if the write lock is not held by
- * another thread and returns immediately.
- *
- * <p>If the write lock on resource is held by another thread then
- * the current thread becomes disabled for thread scheduling
- * purposes and lies dormant until the read lock has been acquired.
- *
- * @param resource on which the read lock has to be acquired
- */
- public void readLock(final R resource) {
- acquire(resource, ActiveLock::readLock);
- }
-
- /**
- * Releases the read lock on given resource.
- *
- * @param resource for which the read lock has to be released
- * @throws IllegalMonitorStateException if the current thread does not
- * hold this lock
- */
- public void readUnlock(final R resource) throws IllegalMonitorStateException {
- release(resource, ActiveLock::readUnlock);
- }
-
- /**
- * Acquires the write lock on given resource.
- *
- * <p>Acquires the write lock on resource if neither the read nor write lock
- * are held by another thread and returns immediately.
- *
- * <p>If the current thread already holds the write lock then the
- * hold count is incremented by one and the method returns
- * immediately.
- *
- * <p>If the lock is held by another thread then the current
- * thread becomes disabled for thread scheduling purposes and
- * lies dormant until the write lock has been acquired.
- *
- * @param resource on which the lock has to be acquired
- */
- public void writeLock(final R resource) {
- acquire(resource, ActiveLock::writeLock);
- }
-
- /**
- * Releases the write lock on given resource.
- *
- * @param resource for which the lock has to be released
- * @throws IllegalMonitorStateException if the current thread does not
- * hold this lock
- */
- public void writeUnlock(final R resource)
- throws IllegalMonitorStateException {
- release(resource, ActiveLock::writeUnlock);
- }
-
- /**
- * Acquires the lock on given resource using the provided lock function.
- *
- * @param resource on which the lock has to be acquired
- * @param lockFn function to acquire the lock
- */
- private void acquire(final R resource, final Consumer<ActiveLock> lockFn) {
- lockFn.accept(getLockForLocking(resource));
- }
-
- /**
- * Releases the lock on given resource using the provided release function.
- *
- * @param resource for which the lock has to be released
- * @param releaseFn function to release the lock
- */
- private void release(final R resource, final Consumer<ActiveLock> releaseFn) {
- final ActiveLock lock = getLockForReleasing(resource);
- releaseFn.accept(lock);
- decrementActiveLockCount(resource);
- }
-
- /**
- * Returns {@link ActiveLock} instance for the given resource,
- * on which the lock can be acquired.
- *
- * @param resource on which the lock has to be acquired
- * @return {@link ActiveLock} instance
- */
- private ActiveLock getLockForLocking(final R resource) {
- /*
- * While getting a lock object for locking we should
- * atomically increment the active count of the lock.
- *
- * This is to avoid cases where the selected lock could
- * be removed from the activeLocks map and returned to
- * the object pool.
- */
- return activeLocks.compute(resource, (k, v) -> {
- final ActiveLock lock;
- try {
- if (v == null) {
- lock = lockPool.borrowObject();
- } else {
- lock = v;
- }
- lock.incrementActiveCount();
- } catch (Exception ex) {
- LOG.error("Unable to obtain lock.", ex);
- throw new RuntimeException(ex);
- }
- return lock;
- });
- }
-
- /**
- * Returns {@link ActiveLock} instance for the given resource,
- * for which the lock has to be released.
- *
- * @param resource for which the lock has to be released
- * @return {@link ActiveLock} instance
- */
- private ActiveLock getLockForReleasing(final R resource) {
- if (activeLocks.containsKey(resource)) {
- return activeLocks.get(resource);
- }
- // Someone is releasing a lock which was never acquired.
- LOG.error("Trying to release the lock on {}, which was never acquired.",
- resource);
- throw new IllegalMonitorStateException("Releasing lock on resource "
- + resource + " without acquiring lock");
- }
-
- /**
- * Decrements the active lock count and returns the {@link ActiveLock}
- * object to pool if the active count is 0.
- *
- * @param resource resource to which the ActiveLock is associated
- */
- private void decrementActiveLockCount(final R resource) {
- activeLocks.computeIfPresent(resource, (k, v) -> {
- v.decrementActiveCount();
- if (v.getActiveLockCount() != 0) {
- return v;
- }
- lockPool.returnObject(v);
- return null;
- });
- }
-
- /**
- * Returns the number of reentrant read holds on this lock by the current
- * thread on a given resource.
- *
- * @param resource for which the read lock hold count has to be returned
- * @return the number of holds on the read lock by the current thread,
- * or zero if the read lock is not held by the current thread
- */
- public int getReadHoldCount(final R resource) {
- ActiveLock activeLock = activeLocks.get(resource);
- if (activeLock != null) {
- return activeLock.getReadHoldCount();
- }
- return 0;
- }
-
- /**
- * Returns the number of reentrant write holds on this lock by the current
- * thread on a given resource.
- *
- * @param resource for which the write lock hold count has to be returned
- * @return the number of holds on the write lock by the current thread,
- * or zero if the write lock is not held by the current thread
- */
- public int getWriteHoldCount(final R resource) {
- ActiveLock activeLock = activeLocks.get(resource);
- if (activeLock != null) {
- return activeLock.getWriteHoldCount();
- }
- return 0;
- }
-
- /**
- * Queries if the write lock is held by the current thread on a given
- * resource.
- *
- * @param resource for which the query has to be returned
- * @return {@code true} if the current thread holds the write lock and
- * {@code false} otherwise
- */
- public boolean isWriteLockedByCurrentThread(final R resource) {
- ActiveLock activeLock = activeLocks.get(resource);
- if (activeLock != null) {
- return activeLock.isWriteLockedByCurrentThread();
- }
- return false;
- }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
deleted file mode 100644
index 1e3ba05a3a2..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.commons.pool2.BasePooledObjectFactory;
-import org.apache.commons.pool2.PooledObject;
-import org.apache.commons.pool2.impl.DefaultPooledObject;
-
-/**
- * Pool factory to create {@code ActiveLock} instances.
- */
-public class PooledLockFactory extends BasePooledObjectFactory<ActiveLock> {
-
- private boolean fairness;
-
- PooledLockFactory(boolean fair) {
- this.fairness = fair;
- }
- @Override
- public ActiveLock create() throws Exception {
- return ActiveLock.newInstance(fairness);
- }
-
- @Override
- public PooledObject<ActiveLock> wrap(ActiveLock activeLock) {
- return new DefaultPooledObject<>(activeLock);
- }
-
- @Override
- public void activateObject(PooledObject<ActiveLock> pooledObject) {
- pooledObject.getObject().resetCounter();
- }
-}
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 048af241a33..47067de5fed 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1298,15 +1298,6 @@
      see ozone.scm.heartbeat.thread.interval before changing this value.
    </description>
  </property>
-  <property>
-    <name>ozone.trace.enabled</name>
-    <value>false</value>
-    <tag>OZONE, DEBUG</tag>
-    <description>
-      Setting this flag to true dumps the HTTP request/ response in
-      the logs. Very useful when debugging REST protocol.
-    </description>
-  </property>
  <property>
    <name>ozone.key.preallocation.max.blocks</name>
@@ -3440,6 +3431,14 @@
      Timeout for the request submitted directly to Ratis in datanode.
    </description>
  </property>
+  <property>
+    <name>hdds.datanode.slow.op.warning.threshold</name>
+    <tag>OZONE, DATANODE, PERFORMANCE</tag>
+    <value>500ms</value>
+    <description>
+      Thresholds for printing slow-operation audit logs.
+    </description>
+  </property>
  <property>
    <name>ozone.om.keyname.character.check.enabled</name>
    <tag>OZONE, OM</tag>
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index fa5e9e60abe..44f08176267 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@ -43,7 +43,6 @@
public class TestReplicationConfig {
private static final int MB = 1024 * 1024;
- private static final int KB = 1024;
//NOTE: if a new chunkSize is used/added in the parameters other than KB or MB
// please revisit the method createECDescriptor, to handle the new chunkSize.
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java
index 26c861dc68b..76b6a0db89b 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java
@@ -22,10 +22,6 @@
*/
public final class MockSpaceUsageSource {
- public static SpaceUsageSource zero() {
- return fixed(0, 0);
- }
-
public static SpaceUsageSource unlimited() {
return fixed(Long.MAX_VALUE, Long.MAX_VALUE);
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
index 9c701ca1fc7..674c1233dee 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
@@ -20,6 +20,7 @@
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.Builder;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.mockito.stubbing.Answer;
import java.io.File;
@@ -30,11 +31,10 @@
import java.util.concurrent.atomic.AtomicLong;
import static org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.newBuilder;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -44,8 +44,8 @@
*/
public class TestCachingSpaceUsageSource {
- private static final File DIR =
- getTestDir(TestCachingSpaceUsageSource.class.getSimpleName());
+ @TempDir
+ private static File dir;
@Test
public void providesInitialValueUntilStarted() {
@@ -156,7 +156,7 @@ private static Builder paramsBuilder(AtomicLong savedValue) {
}
private static Builder paramsBuilder() {
- return newBuilder(DIR)
+ return newBuilder(dir)
.withSource(MockSpaceUsageSource.fixed(10000, 1000))
.withRefresh(Duration.ofMinutes(5));
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
index a87f3fad25e..8363f8b41b6 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
@@ -17,16 +17,13 @@
*/
package org.apache.hadoop.hdds.fs;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.Shell;
-import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import static org.apache.hadoop.ozone.OzoneConsts.KB;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
@@ -40,18 +37,12 @@
*/
public class TestDU {
- private static final File DIR = getTestDir(TestDU.class.getSimpleName());
+ @TempDir
+ private File dir;
@BeforeEach
void setUp() {
assumeFalse(Shell.WINDOWS);
- FileUtil.fullyDelete(DIR);
- assertTrue(DIR.mkdirs());
- }
-
- @AfterEach
- void tearDown() throws IOException {
- FileUtil.fullyDelete(DIR);
}
static void createFile(File newFile, int size) throws IOException {
@@ -80,7 +71,7 @@ static void createFile(File newFile, int size) throws IOException {
@Test
void testGetUsed() throws Exception {
final long writtenSize = 32 * KB;
- File file = new File(DIR, "data");
+ File file = new File(dir, "data");
createFile(file, (int) writtenSize);
SpaceUsageSource du = new DU(file);
@@ -91,9 +82,9 @@ void testGetUsed() throws Exception {
@Test
void testExcludePattern() throws IOException {
- createFile(new File(DIR, "include.txt"), (int) (4 * KB));
- createFile(new File(DIR, "exclude.tmp"), (int) (100 * KB));
- SpaceUsageSource du = new DU(DIR, "*.tmp");
+ createFile(new File(dir, "include.txt"), (int) (4 * KB));
+ createFile(new File(dir, "exclude.tmp"), (int) (100 * KB));
+ SpaceUsageSource du = new DU(dir, "*.tmp");
long usedSpace = du.getUsedSpace();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
index 6e603f8ff0b..4e8379c9498 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
@@ -22,8 +22,8 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
@@ -38,8 +38,7 @@ public void testCreateViaConfig() {
}
@Test
- public void testParams() {
- File dir = getTestDir(getClass().getSimpleName());
+ public void testParams(@TempDir File dir) {
Duration refresh = Duration.ofHours(1);
OzoneConfiguration conf = new OzoneConfiguration();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
index 85b21df86b9..04cfd420317 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
@@ -17,45 +17,30 @@
*/
package org.apache.hadoop.hdds.fs;
-import org.apache.hadoop.fs.FileUtil;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import static org.apache.hadoop.hdds.fs.TestDU.createFile;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for {@link DedicatedDiskSpaceUsage}.
*/
class TestDedicatedDiskSpaceUsage {
- private static final File DIR =
- getTestDir(TestDedicatedDiskSpaceUsage.class.getSimpleName());
+ @TempDir
+ private File dir;
private static final int FILE_SIZE = 1024;
- @BeforeEach
- void setUp() {
- FileUtil.fullyDelete(DIR);
- assertTrue(DIR.mkdirs());
- }
-
- @AfterEach
- void tearDown() {
- FileUtil.fullyDelete(DIR);
- }
-
@Test
void testGetUsed() throws IOException {
- File file = new File(DIR, "data");
+ File file = new File(dir, "data");
createFile(file, FILE_SIZE);
- SpaceUsageSource subject = new DedicatedDiskSpaceUsage(DIR);
+ SpaceUsageSource subject = new DedicatedDiskSpaceUsage(dir);
// condition comes from TestDFCachingGetSpaceUsed in Hadoop Common
assertThat(subject.getUsedSpace()).isGreaterThanOrEqualTo(FILE_SIZE - 20);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
index 0142ee56af7..8391976da09 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
@@ -23,8 +23,9 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import static org.apache.hadoop.hdds.fs.DedicatedDiskSpaceUsageFactory.Conf.configKeyForRefreshPeriod;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
@@ -40,10 +41,9 @@ public void testCreateViaConfig() {
}
@Test
- public void testParams() {
+ public void testParams(@TempDir File dir) {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(configKeyForRefreshPeriod(), "2m");
- File dir = getTestDir(getClass().getSimpleName());
SpaceUsageCheckParams params = new DedicatedDiskSpaceUsageFactory()
.setConfiguration(conf)
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
index f35e6975082..6a901d6cbc4 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
@@ -18,10 +18,9 @@
package org.apache.hadoop.hdds.fs;
import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -30,7 +29,6 @@
import java.time.Instant;
import java.util.OptionalLong;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import static org.apache.ozone.test.GenericTestUtils.waitFor;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -41,8 +39,8 @@
*/
public class TestSaveSpaceUsageToFile {
- private static final File DIR =
- getTestDir(TestSaveSpaceUsageToFile.class.getSimpleName());
+ @TempDir
+ private File dir;
private static final Duration LONG_EXPIRY = Duration.ofMinutes(15);
@@ -53,14 +51,7 @@ public class TestSaveSpaceUsageToFile {
@BeforeEach
public void setup() {
- FileUtil.fullyDelete(DIR);
- assertTrue(DIR.mkdirs());
- file = new File(DIR, "space_usage.txt");
- }
-
- @AfterEach
- public void cleanup() {
- FileUtil.fullyDelete(DIR);
+ file = new File(dir, "space_usage.txt");
}
@Test
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
index 4030f6e46d4..b05deaa0d66 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
@@ -29,7 +29,6 @@
import static org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for {@link DatanodeDetails}.
@@ -50,14 +49,10 @@ void protoIncludesNewPortsOnlyForV1() {
}
public static void assertPorts(HddsProtos.DatanodeDetailsProto dn,
-      Set<Port.Name> expectedPorts) {
+      Set<Port.Name> expectedPorts) throws IllegalArgumentException {
assertEquals(expectedPorts.size(), dn.getPortsCount());
for (HddsProtos.Port port : dn.getPortsList()) {
- try {
- assertThat(expectedPorts).contains(Port.Name.valueOf(port.getName()));
- } catch (IllegalArgumentException e) {
- fail("Unknown port: " + port.getName());
- }
+ assertThat(expectedPorts).contains(Port.Name.valueOf(port.getName()));
}
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
index 3bf2ef40231..f022a6030c0 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
@@ -112,6 +112,7 @@ void testJsonOutput() throws IOException {
assertEquals(0, stats.get("EMPTY").longValue());
assertEquals(0, stats.get("OPEN_UNHEALTHY").longValue());
assertEquals(0, stats.get("QUASI_CLOSED_STUCK").longValue());
+ assertEquals(0, stats.get("OPEN_WITHOUT_PIPELINE").longValue());
JsonNode samples = json.get("samples");
assertEquals(ARRAY, samples.get("UNDER_REPLICATED").getNodeType());
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
index 67b210a05eb..f737ec23a0c 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
@@ -52,7 +52,7 @@
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.junit.jupiter.params.provider.Arguments.arguments;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java
index 8d5c56fc0d5..39884fcd5a9 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestTracingUtil.java
@@ -51,8 +51,7 @@ public void testInitTracing() {
Configuration config = Configuration.fromEnv("testInitTracing");
JaegerTracer tracer = config.getTracerBuilder().build();
GlobalTracer.registerIfAbsent(tracer);
- try (AutoCloseable scope = TracingUtil
- .createActivatedSpan("initTracing")) {
+ try (AutoCloseable ignored = TracingUtil.createActivatedSpan("initTracing")) {
exportCurrentSpan();
} catch (Exception e) {
fail("Should not get exception");
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java
index 253cf7dfe47..c73dfdea03a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSLayoutVersionManager.java
@@ -26,7 +26,7 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java
similarity index 100%
rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java
rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
index 5b88f5cb300..9567fa2c281 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
@@ -22,7 +22,7 @@
import org.junit.jupiter.api.Test;
import java.nio.charset.StandardCharsets;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
import java.util.zip.Checksum;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -59,11 +59,9 @@ void testCorrectness() {
checkBytes("hello world!".getBytes(StandardCharsets.UTF_8));
- final Random random = new Random();
- final byte[] bytes = new byte[1 << 10];
+ final int len = 1 << 10;
for (int i = 0; i < 1000; i++) {
- random.nextBytes(bytes);
- checkBytes(bytes, random.nextInt(bytes.length));
+ checkBytes(RandomUtils.nextBytes(len), RandomUtils.nextInt(0, len));
}
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
index 414754092f9..3d6d38f3d3b 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
@@ -37,7 +37,6 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test {@link ChunkBuffer} implementations.
@@ -49,7 +48,7 @@ private static int nextInt(int n) {
@Test
@Timeout(1)
- public void testImplWithByteBuffer() {
+ void testImplWithByteBuffer() throws IOException {
runTestImplWithByteBuffer(1);
runTestImplWithByteBuffer(1 << 10);
for (int i = 0; i < 10; i++) {
@@ -57,7 +56,7 @@ public void testImplWithByteBuffer() {
}
}
- private static void runTestImplWithByteBuffer(int n) {
+ private static void runTestImplWithByteBuffer(int n) throws IOException {
final byte[] expected = new byte[n];
ThreadLocalRandom.current().nextBytes(expected);
runTestImpl(expected, 0, ChunkBuffer.allocate(n));
@@ -65,7 +64,7 @@ private static void runTestImplWithByteBuffer(int n) {
@Test
@Timeout(1)
- public void testIncrementalChunkBuffer() {
+ void testIncrementalChunkBuffer() throws IOException {
runTestIncrementalChunkBuffer(1, 1);
runTestIncrementalChunkBuffer(4, 8);
runTestIncrementalChunkBuffer(16, 1 << 10);
@@ -76,7 +75,7 @@ public void testIncrementalChunkBuffer() {
}
}
- private static void runTestIncrementalChunkBuffer(int increment, int n) {
+ private static void runTestIncrementalChunkBuffer(int increment, int n) throws IOException {
final byte[] expected = new byte[n];
ThreadLocalRandom.current().nextBytes(expected);
runTestImpl(expected, increment,
@@ -85,7 +84,7 @@ private static void runTestIncrementalChunkBuffer(int increment, int n) {
@Test
@Timeout(1)
- public void testImplWithList() {
+ void testImplWithList() throws IOException {
runTestImplWithList(4, 8);
runTestImplWithList(16, 1 << 10);
for (int i = 0; i < 10; i++) {
@@ -95,7 +94,7 @@ public void testImplWithList() {
}
}
- private static void runTestImplWithList(int count, int n) {
+ private static void runTestImplWithList(int count, int n) throws IOException {
final byte[] expected = new byte[n];
ThreadLocalRandom.current().nextBytes(expected);
@@ -117,7 +116,7 @@ private static void runTestImplWithList(int count, int n) {
runTestImpl(expected, -1, impl);
}
- private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) {
+ private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) throws IOException {
final int n = expected.length;
System.out.println("n=" + n + ", impl=" + impl);
@@ -207,33 +206,13 @@ private static void assertToByteString(
"offset=" + offset + ", length=" + length);
}
- private static void assertWrite(byte[] expected, ChunkBuffer impl) {
+ private static void assertWrite(byte[] expected, ChunkBuffer impl) throws IOException {
impl.rewind();
assertEquals(0, impl.position());
ByteArrayOutputStream output = new ByteArrayOutputStream(expected.length);
-
- try {
- impl.writeTo(new MockGatheringChannel(Channels.newChannel(output)));
- } catch (IOException e) {
- fail("Unexpected error: " + e);
- }
-
+ impl.writeTo(new MockGatheringChannel(Channels.newChannel(output)));
assertArrayEquals(expected, output.toByteArray());
assertFalse(impl.hasRemaining());
}
-
- private static String toString(byte[] arr) {
- if (arr == null || arr.length == 0) {
- return "";
- }
-
- StringBuilder sb = new StringBuilder();
- for (byte b : arr) {
- sb.append(Character.forDigit((b >> 4) & 0xF, 16))
- .append(Character.forDigit((b & 0xF), 16))
- .append(" ");
- }
- return sb.deleteCharAt(sb.length() - 1).toString();
- }
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index c30f788397a..92754c9fa69 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -442,11 +442,9 @@ public static Builder newGetBlockRequestBuilder(
/**
* Verify the response against the request.
*
- * @param request - Request
* @param response - Response
*/
- public static void verifyGetBlock(ContainerCommandRequestProto request,
- ContainerCommandResponseProto response, int expectedChunksCount) {
+ public static void verifyGetBlock(ContainerCommandResponseProto response, int expectedChunksCount) {
assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
assertEquals(expectedChunksCount,
response.getGetBlock().getBlockData().getChunksCount());
@@ -495,23 +493,6 @@ public static ContainerCommandRequestProto getCloseContainer(
Pipeline pipeline, long containerID) throws IOException {
return getCloseContainer(pipeline, containerID, null);
}
- /**
- * Returns a simple request without traceId.
- * @param pipeline - pipeline
- * @param containerID - ID of the container.
- * @return ContainerCommandRequestProto without traceId.
- */
- public static ContainerCommandRequestProto getRequestWithoutTraceId(
- Pipeline pipeline, long containerID) throws IOException {
- Preconditions.checkNotNull(pipeline);
- return ContainerCommandRequestProto.newBuilder()
- .setCmdType(ContainerProtos.Type.CloseContainer)
- .setContainerID(containerID)
- .setCloseContainer(
- ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
- .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
- .build();
- }
/**
* Returns a delete container request.
@@ -535,14 +516,6 @@ public static ContainerCommandRequestProto getDeleteContainer(
.build();
}
- private static void sleep(long milliseconds) {
- try {
- Thread.sleep(milliseconds);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- }
- }
-
public static BlockID getTestBlockID(long containerID) {
return new BlockID(containerID, UniqueId.next());
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
deleted file mode 100644
index 62b8e6ac50b..00000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.ozone.test.GenericTestUtils;
-import org.apache.hadoop.util.Daemon;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-/**
- * Test-cases to test LockManager.
- */
-class TestLockManager {
-
- @Test
- @Timeout(1)
- void testWriteLockWithDifferentResource() {
- final LockManager manager =
- new LockManager<>(new OzoneConfiguration());
- manager.writeLock("/resourceOne");
- // This should work, as they are different resource.
- assertDoesNotThrow(() -> manager.writeLock("/resourceTwo"));
- manager.writeUnlock("/resourceOne");
- manager.writeUnlock("/resourceTwo");
- }
-
- @Test
- void testWriteLockWithSameResource() throws Exception {
- final LockManager manager =
- new LockManager<>(new OzoneConfiguration());
- final AtomicBoolean gotLock = new AtomicBoolean(false);
- manager.writeLock("/resourceOne");
- new Thread(() -> {
- manager.writeLock("/resourceOne");
- gotLock.set(true);
- manager.writeUnlock("/resourceOne");
- }).start();
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- // Since the other thread is trying to get write lock on same object,
- // it will wait.
- assertFalse(gotLock.get());
- manager.writeUnlock("/resourceOne");
- // Since we have released the write lock, the other thread should have
- // the lock now
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- assertTrue(gotLock.get());
- }
-
- @Test
- @Timeout(1)
- void testReadLockWithDifferentResource() {
- final LockManager manager =
- new LockManager<>(new OzoneConfiguration());
- manager.readLock("/resourceOne");
- assertDoesNotThrow(() -> manager.readLock("/resourceTwo"));
- manager.readUnlock("/resourceOne");
- manager.readUnlock("/resourceTwo");
- }
-
- @Test
- void testReadLockWithSameResource() throws Exception {
- final LockManager manager =
- new LockManager<>(new OzoneConfiguration());
- final AtomicBoolean gotLock = new AtomicBoolean(false);
- manager.readLock("/resourceOne");
- new Thread(() -> {
- manager.readLock("/resourceOne");
- gotLock.set(true);
- manager.readUnlock("/resourceOne");
- }).start();
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- // Since the new thread is trying to get read lock, it should work.
- assertTrue(gotLock.get());
- manager.readUnlock("/resourceOne");
- }
-
- @Test
- void testWriteReadLockWithSameResource() throws Exception {
- final LockManager manager =
- new LockManager<>(new OzoneConfiguration());
- final AtomicBoolean gotLock = new AtomicBoolean(false);
- manager.writeLock("/resourceOne");
- new Thread(() -> {
- manager.readLock("/resourceOne");
- gotLock.set(true);
- manager.readUnlock("/resourceOne");
- }).start();
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- // Since the other thread is trying to get read lock on same object,
- // it will wait.
- assertFalse(gotLock.get());
- manager.writeUnlock("/resourceOne");
- // Since we have released the write lock, the other thread should have
- // the lock now
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- assertTrue(gotLock.get());
- }
-
- @Test
- void testReadWriteLockWithSameResource() throws Exception {
- final LockManager manager =
- new LockManager<>(new OzoneConfiguration());
- final AtomicBoolean gotLock = new AtomicBoolean(false);
- manager.readLock("/resourceOne");
- new Thread(() -> {
- manager.writeLock("/resourceOne");
- gotLock.set(true);
- manager.writeUnlock("/resourceOne");
- }).start();
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- // Since the other thread is trying to get write lock on same object,
- // it will wait.
- assertFalse(gotLock.get());
- manager.readUnlock("/resourceOne");
- // Since we have released the read lock, the other thread should have
- // the lock now
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- assertTrue(gotLock.get());
- }
-
- @Test
- void testMultiReadWriteLockWithSameResource() throws Exception {
- final LockManager manager =
- new LockManager<>(new OzoneConfiguration());
- final AtomicBoolean gotLock = new AtomicBoolean(false);
- manager.readLock("/resourceOne");
- manager.readLock("/resourceOne");
- new Thread(() -> {
- manager.writeLock("/resourceOne");
- gotLock.set(true);
- manager.writeUnlock("/resourceOne");
- }).start();
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- // Since the other thread is trying to get write lock on same object,
- // it will wait.
- assertFalse(gotLock.get());
- manager.readUnlock("/resourceOne");
- //We have only released one read lock, we still hold another read lock.
- Thread.sleep(100);
- assertFalse(gotLock.get());
- manager.readUnlock("/resourceOne");
- // Since we have released the read lock, the other thread should have
- // the lock now
- // Let's give some time for the other thread to run
- Thread.sleep(100);
- assertTrue(gotLock.get());
- }
-
- @Test
- void testConcurrentWriteLockWithDifferentResource() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
- final int count = 100;
- final LockManager manager = new LockManager<>(conf);
- final int sleep = 10;
- final AtomicInteger done = new AtomicInteger();
- for (int i = 0; i < count; i++) {
- final Integer id = i;
- Daemon d1 = new Daemon(() -> {
- try {
- manager.writeLock(id);
- Thread.sleep(sleep);
- } catch (InterruptedException e) {
- e.printStackTrace();
- } finally {
- manager.writeUnlock(id);
- }
- done.getAndIncrement();
- });
- d1.setName("Locker-" + i);
- d1.start();
- }
- GenericTestUtils.waitFor(() -> done.get() == count, 100,
- 10 * count * sleep);
- assertEquals(count, done.get());
- }
-}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java
deleted file mode 100644
index cf4eb657f29..00000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.lock;
-/*
- This package contains the lock related test classes.
- */
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
index a40eecc62b7..028a82b4fc6 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
@@ -169,17 +169,12 @@ private LayoutVersionManager getMockLvm(int mlv, int slv) {
* Mock Interface.
*/
interface MockInterface {
- String mockMethod();
}
/**
* Mock Impl v1.
*/
static class MockClassV1 implements MockInterface {
- @Override
- public String mockMethod() {
- return getClass().getSimpleName();
- }
}
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
similarity index 100%
rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml
index e372c4b3558..fb72f93570b 100644
--- a/hadoop-hdds/config/pom.xml
+++ b/hadoop-hdds/config/pom.xml
@@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
index 7ac5d885e39..d4bfb360b9c 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationTarget.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdds.conf;
-import java.time.temporal.TemporalUnit;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.TimeDurationUtil.ParsedTimeDuration;
@@ -53,10 +52,6 @@ default void setTimeDuration(String name, long value, TimeUnit unit) {
set(name, value + ParsedTimeDuration.unitFor(unit).suffix());
}
- default void setTimeDuration(String name, long value, TemporalUnit unit) {
- set(name, value + ParsedTimeDuration.unitFor(unit).suffix());
- }
-
default void setStorageSize(String name, long value, StorageUnit unit) {
set(name, value + unit.getShortName());
}
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java
index c8775fbf94b..c9b8a1d2c17 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java
@@ -207,14 +207,5 @@ static ParsedTimeDuration unitFor(TimeUnit unit) {
}
return null;
}
-
- static ParsedTimeDuration unitFor(TemporalUnit unit) {
- for (ParsedTimeDuration ptd : values()) {
- if (ptd.temporalUnit() == unit) {
- return ptd;
- }
- }
- return null;
- }
}
}
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 058274e5668..13973c871e6 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
Apache Ozone HDDS Container Service
jar
- false
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 3cd0477ffd7..f59622cb0fa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -82,6 +82,7 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS;
import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY;
import static org.apache.hadoop.ozone.common.Storage.StorageState.INITIALIZED;
+import static org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig.REPLICATION_STREAMS_LIMIT_KEY;
import static org.apache.hadoop.security.UserGroupInformation.getCurrentUser;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX;
import static org.apache.hadoop.util.ExitUtil.terminate;
@@ -291,7 +292,9 @@ public void start() {
.register(HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX,
this::reconfigBlockDeleteThreadMax)
.register(OZONE_BLOCK_DELETING_SERVICE_WORKERS,
- this::reconfigDeletingServiceWorkers);
+ this::reconfigDeletingServiceWorkers)
+ .register(REPLICATION_STREAMS_LIMIT_KEY,
+ this::reconfigReplicationStreamsLimit);
datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf,
dnCertClient, secretKeyClient, this::terminateDatanode, dnCRLStore,
@@ -667,4 +670,12 @@ private String reconfigDeletingServiceWorkers(String value) {
.setPoolSize(Integer.parseInt(value));
return value;
}
+
+ private String reconfigReplicationStreamsLimit(String value) {
+ getConf().set(REPLICATION_STREAMS_LIMIT_KEY, value);
+
+ getDatanodeStateMachine().getContainer().getReplicationServer()
+ .setPoolSize(Integer.parseInt(value));
+ return value;
+ }
}
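
For context, the new reconfigReplicationStreamsLimit handler follows the same register/callback pattern as the existing block-delete reconfiguration entries: each reconfigurable key maps to a function that validates, applies, and echoes the new value. Below is a minimal, self-contained sketch of that pattern; ReconfigRegistry and its method names are illustrative stand-ins, not the actual Ozone ReconfigurationHandler API.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.UnaryOperator;

// Minimal sketch of the register/apply pattern used for reconfigurable
// properties; names are illustrative, not the real Ozone API.
class ReconfigRegistry {
  private final Map<String, UnaryOperator<String>> handlers = new ConcurrentHashMap<>();

  ReconfigRegistry register(String key, UnaryOperator<String> handler) {
    handlers.put(key, handler);
    return this; // fluent, mirroring the chained .register(...) calls above
  }

  String apply(String key, String newValue) {
    UnaryOperator<String> handler = handlers.get(key);
    if (handler == null) {
      throw new IllegalArgumentException("Property is not reconfigurable: " + key);
    }
    return handler.apply(newValue); // handler applies the value and echoes it back
  }
}
```

With this shape, registering the replication-streams key would look like `registry.register(REPLICATION_STREAMS_LIMIT_KEY, v -> { replicationServer.setPoolSize(Integer.parseInt(v)); return v; })`, mirroring the handler added above.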
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 94b51223228..3c202ba60a8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -41,7 +41,7 @@
import java.util.concurrent.atomic.AtomicLong;
import org.yaml.snakeyaml.Yaml;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index ddb21937710..f20615d23f8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -75,7 +75,9 @@
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder;
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.malformedRequest;
import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest;
import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult;
@@ -101,6 +103,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
private String clusterId;
private ContainerMetrics metrics;
private final TokenVerifier tokenVerifier;
+ private long slowOpThresholdMs;
/**
* Constructs an OzoneContainer that receives calls from
@@ -121,6 +124,7 @@ public HddsDispatcher(ConfigurationSource config, ContainerSet contSet,
HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
this.tokenVerifier = tokenVerifier != null ? tokenVerifier
: new NoopTokenVerifier();
+ this.slowOpThresholdMs = getSlowOpThresholdMs(conf);
protocolMetrics =
new ProtocolMessageMetrics<>(
@@ -196,6 +200,7 @@ private ContainerCommandResponseProto dispatchRequest(
AuditAction action = getAuditAction(msg.getCmdType());
EventType eventType = getEventType(msg);
Map params = getAuditParams(msg);
+ PerformanceStringBuilder perf = new PerformanceStringBuilder();
ContainerType containerType;
ContainerCommandResponseProto responseProto = null;
@@ -326,10 +331,11 @@ private ContainerCommandResponseProto dispatchRequest(
audit(action, eventType, params, AuditEventStatus.FAILURE, ex);
return ContainerUtils.logAndReturnError(LOG, ex, msg);
}
+ perf.appendPreOpLatencyMs(Time.monotonicNow() - startTime);
responseProto = handler.handle(msg, container, dispatcherContext);
+ long opLatencyMs = Time.monotonicNow() - startTime;
if (responseProto != null) {
- metrics.incContainerOpsLatencies(cmdType,
- Time.monotonicNow() - startTime);
+ metrics.incContainerOpsLatencies(cmdType, opLatencyMs);
// If the request is of Write Type and the container operation
// is unsuccessful, it implies the applyTransaction on the container
@@ -402,6 +408,8 @@ private ContainerCommandResponseProto dispatchRequest(
audit(action, eventType, params, AuditEventStatus.FAILURE,
new Exception(responseProto.getMessage()));
}
+ perf.appendOpLatencyMs(opLatencyMs);
+ performanceAudit(action, params, perf, opLatencyMs);
return responseProto;
} else {
@@ -412,6 +420,13 @@ private ContainerCommandResponseProto dispatchRequest(
}
}
+ private long getSlowOpThresholdMs(ConfigurationSource config) {
+ return config.getTimeDuration(
+ HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_KEY,
+ HddsConfigKeys.HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_DEFAULT,
+ TimeUnit.MILLISECONDS);
+ }
+
private void updateBCSID(Container container,
DispatcherContext dispatcherContext, Type cmdType) {
if (dispatcherContext != null && (cmdType == Type.PutBlock
@@ -682,6 +697,26 @@ private void audit(AuditAction action, EventType eventType,
}
}
+ private void performanceAudit(AuditAction action, Map<String, String> params,
+ PerformanceStringBuilder performance, long opLatencyMs) {
+ if (isOperationSlow(opLatencyMs)) {
+ AuditMessage msg =
+ buildAuditMessageForPerformance(action, params, performance);
+ AUDIT.logPerformance(msg);
+ }
+ }
+
+ public AuditMessage buildAuditMessageForPerformance(AuditAction op,
+ Map<String, String> auditMap, PerformanceStringBuilder performance) {
+ return new AuditMessage.Builder()
+ .setUser(null)
+ .atIp(null)
+ .forOperation(op)
+ .withParams(auditMap)
+ .setPerformance(performance)
+ .build();
+ }
+
//TODO: use GRPC to fetch user and ip details
@Override
public AuditMessage buildAuditMessageForSuccess(AuditAction op,
@@ -846,6 +881,8 @@ private static Map getAuditParams(
case ReadChunk:
auditParams.put("blockData",
BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString());
+ auditParams.put("blockDataSize",
+ String.valueOf(msg.getReadChunk().getChunkData().getLen()));
return auditParams;
case DeleteChunk:
@@ -858,6 +895,8 @@ private static Map getAuditParams(
auditParams.put("blockData",
BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID())
.toString());
+ auditParams.put("blockDataSize",
+ String.valueOf(msg.getWriteChunk().getChunkData().getLen()));
return auditParams;
case ListChunk:
@@ -874,6 +913,8 @@ private static Map getAuditParams(
auditParams.put("blockData",
BlockData.getFromProtoBuf(msg.getPutSmallFile()
.getBlock().getBlockData()).toString());
+ auditParams.put("blockDataSize",
+ String.valueOf(msg.getPutSmallFile().getChunkInfo().getLen()));
} catch (IOException ex) {
if (LOG.isTraceEnabled()) {
LOG.trace("Encountered error parsing BlockData from protobuf: "
@@ -911,4 +952,7 @@ private static Map getAuditParams(
}
+ private boolean isOperationSlow(long opLatencyMs) {
+ return opLatencyMs >= slowOpThresholdMs;
+ }
}
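
To make the intent of the new slow-op path concrete: an operation's latency is compared against the configured HDDS_DATANODE_SLOW_OP_WARNING_THRESHOLD_KEY value, and only operations at or above it are written to the performance audit log. The following is a hedged, standalone sketch of that gating idea, not the HddsDispatcher code itself; the logger wiring is an assumption.

```java
import java.util.concurrent.TimeUnit;

// Illustrative sketch of slow-op gating: only operations whose latency
// reaches the configured threshold produce a performance audit entry.
class SlowOpAudit {
  private final long slowOpThresholdMs;

  SlowOpAudit(long thresholdMs) {
    this.slowOpThresholdMs = thresholdMs;
  }

  void record(String action, long startNanos) {
    long opLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    if (opLatencyMs >= slowOpThresholdMs) {
      // Slow operations only, keeping audit volume bounded under normal latencies.
      System.out.printf("PERFORMANCE %s took %d ms%n", action, opLatencyMs);
    }
  }
}
```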
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 13f7ad61502..6bbf8e47946 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -893,18 +893,21 @@ public Map getCommandStatusMap() {
}
/**
- * Updates status of a pending status command.
+ * Updates the command status of a pending command.
* @param cmdId command id
* @param cmdStatusUpdater Consumer to update command status.
- * @return true if command status updated successfully else false.
+ * @return true if the command status was updated successfully, false if the
+ * command associated with the command id does not exist in the context.
*/
public boolean updateCommandStatus(Long cmdId,
Consumer<CommandStatus> cmdStatusUpdater) {
- if (cmdStatusMap.containsKey(cmdId)) {
- cmdStatusUpdater.accept(cmdStatusMap.get(cmdId));
- return true;
- }
- return false;
+ CommandStatus updatedCommandStatus = cmdStatusMap.computeIfPresent(cmdId,
+ (key, value) -> {
+ cmdStatusUpdater.accept(value);
+ return value;
+ }
+ );
+ return updatedCommandStatus != null;
}
public void configureHeartbeatFrequency() {
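
The switch from containsKey/get to computeIfPresent closes the small window in which another thread could remove the entry between the check and the update; the consumer now runs inside the map's atomic remapping. A minimal sketch of the same pattern, with illustrative names rather than the StateContext types, is shown below.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Consumer;

// Sketch of the computeIfPresent pattern: the updater only runs while the
// entry exists, so there is no check-then-act race on the map.
class StatusMap<K, V> {
  private final ConcurrentMap<K, V> map = new ConcurrentHashMap<>();

  void put(K key, V value) {
    map.put(key, value);
  }

  boolean updateIfPresent(K key, Consumer<V> updater) {
    V updated = map.computeIfPresent(key, (k, v) -> {
      updater.accept(v); // mutate the tracked status in place
      return v;          // keep the same value mapped
    });
    return updated != null; // false when the key was never registered or already removed
  }
}
```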
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 6d119b17b3b..fcc611ea3f1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -25,12 +25,11 @@
import java.util.Collection;
import java.util.Collections;
import java.util.List;
-import java.util.Map;
import java.util.Objects;
-import java.util.Set;
import java.util.UUID;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingDeque;
@@ -126,6 +125,27 @@
public final class XceiverServerRatis implements XceiverServerSpi {
private static final Logger LOG = LoggerFactory
.getLogger(XceiverServerRatis.class);
+
+ private static class ActivePipelineContext {
+ /** The current datanode is the current leader of the pipeline. */
+ private final boolean isPipelineLeader;
+ /** The heartbeat containing pipeline close action has been triggered. */
+ private final boolean isPendingClose;
+
+ ActivePipelineContext(boolean isPipelineLeader, boolean isPendingClose) {
+ this.isPipelineLeader = isPipelineLeader;
+ this.isPendingClose = isPendingClose;
+ }
+
+ public boolean isPipelineLeader() {
+ return isPipelineLeader;
+ }
+
+ public boolean isPendingClose() {
+ return isPendingClose;
+ }
+ }
+
private static final AtomicLong CALL_ID_COUNTER = new AtomicLong();
private static final List DEFAULT_PRIORITY_LIST =
new ArrayList<>(
@@ -151,11 +171,8 @@ private static long nextCallId() {
private final ConfigurationSource conf;
// TODO: Remove the gids set when Ratis supports an api to query active
// pipelines
- private final Set raftGids = ConcurrentHashMap.newKeySet();
+ private final ConcurrentMap<RaftGroupId, ActivePipelineContext> activePipelines = new ConcurrentHashMap<>();
private final RaftPeerId raftPeerId;
- // pipelines for which I am the leader
- private final Map groupLeaderMap =
- new ConcurrentHashMap<>();
// Timeout used while calling submitRequest directly.
private final long requestTimeout;
private final boolean shouldDeleteRatisLogDirectory;
@@ -731,11 +748,11 @@ private void handlePipelineFailure(RaftGroupId groupId,
}
triggerPipelineClose(groupId, msg,
- ClosePipelineInfo.Reason.PIPELINE_FAILED, false);
+ ClosePipelineInfo.Reason.PIPELINE_FAILED);
}
private void triggerPipelineClose(RaftGroupId groupId, String detail,
- ClosePipelineInfo.Reason reasonCode, boolean triggerHB) {
+ ClosePipelineInfo.Reason reasonCode) {
PipelineID pipelineID = PipelineID.valueOf(groupId.getUuid());
ClosePipelineInfo.Builder closePipelineInfo =
ClosePipelineInfo.newBuilder()
@@ -749,9 +766,12 @@ private void triggerPipelineClose(RaftGroupId groupId, String detail,
.build();
if (context != null) {
context.addPipelineActionIfAbsent(action);
- // wait for the next HB timeout or right away?
- if (triggerHB) {
+ if (!activePipelines.get(groupId).isPendingClose()) {
+ // if the pipeline close action has not been triggered before, trigger it immediately to
+ // prevent SCM from allocating blocks on the failed pipeline
context.getParent().triggerHeartbeat();
+ activePipelines.computeIfPresent(groupId,
+ (key, value) -> new ActivePipelineContext(value.isPipelineLeader(), true));
}
}
LOG.error("pipeline Action {} on pipeline {}.Reason : {}",
@@ -761,7 +781,7 @@ private void triggerPipelineClose(RaftGroupId groupId, String detail,
@Override
public boolean isExist(HddsProtos.PipelineID pipelineId) {
- return raftGids.contains(
+ return activePipelines.containsKey(
RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineId).getId()));
}
@@ -785,9 +805,11 @@ public List getPipelineReport() {
for (RaftGroupId groupId : gids) {
HddsProtos.PipelineID pipelineID = PipelineID
.valueOf(groupId.getUuid()).getProtobuf();
+ boolean isLeader = activePipelines.getOrDefault(groupId,
+ new ActivePipelineContext(false, false)).isPipelineLeader();
reports.add(PipelineReport.newBuilder()
.setPipelineID(pipelineID)
- .setIsLeader(groupLeaderMap.getOrDefault(groupId, Boolean.FALSE))
+ .setIsLeader(isLeader)
.setBytesWritten(calculatePipelineBytesWritten(pipelineID))
.build());
}
@@ -877,7 +899,7 @@ void handleApplyTransactionFailure(RaftGroupId groupId,
"Ratis Transaction failure in datanode " + dnId + " with role " + role
+ " .Triggering pipeline close action.";
triggerPipelineClose(groupId, msg,
- ClosePipelineInfo.Reason.STATEMACHINE_TRANSACTION_FAILED, true);
+ ClosePipelineInfo.Reason.STATEMACHINE_TRANSACTION_FAILED);
}
/**
* The fact that the snapshot contents cannot be used to actually catch up
@@ -913,7 +935,7 @@ public void handleNodeLogFailure(RaftGroupId groupId, Throwable t) {
: t.getMessage();
triggerPipelineClose(groupId, msg,
- ClosePipelineInfo.Reason.PIPELINE_LOG_FAILED, true);
+ ClosePipelineInfo.Reason.PIPELINE_LOG_FAILED);
}
public long getMinReplicatedIndex(PipelineID pipelineID) throws IOException {
@@ -930,13 +952,12 @@ public Collection getRaftPeersInPipeline(PipelineID pipelineId) throws
}
public void notifyGroupRemove(RaftGroupId gid) {
- raftGids.remove(gid);
- // Remove any entries for group leader map
- groupLeaderMap.remove(gid);
+ // Remove Group ID entry from the active pipeline map
+ activePipelines.remove(gid);
}
void notifyGroupAdd(RaftGroupId gid) {
- raftGids.add(gid);
+ activePipelines.put(gid, new ActivePipelineContext(false, false));
sendPipelineReport();
}
@@ -946,7 +967,9 @@ void handleLeaderChangedNotification(RaftGroupMemberId groupMemberId,
"leaderId: {}", groupMemberId.getGroupId(), raftPeerId1);
// Save the reported leader to be sent with the report to SCM
boolean leaderForGroup = this.raftPeerId.equals(raftPeerId1);
- groupLeaderMap.put(groupMemberId.getGroupId(), leaderForGroup);
+ activePipelines.compute(groupMemberId.getGroupId(),
+ (key, value) -> value == null ? new ActivePipelineContext(leaderForGroup, false) :
+ new ActivePipelineContext(leaderForGroup, value.isPendingClose()));
if (context != null && leaderForGroup) {
// Publish new report from leader
sendPipelineReport();
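
The rework above replaces two independently updated collections (the raftGids set and groupLeaderMap) with one map of immutable per-pipeline state, so leadership changes and pending-close marking can each be applied without losing the other flag. Below is a standalone sketch of that merge pattern, assuming simplified stand-in types rather than the real XceiverServerRatis fields.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Immutable per-pipeline state; a new instance is created on every change.
final class PipelineState {
  final boolean leader;
  final boolean pendingClose;

  PipelineState(boolean leader, boolean pendingClose) {
    this.leader = leader;
    this.pendingClose = pendingClose;
  }
}

class PipelineRegistry {
  private final ConcurrentMap<String, PipelineState> active = new ConcurrentHashMap<>();

  void onGroupAdd(String gid) {
    active.put(gid, new PipelineState(false, false));
  }

  void onLeaderChanged(String gid, boolean leader) {
    // Preserve the pendingClose flag while updating leadership.
    active.compute(gid, (k, v) -> v == null
        ? new PipelineState(leader, false)
        : new PipelineState(leader, v.pendingClose));
  }

  void onCloseTriggered(String gid) {
    // Preserve leadership while marking the close action as triggered.
    active.computeIfPresent(gid, (k, v) -> new PipelineState(v.leader, true));
  }
}
```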
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
index 1e0d2ecd3ad..0a2375b4f44 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
@@ -32,7 +32,7 @@
import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
.newUpdater;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 44bd4cf19a4..d8ba919cefb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -41,7 +41,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.initPerDiskDBStore;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 44ae1c0e795..d9d5a667b30 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -36,7 +36,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java
index eddc77f18e4..4917810bd97 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ozone.container.common.volume;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -50,7 +50,7 @@
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.checkerframework.checker.nullness.qual.Nullable;
+import jakarta.annotation.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
index 2b8b19176ff..991f105d15b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
@@ -28,11 +28,11 @@
import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
import org.apache.hadoop.util.Timer;
-import org.checkerframework.checker.nullness.qual.Nullable;
+import jakarta.annotation.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
index 0d55c084b3d..42e2ed5758e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java
@@ -25,7 +25,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
index b862f832d76..9dedd65565f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECContainerOperationClient.java
@@ -37,7 +37,7 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -68,7 +68,7 @@ public ECContainerOperationClient(ConfigurationSource conf,
this(createClientManager(conf, certificateClient));
}
- @NotNull
+ @Nonnull
private static XceiverClientManager createClientManager(
ConfigurationSource conf, CertificateClient certificateClient)
throws IOException {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
index 24e76821f9c..234439a00c2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
@@ -112,6 +112,7 @@ public class ECReconstructionCoordinator implements Closeable {
private final ContainerClientMetrics clientMetrics;
private final ECReconstructionMetrics metrics;
private final StateContext context;
+ private final OzoneClientConfig ozoneClientConfig;
public ECReconstructionCoordinator(
ConfigurationSource conf, CertificateClient certificateClient,
@@ -125,10 +126,10 @@ public ECReconstructionCoordinator(
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setNameFormat(threadNamePrefix + "ec-reconstruct-reader-TID-%d")
.build();
+ ozoneClientConfig = conf.getObject(OzoneClientConfig.class);
this.ecReconstructExecutor =
new ThreadPoolExecutor(EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE,
- conf.getObject(OzoneClientConfig.class)
- .getEcReconstructStripeReadPoolLimit(),
+ ozoneClientConfig.getEcReconstructStripeReadPoolLimit(),
60,
TimeUnit.SECONDS,
new SynchronousQueue<>(),
@@ -222,16 +223,15 @@ public void reconstructECContainerGroup(long containerID,
private ECBlockOutputStream getECBlockOutputStream(
BlockLocationInfo blockLocationInfo, DatanodeDetails datanodeDetails,
- ECReplicationConfig repConfig, int replicaIndex,
- OzoneClientConfig configuration) throws IOException {
+ ECReplicationConfig repConfig, int replicaIndex) throws IOException {
StreamBufferArgs streamBufferArgs =
- StreamBufferArgs.getDefaultStreamBufferArgs(repConfig, configuration);
+ StreamBufferArgs.getDefaultStreamBufferArgs(repConfig, ozoneClientConfig);
return new ECBlockOutputStream(
blockLocationInfo.getBlockID(),
containerOperationClient.getXceiverClientManager(),
containerOperationClient.singleNodePipeline(datanodeDetails,
repConfig, replicaIndex),
- BufferPool.empty(), configuration,
+ BufferPool.empty(), ozoneClientConfig,
blockLocationInfo.getToken(), clientMetrics, streamBufferArgs);
}
@@ -277,15 +277,14 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo,
ECBlockOutputStream[] targetBlockStreams =
new ECBlockOutputStream[toReconstructIndexes.size()];
ByteBuffer[] bufs = new ByteBuffer[toReconstructIndexes.size()];
- OzoneClientConfig configuration = new OzoneClientConfig();
try {
for (int i = 0; i < toReconstructIndexes.size(); i++) {
int replicaIndex = toReconstructIndexes.get(i);
DatanodeDetails datanodeDetails =
targetMap.get(replicaIndex);
targetBlockStreams[i] = getECBlockOutputStream(blockLocationInfo,
- datanodeDetails, repConfig, replicaIndex,
- configuration);
+ datanodeDetails, repConfig, replicaIndex);
bufs[i] = byteBufferPool.getBuffer(false, repConfig.getEcChunkSize());
// Make sure it's clean. Don't want to reuse the erroneously returned
// buffers from the pool.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 6e817fdce98..59009ef9dfe 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -629,7 +629,6 @@ ContainerCommandResponseProto handleGetBlock(
try {
BlockID blockID = BlockID.getFromProtobuf(
request.getGetBlock().getBlockID());
- checkContainerIsHealthy(kvContainer, blockID, Type.GetBlock);
responseData = blockManager.getBlock(kvContainer, blockID)
.getProtoBufMessage();
final long numBytes = responseData.getSerializedSize();
@@ -670,8 +669,6 @@ ContainerCommandResponseProto handleGetCommittedBlockLength(
try {
BlockID blockID = BlockID
.getFromProtobuf(request.getGetCommittedBlockLength().getBlockID());
- checkContainerIsHealthy(kvContainer, blockID,
- Type.GetCommittedBlockLength);
BlockUtils.verifyBCSId(kvContainer, blockID);
blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID);
} catch (StorageContainerException ex) {
@@ -758,7 +755,6 @@ ContainerCommandResponseProto handleReadChunk(
.getChunkData());
Preconditions.checkNotNull(chunkInfo);
- checkContainerIsHealthy(kvContainer, blockID, Type.ReadChunk);
BlockUtils.verifyBCSId(kvContainer, blockID);
if (dispatcherContext == null) {
dispatcherContext = DispatcherContext.getHandleReadChunk();
@@ -796,25 +792,6 @@ ContainerCommandResponseProto handleReadChunk(
return getReadChunkResponse(request, data, byteBufferToByteString);
}
- /**
- * Throw an exception if the container is unhealthy.
- *
- * @throws StorageContainerException if the container is unhealthy.
- */
- @VisibleForTesting
- void checkContainerIsHealthy(KeyValueContainer kvContainer, BlockID blockID,
- Type cmd) {
- kvContainer.readLock();
- try {
- if (kvContainer.getContainerData().getState() == State.UNHEALTHY) {
- LOG.warn("{} request {} for UNHEALTHY container {} replica", cmd,
- blockID, kvContainer.getContainerData().getContainerID());
- }
- } finally {
- kvContainer.readUnlock();
- }
- }
-
/**
* Handle Delete Chunk operation. Calls ChunkManager to process the request.
*/
@@ -860,6 +837,7 @@ ContainerCommandResponseProto handleWriteChunk(
WriteChunkRequestProto writeChunk = request.getWriteChunk();
BlockID blockID = BlockID.getFromProtobuf(writeChunk.getBlockID());
ContainerProtos.ChunkInfo chunkInfoProto = writeChunk.getChunkData();
+
ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
Preconditions.checkNotNull(chunkInfo);
@@ -978,7 +956,6 @@ ContainerCommandResponseProto handleGetSmallFile(
try {
BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock()
.getBlockID());
- checkContainerIsHealthy(kvContainer, blockID, Type.GetSmallFile);
BlockData responseData = blockManager.getBlock(kvContainer, blockID);
ContainerProtos.ChunkInfo chunkInfoProto = null;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index 49d54b78c90..6a1d5533cf2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -38,7 +38,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Map;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 36200d890aa..c5a59da537e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -32,14 +32,12 @@
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
import org.rocksdb.InfoLogLevel;
-import org.rocksdb.StatsLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -49,9 +47,6 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
/**
* Implementation of the {@link DatanodeStore} interface that contains
@@ -119,16 +114,6 @@ public void start(ConfigurationSource config)
options.setMaxTotalWalSize(maxWalSize);
}
- String rocksDbStat = config.getTrimmed(
- OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
- OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-
- if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
- ManagedStatistics statistics = new ManagedStatistics();
- statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
- options.setStatistics(statistics);
- }
-
DatanodeConfiguration dc =
config.getObject(DatanodeConfiguration.class);
// Config user log files
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java
index 51e45335008..84000ba2fb9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java
@@ -94,12 +94,16 @@ private void reconcilePartialChunks(
LOG.debug("blockData={}, lastChunk={}",
blockData.getChunks(), lastChunk.getChunks());
Preconditions.checkState(lastChunk.getChunks().size() == 1);
- ContainerProtos.ChunkInfo lastChunkInBlockData =
- blockData.getChunks().get(blockData.getChunks().size() - 1);
- Preconditions.checkState(
- lastChunkInBlockData.getOffset() + lastChunkInBlockData.getLen()
- == lastChunk.getChunks().get(0).getOffset(),
- "chunk offset does not match");
+ if (!blockData.getChunks().isEmpty()) {
+ ContainerProtos.ChunkInfo lastChunkInBlockData =
+ blockData.getChunks().get(blockData.getChunks().size() - 1);
+ if (lastChunkInBlockData != null) {
+ Preconditions.checkState(
+ lastChunkInBlockData.getOffset() + lastChunkInBlockData.getLen()
+ == lastChunk.getChunks().get(0).getOffset(),
+ "chunk offset does not match");
+ }
+ }
// append last partial chunk to the block data
List chunkInfos =
@@ -136,7 +140,7 @@ private static boolean shouldAppendLastChunk(boolean endOfBlock,
public void putBlockByID(BatchOperation batch, boolean incremental,
long localID, BlockData data, KeyValueContainerData containerData,
boolean endOfBlock) throws IOException {
- if (!incremental && !isPartialChunkList(data)) {
+ if (!incremental || !isPartialChunkList(data)) {
// Case (1) old client: override chunk list.
getBlockDataTable().putWithBatch(
batch, containerData.getBlockKey(localID), data);
@@ -151,14 +155,21 @@ public void putBlockByID(BatchOperation batch, boolean incremental,
private void moveLastChunkToBlockData(BatchOperation batch, long localID,
BlockData data, KeyValueContainerData containerData) throws IOException {
+ // if data has no chunks, fetch the last chunk info from lastChunkInfoTable
+ if (data.getChunks().isEmpty()) {
+ BlockData lastChunk = getLastChunkInfoTable().get(containerData.getBlockKey(localID));
+ if (lastChunk != null) {
+ reconcilePartialChunks(lastChunk, data);
+ }
+ }
// if eob or if the last chunk is full,
// the 'data' is full so append it to the block table's chunk info
// and then remove from lastChunkInfo
BlockData blockData = getBlockDataTable().get(
containerData.getBlockKey(localID));
if (blockData == null) {
- // Case 2.1 if the block did not have full chunks before,
- // the block's chunk is what received from client this time.
+ // Case 2.1: if the block did not have full chunks before,
+ // the block's chunks are what was received from the client this time, plus the chunks in lastChunkInfoTable.
blockData = data;
} else {
// case 2.2 the block already has some full chunks
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index edbff14aca8..1685d1c5fe2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -78,18 +78,18 @@ public class ContainerReader implements Runnable {
private final ConfigurationSource config;
private final File hddsVolumeDir;
private final MutableVolumeSet volumeSet;
- private final boolean shouldDeleteRecovering;
+ private final boolean shouldDelete;
public ContainerReader(
MutableVolumeSet volSet, HddsVolume volume, ContainerSet cset,
- ConfigurationSource conf, boolean shouldDeleteRecovering) {
+ ConfigurationSource conf, boolean shouldDelete) {
Preconditions.checkNotNull(volume);
this.hddsVolume = volume;
this.hddsVolumeDir = hddsVolume.getHddsRootDir();
this.containerSet = cset;
this.config = conf;
this.volumeSet = volSet;
- this.shouldDeleteRecovering = shouldDeleteRecovering;
+ this.shouldDelete = shouldDelete;
}
@Override
@@ -148,7 +148,7 @@ public void readVolume(File hddsVolumeRootDir) {
LOG.info("Start to verify containers on volume {}", hddsVolumeRootDir);
File currentDir = new File(idDir, Storage.STORAGE_DIR_CURRENT);
File[] containerTopDirs = currentDir.listFiles();
- if (containerTopDirs != null) {
+ if (containerTopDirs != null && containerTopDirs.length > 0) {
for (File containerTopDir : containerTopDirs) {
if (containerTopDir.isDirectory()) {
File[] containerDirs = containerTopDir.listFiles();
@@ -214,7 +214,7 @@ public void verifyAndFixupContainerData(ContainerData containerData)
KeyValueContainer kvContainer = new KeyValueContainer(kvContainerData,
config);
if (kvContainer.getContainerState() == RECOVERING) {
- if (shouldDeleteRecovering) {
+ if (shouldDelete) {
kvContainer.markContainerUnhealthy();
LOG.info("Stale recovering container {} marked UNHEALTHY",
kvContainerData.getContainerID());
@@ -223,7 +223,9 @@ public void verifyAndFixupContainerData(ContainerData containerData)
return;
}
if (kvContainer.getContainerState() == DELETED) {
- cleanupContainer(hddsVolume, kvContainer);
+ if (shouldDelete) {
+ cleanupContainer(hddsVolume, kvContainer);
+ }
return;
}
try {
@@ -232,8 +234,10 @@ public void verifyAndFixupContainerData(ContainerData containerData)
if (e.getResult() != ContainerProtos.Result.CONTAINER_EXISTS) {
throw e;
}
- resolveDuplicate((KeyValueContainer) containerSet.getContainer(
- kvContainer.getContainerData().getContainerID()), kvContainer);
+ if (shouldDelete) {
+ resolveDuplicate((KeyValueContainer) containerSet.getContainer(
+ kvContainer.getContainerData().getContainerID()), kvContainer);
+ }
}
} else {
throw new StorageContainerException("Container File is corrupted. " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index f050c96a459..aef3965dcd4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -169,7 +169,6 @@ public OzoneContainer(
containerSet = new ContainerSet(recoveringContainerTimeout);
metadataScanner = null;
- buildContainerSet();
metrics = ContainerMetrics.create(conf);
handlers = Maps.newHashMap();
@@ -286,9 +285,10 @@ public GrpcTlsConfig getTlsClientConfig() {
}
/**
- * Build's container map.
+ * Builds the container map after the volumes have been formatted.
*/
- private void buildContainerSet() {
+ @VisibleForTesting
+ public void buildContainerSet() {
Iterator volumeSetIterator = volumeSet.getVolumesList()
.iterator();
ArrayList volumeThreads = new ArrayList<>();
@@ -442,6 +442,8 @@ public void start(String clusterId) throws IOException {
return;
}
+ buildContainerSet();
+
// Start background volume checks, which will begin after the configured
// delay.
volumeChecker.start();
@@ -584,4 +586,8 @@ public BlockDeletingService getBlockDeletingService() {
return blockDeletingService;
}
+ public ReplicationServer getReplicationServer() {
+ return replicationServer;
+ }
+
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
index 82aa975066c..1929c16089b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -41,7 +42,7 @@
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -64,10 +65,12 @@ public class ContainerImporter {
private final Set<Long> importContainerProgress
= Collections.synchronizedSet(new HashSet<>());
- public ContainerImporter(@NotNull ConfigurationSource conf,
- @NotNull ContainerSet containerSet,
- @NotNull ContainerController controller,
- @NotNull MutableVolumeSet volumeSet) {
+ private final ConfigurationSource conf;
+
+ public ContainerImporter(@Nonnull ConfigurationSource conf,
+ @Nonnull ContainerSet containerSet,
+ @Nonnull ContainerController controller,
+ @Nonnull MutableVolumeSet volumeSet) {
this.containerSet = containerSet;
this.controller = controller;
this.volumeSet = volumeSet;
@@ -79,6 +82,7 @@ public ContainerImporter(@NotNull ConfigurationSource conf,
containerSize = (long) conf.getStorageSize(
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
+ this.conf = conf;
}
public boolean isAllowedContainerImport(long containerID) {
@@ -112,14 +116,14 @@ public void importContainer(long containerID, Path tarFilePath,
}
KeyValueContainerData containerData;
- TarContainerPacker packer = new TarContainerPacker(compression);
+ TarContainerPacker packer = getPacker(compression);
try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) {
byte[] containerDescriptorYaml =
packer.unpackContainerDescriptor(input);
- containerData = (KeyValueContainerData) ContainerDataYaml
- .readContainer(containerDescriptorYaml);
+ containerData = getKeyValueContainerData(containerDescriptorYaml);
}
+ ContainerUtils.verifyChecksum(containerData, conf);
containerData.setVolume(targetVolume);
try (FileInputStream input = new FileInputStream(tarFilePath.toFile())) {
@@ -154,4 +158,19 @@ public static Path getUntarDirectory(HddsVolume hddsVolume)
return Paths.get(hddsVolume.getVolumeRootDir())
.resolve(CONTAINER_COPY_TMP_DIR).resolve(CONTAINER_COPY_DIR);
}
+
+ protected KeyValueContainerData getKeyValueContainerData(
+ byte[] containerDescriptorYaml) throws IOException {
+ return (KeyValueContainerData) ContainerDataYaml
+ .readContainer(containerDescriptorYaml);
+ }
+
+ protected Set<Long> getImportContainerProgress() {
+ return this.importContainerProgress;
+ }
+
+ protected TarContainerPacker getPacker(CopyContainerCompression compression) {
+ return new TarContainerPacker(compression);
+ }
+
}
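
The new protected getPacker and getKeyValueContainerData accessors read as test seams: a test subclass can substitute a packer or descriptor parser, for example to simulate a corrupted archive and exercise the new verifyChecksum path. The sketch below shows that protected-factory pattern with stand-in classes; Importer and Packer are not the real ContainerImporter/TarContainerPacker types.

```java
// Illustrative protected-factory test seam; stand-in classes only.
class Packer {
  byte[] unpackDescriptor(byte[] archive) {
    return archive; // a real packer would untar and extract the descriptor YAML
  }
}

class Importer {
  byte[] importArchive(byte[] archive) {
    return getPacker().unpackDescriptor(archive);
  }

  protected Packer getPacker() {
    return new Packer();
  }
}

class FaultyPackerImporter extends Importer {
  @Override
  protected Packer getPacker() {
    // A test can inject a packer that simulates a corrupted container archive.
    return new Packer() {
      @Override
      byte[] unpackDescriptor(byte[] archive) {
        throw new IllegalStateException("simulated corrupt container archive");
      }
    };
  }
}
```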
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java
index 2d0955d47e5..8506364f983 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java
@@ -24,7 +24,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.atomic.AtomicBoolean;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java
index 3feb5747486..d2407a61d0b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java
@@ -23,6 +23,7 @@
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
@@ -154,6 +155,31 @@ public int getPort() {
return port;
}
+ public void setPoolSize(int size) {
+ if (size <= 0) {
+ throw new IllegalArgumentException("Pool size must be positive.");
+ }
+
+ int currentCorePoolSize = executor.getCorePoolSize();
+
+ // In ThreadPoolExecutor, maximumPoolSize must always be greater than or
+ // equal to the corePoolSize. We must make sure this invariant holds when
+ // changing the pool size. Therefore, we take into account whether the
+ // new size is greater or smaller than the current core pool size.
+ if (size > currentCorePoolSize) {
+ executor.setMaximumPoolSize(size);
+ executor.setCorePoolSize(size);
+ } else {
+ executor.setCorePoolSize(size);
+ executor.setMaximumPoolSize(size);
+ }
+ }
+
+ @VisibleForTesting
+ public ThreadPoolExecutor getExecutor() {
+ return executor;
+ }
+
/**
* Replication-related configuration.
*/
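
The ordering constraint described in the setPoolSize comment can be demonstrated in isolation: ThreadPoolExecutor rejects a maximumPoolSize below the current corePoolSize, so growth must raise the maximum first and shrinkage must lower the core first. A self-contained sketch (not the ReplicationServer code) follows.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Standalone illustration of the resize-order invariant:
// maximumPoolSize must never drop below corePoolSize.
public final class PoolResize {
  public static void resize(ThreadPoolExecutor executor, int size) {
    if (size <= 0) {
      throw new IllegalArgumentException("Pool size must be positive.");
    }
    if (size > executor.getCorePoolSize()) {
      executor.setMaximumPoolSize(size); // raise the ceiling before the core
      executor.setCorePoolSize(size);
    } else {
      executor.setCorePoolSize(size);    // lower the core before the ceiling
      executor.setMaximumPoolSize(size);
    }
  }

  public static void main(String[] args) {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
        10, 10, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    resize(executor, 4);  // shrink: core first, then max
    resize(executor, 16); // grow: max first, then core
    executor.shutdown();
  }
}
```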
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index e8d80e5dc92..fd8cf05b294 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -72,24 +72,20 @@ RegisteredCommand register(DatanodeDetails datanodeDetails,
* TODO: Cleanup and update tests, HDDS-9642.
*
* @param datanodeDetails - Datanode ID.
- * @param layoutVersionInfo - Layout Version Proto.
* @return Commands to be sent to the datanode.
*/
- default List processHeartbeat(DatanodeDetails datanodeDetails,
- LayoutVersionProto layoutVersionInfo) {
- return processHeartbeat(datanodeDetails, layoutVersionInfo, null);
+ default List processHeartbeat(DatanodeDetails datanodeDetails) {
+ return processHeartbeat(datanodeDetails, null);
};
/**
* Send heartbeat to indicate the datanode is alive and doing well.
* @param datanodeDetails - Datanode ID.
- * @param layoutVersionInfo - Layout Version Proto.
* @param queueReport - The CommandQueueReportProto report from the
* heartbeating datanode.
* @return Commands to be sent to the datanode.
*/
List processHeartbeat(DatanodeDetails datanodeDetails,
- LayoutVersionProto layoutVersionInfo,
CommandQueueReportProto queueReport);
/**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java
index 8d3de5218af..f73f14f0c27 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdds.datanode.metadata;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.security.SecurityConfig;
@@ -27,13 +26,13 @@
import org.apache.hadoop.hdds.security.x509.crl.CRLInfo;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.ozone.test.GenericTestUtils;
import org.bouncycastle.asn1.x509.CRLReason;
import org.bouncycastle.cert.X509CertificateHolder;
import org.bouncycastle.cert.X509v2CRLBuilder;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.security.KeyPair;
@@ -47,6 +46,7 @@
* Test class for {@link DatanodeCRLStoreImpl}.
*/
public class TestDatanodeCRLStoreImpl {
+ @TempDir
private File testDir;
private OzoneConfiguration conf;
private DatanodeCRLStore dnCRLStore;
@@ -56,7 +56,6 @@ public class TestDatanodeCRLStoreImpl {
@BeforeEach
public void setUp() throws Exception {
- testDir = GenericTestUtils.getRandomizedTestDir();
conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
dnCRLStore = new DatanodeCRLStoreImpl(conf);
@@ -71,7 +70,6 @@ public void destroyDbStore() throws Exception {
if (dnCRLStore.getStore() != null) {
dnCRLStore.getStore().close();
}
- FileUtil.fullyDelete(testDir);
}
@Test
public void testCRLStore() throws Exception {
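
These hunks follow a pattern that recurs throughout this patch: manually created GenericTestUtils directories plus FileUtil/FileUtils cleanup are replaced by JUnit 5's @TempDir, which creates a fresh directory before each test and deletes it afterwards. A minimal self-contained sketch of that pattern (names are illustrative):

```java
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import static org.junit.jupiter.api.Assertions.assertTrue;

class TempDirPatternExample {

  @TempDir
  private File testDir;   // injected before each test, deleted after it

  @Test
  void writesIntoManagedDirectories(@TempDir Path scratch) throws Exception {
    // Parameter injection works as well; both directories are cleaned up
    // by JUnit, so no @AfterEach delete is needed.
    assertTrue(new File(testDir, "meta").mkdirs());
    assertTrue(Files.exists(Files.createFile(scratch.resolve("marker"))));
  }
}
```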
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
index 8a3921d7953..cc88940611a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
@@ -24,7 +24,6 @@
import java.util.List;
import java.util.UUID;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -39,11 +38,8 @@
import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.ozone.test.GenericTestUtils;
import org.apache.hadoop.util.ServicePlugin;
-import org.junit.jupiter.api.AfterEach;
-
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
@@ -57,6 +53,7 @@
import static org.junit.jupiter.api.Assertions.assertNull;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import org.slf4j.Logger;
@@ -68,6 +65,7 @@
public class TestHddsDatanodeService {
+ @TempDir
private File testDir;
private static final Logger LOG =
LoggerFactory.getLogger(TestHddsDatanodeService.class);
@@ -92,7 +90,6 @@ public void setUp() throws IOException {
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
serverAddresses.toArray(new String[0]));
- testDir = GenericTestUtils.getRandomizedTestDir();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
conf.set(OZONE_SCM_NAMES, "localhost");
conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, MockService.class,
@@ -109,11 +106,6 @@ public void setUp() throws IOException {
conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
}
- @AfterEach
- public void tearDown() {
- FileUtil.fullyDelete(testDir);
- }
-
@Test
public void testStartup() {
service.start(conf);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index 95d136cdfca..10d2bc91a71 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -29,7 +29,6 @@
import java.util.List;
import java.util.concurrent.Callable;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -59,23 +58,24 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.apache.ozone.test.tag.Flaky;
import org.bouncycastle.cert.X509CertificateHolder;
-import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
/**
* Test class for {@link HddsDatanodeService}.
*/
public class TestHddsSecureDatanodeInit {
+ @TempDir
private static File testDir;
private static OzoneConfiguration conf;
private static HddsDatanodeService service;
@@ -96,7 +96,6 @@ public class TestHddsSecureDatanodeInit {
@BeforeAll
public static void setUp() throws Exception {
- testDir = GenericTestUtils.getRandomizedTestDir();
conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
//conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
@@ -143,11 +142,6 @@ SCMSecurityProtocolClientSideTranslatorPB createScmSecurityClient()
scmClient = mock(SCMSecurityProtocolClientSideTranslatorPB.class);
}
- @AfterAll
- public static void tearDown() {
- FileUtil.fullyDelete(testDir);
- }
-
@BeforeEach
public void setUpDNCertClient() throws IOException {
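
TestHddsSecureDatanodeInit uses the static variant of the same pattern: a static @TempDir field is injected before @BeforeAll runs, the single directory is shared by every test in the class, and it is removed once after the last test. A small sketch (illustrative names):

```java
import java.io.File;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import static org.junit.jupiter.api.Assertions.assertTrue;

class StaticTempDirExample {

  @TempDir
  private static File sharedDir;   // one directory for the whole class

  @BeforeAll
  static void setUp() {
    // The directory already exists here, so class-level fixtures can use it.
    assertTrue(sharedDir.isDirectory());
  }

  @Test
  void usesTheSharedDirectory() {
    assertTrue(new File(sharedDir, "certs").mkdirs());
  }
}
```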
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index e04d8f00493..33bc4a85166 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -73,7 +73,7 @@
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
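
The repeated import change from org.mockito.ArgumentMatchers to org.mockito.Mockito is purely cosmetic: Mockito extends ArgumentMatchers, so the same matcher methods are reachable through either class. A small sketch of a stub using the Mockito-qualified matcher (hypothetical interface, not from this patch):

```java
import java.util.Collections;
import java.util.List;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;   // same matcher as ArgumentMatchers.any
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class MatcherImportExample {

  interface BlockLookup {
    String find(List<Long> ids);
  }

  @Test
  void stubsWithAnyMatcher() {
    BlockLookup lookup = mock(BlockLookup.class);
    when(lookup.find(any())).thenReturn("stubbed");
    assertEquals("stubbed", lookup.find(Collections.singletonList(1L)));
  }
}
```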
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index a7f6f537048..7917a4ce55c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -22,7 +22,6 @@
import java.net.InetSocketAddress;
import java.net.ServerSocket;
-import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -40,12 +39,9 @@
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
-import org.apache.ozone.test.GenericTestUtils;
import com.google.protobuf.BlockingService;
-import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
-import static org.apache.logging.log4j.util.StackLocatorUtil.getCallerClass;
import static org.mockito.Mockito.mock;
/**
@@ -125,15 +121,6 @@ public static InetSocketAddress getReuseableAddress() throws IOException {
}
}
- public static OzoneConfiguration getConf() {
- String name = getCallerClass(2).getSimpleName()
- + "-" + randomAlphanumeric(10);
- File testDir = GenericTestUtils.getTestDir(name);
- Runtime.getRuntime().addShutdownHook(new Thread(
- () -> FileUtils.deleteQuietly(testDir)));
- return getConf(testDir);
- }
-
public static OzoneConfiguration getConf(File testDir) {
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index b408ec201de..bc56141fb08 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -19,7 +19,6 @@
import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
@@ -76,6 +75,7 @@
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.LoggerFactory;
import java.io.File;
@@ -111,7 +111,7 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -121,6 +121,7 @@
@Timeout(30)
public class TestBlockDeletingService {
+ @TempDir
private File testRoot;
private String scmId;
private String datanodeUuid;
@@ -134,12 +135,6 @@ public class TestBlockDeletingService {
@BeforeEach
public void init() throws IOException {
CodecBuffer.enableLeakDetection();
-
- testRoot = GenericTestUtils
- .getTestDir(TestBlockDeletingService.class.getSimpleName());
- if (testRoot.exists()) {
- FileUtils.cleanDirectory(testRoot);
- }
scmId = UUID.randomUUID().toString();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
@@ -152,7 +147,6 @@ public void init() throws IOException {
@AfterEach
public void cleanup() throws IOException {
BlockUtils.shutdownCache(conf);
- FileUtils.deleteDirectory(testRoot);
CodecBuffer.assertNoLeaks();
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 208f521ec36..2381209bb6b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -33,7 +33,6 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@@ -145,7 +144,7 @@ public void testContainerCacheEviction() throws Exception {
}
@Test
- public void testConcurrentDBGet() throws Exception {
+ void testConcurrentDBGet() throws Exception {
File root = new File(testRoot);
root.mkdirs();
root.deleteOnExit();
@@ -172,11 +171,7 @@ public void testConcurrentDBGet() throws Exception {
futureList.add(executorService.submit(task));
futureList.add(executorService.submit(task));
for (Future future: futureList) {
- try {
- future.get();
- } catch (InterruptedException | ExecutionException e) {
- fail("Should get the DB instance");
- }
+ future.get();
}
ReferenceCountedDB db = cache.getDB(1, "RocksDB",
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index a7291e9018f..5738f5c1106 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -28,8 +28,6 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -52,10 +50,10 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -70,17 +68,18 @@ public class TestDatanodeStateMachine {
LoggerFactory.getLogger(TestDatanodeStateMachine.class);
// Changed to 1, because the current code checks for multiple SCM directories
// and fails if they already exist
- private final int scmServerCount = 1;
+ private static final int SCM_SERVER_COUNT = 1;
private List serverAddresses;
private List scmServers;
private List mockServers;
private ExecutorService executorService;
private OzoneConfiguration conf;
+ @TempDir
private File testRoot;
@BeforeEach
- public void setUp() throws Exception {
- conf = SCMTestUtils.getConf();
+ void setUp() throws Exception {
+ conf = SCMTestUtils.getConf(testRoot);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
TimeUnit.MILLISECONDS);
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
@@ -92,7 +91,7 @@ public void setUp() throws Exception {
serverAddresses = new ArrayList<>();
scmServers = new ArrayList<>();
mockServers = new ArrayList<>();
- for (int x = 0; x < scmServerCount; x++) {
+ for (int x = 0; x < SCM_SERVER_COUNT; x++) {
int port = SCMTestUtils.getReuseableAddress().getPort();
String address = "127.0.0.1";
serverAddresses.add(address + ":" + port);
@@ -105,22 +104,6 @@ public void setUp() throws Exception {
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
serverAddresses.toArray(new String[0]));
- String path = GenericTestUtils
- .getTempPath(TestDatanodeStateMachine.class.getSimpleName());
- testRoot = new File(path);
- if (!testRoot.mkdirs()) {
- LOG.info("Required directories {} already exist.", testRoot);
- }
-
- File dataDir = new File(testRoot, "data");
- conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath());
- if (!dataDir.mkdirs()) {
- LOG.info("Data dir create failed.");
- }
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
- new File(testRoot, "scm").getAbsolutePath());
- path = new File(testRoot, "datanodeID").getAbsolutePath();
- conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, path);
executorService = HadoopExecutors.newCachedThreadPool(
new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("TestDataNodeStateMachineThread-%d").build());
@@ -149,8 +132,6 @@ public void tearDown() throws Exception {
}
} catch (Exception e) {
//ignore all exception from the shutdown
- } finally {
- FileUtil.fullyDelete(testRoot);
}
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
index 32b1fc284bf..e00df0579d3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
@@ -31,7 +31,6 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test DatanodeStoreCache.
@@ -43,7 +42,7 @@ public class TestDatanodeStoreCache {
private OzoneConfiguration conf = new OzoneConfiguration();
@Test
- public void testBasicOperations() throws IOException {
+ void testBasicOperations() throws IOException {
DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
String dbPath1 = Files.createDirectory(folder.resolve("basic1"))
.toFile().toString();
@@ -71,11 +70,7 @@ public void testBasicOperations() throws IOException {
assertEquals(1, cache.size());
// test remove non-exist
- try {
- cache.removeDB(dbPath1);
- } catch (Exception e) {
- fail("Should not throw " + e);
- }
+ cache.removeDB(dbPath1);
// test shutdown
cache.shutdownCache();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 2465b03a68b..2235b23ce88 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -72,8 +72,7 @@
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -370,13 +369,8 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion)
for (Table.KeyValue chunkListKV: deletedBlocks) {
preUpgradeBlocks.add(chunkListKV.getKey());
- try {
- chunkListKV.getValue();
- fail("No exception thrown when trying to retrieve old " +
- "deleted blocks values as chunk lists.");
- } catch (IOException ex) {
- // Exception thrown as expected.
- }
+ assertThrows(IOException.class, () -> chunkListKV.getValue(),
+ "No exception thrown when trying to retrieve old deleted blocks values as chunk lists.");
}
assertEquals(TestDB.NUM_DELETED_BLOCKS, preUpgradeBlocks.size());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index a828c1e6924..0c4612b79fa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -75,7 +75,7 @@
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
index 84dfa3b2464..644ee014e9f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java
@@ -57,8 +57,8 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 4ccfb2e35de..ec78398824e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -43,7 +43,6 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This class tests create/read .container files.
@@ -205,41 +204,35 @@ public void testIncorrectContainerFile(ContainerLayoutVersion layout) {
@ContainerLayoutTestInfo.ContainerTest
- public void testCheckBackWardCompatibilityOfContainerFile(
- ContainerLayoutVersion layout) {
+ void testCheckBackWardCompatibilityOfContainerFile(
+ ContainerLayoutVersion layout) throws Exception {
setLayoutVersion(layout);
// This test covers the rollback scenario: after an upgrade, the newer
// server adds extra fields to the .container files it writes. If we later
// roll back, an older Ozone version must still be able to read those
// .container files.
- try {
- String containerFile = "additionalfields.container";
- //Get file from resources folder
- ClassLoader classLoader = getClass().getClassLoader();
- File file = new File(classLoader.getResource(containerFile).getFile());
- KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
- .readContainerFile(file);
- ContainerUtils.verifyChecksum(kvData, conf);
+ String containerFile = "additionalfields.container";
+ //Get file from resources folder
+ ClassLoader classLoader = getClass().getClassLoader();
+ File file = new File(classLoader.getResource(containerFile).getFile());
+ KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
+ .readContainerFile(file);
+ ContainerUtils.verifyChecksum(kvData, conf);
- //Checking the Container file data is consistent or not
- assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
- .getState());
- assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType());
- assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
- .getContainerType());
- assertEquals(9223372036854775807L, kvData.getContainerID());
- assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
- .getChunksPath());
- assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
- .getMetadataPath());
- assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion());
- assertEquals(2, kvData.getMetadata().size());
-
- } catch (Exception ex) {
- ex.printStackTrace();
- fail("testCheckBackWardCompatibilityOfContainerFile failed");
- }
+ //Checking the Container file data is consistent or not
+ assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
+ .getState());
+ assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType());
+ assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
+ .getContainerType());
+ assertEquals(9223372036854775807L, kvData.getContainerID());
+ assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
+ .getChunksPath());
+ assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
+ .getMetadataPath());
+ assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion());
+ assertEquals(2, kvData.getMetadata().size());
}
/**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index cd5f6c0f9b6..890bca18cb1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -40,8 +40,8 @@
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.common.impl.BlockDeletingService.ContainerBlockInfo;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -54,6 +54,8 @@
* The class for testing container deletion choosing policy.
*/
public class TestContainerDeletionChoosingPolicy {
+ @TempDir
+ private File tempFile;
private String path;
private OzoneContainer ozoneContainer;
private ContainerSet containerSet;
@@ -63,23 +65,15 @@ public class TestContainerDeletionChoosingPolicy {
private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0;
private static final int SERVICE_INTERVAL_IN_MILLISECONDS = 1000;
- private ContainerLayoutVersion layoutVersion;
-
- public void setLayoutVersion(ContainerLayoutVersion layout) {
- this.layoutVersion = layout;
- }
-
@BeforeEach
public void init() throws Throwable {
conf = new OzoneConfiguration();
- path = GenericTestUtils
- .getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName());
+ path = tempFile.getPath();
}
@ContainerLayoutTestInfo.ContainerTest
public void testRandomChoosingPolicy(ContainerLayoutVersion layout)
throws IOException {
- setLayoutVersion(layout);
File containerDir = new File(path);
if (containerDir.exists()) {
FileUtils.deleteDirectory(new File(path));
@@ -143,7 +137,6 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout)
@ContainerLayoutTestInfo.ContainerTest
public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout)
throws IOException {
- setLayoutVersion(layout);
File containerDir = new File(path);
if (containerDir.exists()) {
FileUtils.deleteDirectory(new File(path));
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index fcc48fef1bd..3ff8f9e625d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -73,7 +73,6 @@
import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.apache.ozone.test.GenericTestUtils;
import com.google.common.collect.Maps;
import org.apache.commons.io.FileUtils;
@@ -82,6 +81,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -98,7 +98,6 @@
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
@@ -113,6 +112,8 @@ public class TestContainerPersistence {
private static final String SCM_ID = UUID.randomUUID().toString();
private static final Logger LOGGER =
LoggerFactory.getLogger(TestContainerPersistence.class);
+ @TempDir
+ private static File hddsFile;
private static String hddsPath;
private static OzoneConfiguration conf;
private static VolumeChoosingPolicy volumeChoosingPolicy;
@@ -138,8 +139,7 @@ private void initSchemaAndVersionInfo(ContainerTestVersionInfo versionInfo) {
@BeforeAll
public static void init() {
conf = new OzoneConfiguration();
- hddsPath = GenericTestUtils
- .getTempPath(TestContainerPersistence.class.getSimpleName());
+ hddsPath = hddsFile.getPath();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, hddsPath);
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
@@ -248,12 +248,8 @@ public void testCreateDuplicateContainer(ContainerTestVersionInfo versionInfo)
long testContainerID = getTestContainerID();
Container container = addContainer(containerSet, testContainerID);
- try {
- containerSet.addContainer(container);
- fail("Expected Exception not thrown.");
- } catch (IOException ex) {
- assertNotNull(ex);
- }
+ IOException ex = assertThrows(IOException.class, () -> containerSet.addContainer(container));
+ assertNotNull(ex);
}
@ContainerTestVersionInfo.ContainerTest
@@ -544,7 +540,7 @@ public void testGetContainerReports(ContainerTestVersionInfo versionInfo)
long actualContainerID = report.getContainerID();
assertTrue(containerIDs.remove(actualContainerID));
}
- assertTrue(containerIDs.isEmpty());
+ assertThat(containerIDs).isEmpty();
}
/**
@@ -585,7 +581,7 @@ public void testListContainer(ContainerTestVersionInfo versionInfo)
}
// Assert that we listed all the keys that we had put into
// container.
- assertTrue(testMap.isEmpty());
+ assertThat(testMap).isEmpty();
}
private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException {
@@ -799,26 +795,23 @@ public void testPutBlockWithInvalidBCSId(ContainerTestVersionInfo versionInfo)
blockData.setBlockCommitSequenceId(4);
blockManager.putBlock(container, blockData);
BlockData readBlockData;
- try {
+ StorageContainerException sce = assertThrows(StorageContainerException.class, () -> {
blockID1.setBlockCommitSequenceId(5);
// read with bcsId higher than container bcsId
blockManager.
getBlock(container, blockID1);
- fail("Expected exception not thrown");
- } catch (StorageContainerException sce) {
- assertSame(UNKNOWN_BCSID, sce.getResult());
- }
+ });
+ assertSame(UNKNOWN_BCSID, sce.getResult());
- try {
+ sce = assertThrows(StorageContainerException.class, () -> {
blockID1.setBlockCommitSequenceId(4);
// read with bcsId lower than container bcsId but greater than committed
// bcsId.
blockManager.
getBlock(container, blockID1);
- fail("Expected exception not thrown");
- } catch (StorageContainerException sce) {
- assertSame(BCSID_MISMATCH, sce.getResult());
- }
+ });
+ assertSame(BCSID_MISMATCH, sce.getResult());
+
readBlockData = blockManager.
getBlock(container, blockData.getBlockID());
ChunkInfo readChunk =
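
Another idiom repeated across this patch: try { ...; fail(...); } catch blocks become assertThrows, which fails the test when nothing is thrown and returns the caught exception, so result checks such as assertSame(UNKNOWN_BCSID, sce.getResult()) stay outside the lambda. A self-contained sketch of the idiom (illustrative method, not from this patch):

```java
import java.io.IOException;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

class AssertThrowsIdiomExample {

  private long parseContainerId(String raw) throws IOException {
    try {
      return Long.parseLong(raw);
    } catch (NumberFormatException e) {
      throw new IOException("invalid container id: " + raw, e);
    }
  }

  @Test
  void rejectsMalformedId() {
    // Fails the test if no IOException is thrown, and returns the caught
    // exception for follow-up assertions on its state.
    IOException ex = assertThrows(IOException.class, () -> parseContainerId("x"));
    assertEquals("invalid container id: x", ex.getMessage());
  }
}
```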
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index bd035632403..95df6c647f8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -20,7 +20,6 @@
import com.google.common.collect.Maps;
import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
@@ -63,6 +62,7 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -87,7 +87,7 @@
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
@@ -101,6 +101,8 @@
public class TestHddsDispatcher {
private static final Logger LOG = LoggerFactory.getLogger(
TestHddsDispatcher.class);
+ @TempDir
+ private File testDir;
public static final IncrementalReportSender NO_OP_ICR_SENDER =
c -> {
@@ -110,11 +112,10 @@ public class TestHddsDispatcher {
public void testContainerCloseActionWhenFull(
ContainerLayoutVersion layout) throws IOException {
- String testDir = GenericTestUtils.getTempPath(
- TestHddsDispatcher.class.getSimpleName());
+ String testDirPath = testDir.getPath();
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(HDDS_DATANODE_DIR_KEY, testDir);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+ conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
DatanodeDetails dd = randomDatanodeDetails();
MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf,
null, StorageVolume.VolumeType.DATA_VOLUME, null);
@@ -160,22 +161,20 @@ public void testContainerCloseActionWhenFull(
} finally {
volumeSet.shutdown();
ContainerMetrics.remove();
- FileUtils.deleteDirectory(new File(testDir));
}
}
@ContainerLayoutTestInfo.ContainerTest
public void testContainerCloseActionWhenVolumeFull(
ContainerLayoutVersion layoutVersion) throws Exception {
- String testDir = GenericTestUtils.getTempPath(
- TestHddsDispatcher.class.getSimpleName());
+ String testDirPath = testDir.getPath();
OzoneConfiguration conf = new OzoneConfiguration();
conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
100.0, StorageUnit.BYTES);
DatanodeDetails dd = randomDatanodeDetails();
HddsVolume.Builder volumeBuilder =
- new HddsVolume.Builder(testDir).datanodeUuid(dd.getUuidString())
+ new HddsVolume.Builder(testDirPath).datanodeUuid(dd.getUuidString())
.conf(conf).usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
// state of cluster : available (140) > 100 ,datanode volume
// utilisation threshold not yet reached. container creates are successful.
@@ -237,19 +236,17 @@ public void testContainerCloseActionWhenVolumeFull(
} finally {
volumeSet.shutdown();
ContainerMetrics.remove();
- FileUtils.deleteDirectory(new File(testDir));
}
}
@Test
public void testCreateContainerWithWriteChunk() throws IOException {
- String testDir =
- GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
+ String testDirPath = testDir.getPath();
try {
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(HDDS_DATANODE_DIR_KEY, testDir);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+ conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
DatanodeDetails dd = randomDatanodeDetails();
HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
ContainerCommandRequestProto writeChunkRequest =
@@ -292,19 +289,17 @@ public void testCreateContainerWithWriteChunk() throws IOException {
}
} finally {
ContainerMetrics.remove();
- FileUtils.deleteDirectory(new File(testDir));
}
}
@Test
public void testContainerNotFoundWithCommitChunk() throws IOException {
- String testDir =
- GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
+ String testDirPath = testDir.getPath();
try {
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(HDDS_DATANODE_DIR_KEY, testDir);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+ conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
DatanodeDetails dd = randomDatanodeDetails();
HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
ContainerCommandRequestProto writeChunkRequest =
@@ -329,19 +324,17 @@ public void testContainerNotFoundWithCommitChunk() throws IOException {
+ " does not exist");
} finally {
ContainerMetrics.remove();
- FileUtils.deleteDirectory(new File(testDir));
}
}
@Test
public void testWriteChunkWithCreateContainerFailure() throws IOException {
- String testDir = GenericTestUtils.getTempPath(
- TestHddsDispatcher.class.getSimpleName());
+ String testDirPath = testDir.getPath();
try {
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(HDDS_DATANODE_DIR_KEY, testDir);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+ conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
DatanodeDetails dd = randomDatanodeDetails();
HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(
@@ -366,19 +359,17 @@ public void testWriteChunkWithCreateContainerFailure() throws IOException {
+ " creation failed , Result: DISK_OUT_OF_SPACE");
} finally {
ContainerMetrics.remove();
- FileUtils.deleteDirectory(new File(testDir));
}
}
@Test
public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException {
- String testDir = GenericTestUtils.getTempPath(
- TestHddsDispatcher.class.getSimpleName());
+ String testDirPath = testDir.getPath();
try {
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(HDDS_DATANODE_DIR_KEY, testDir);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+ conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
DatanodeDetails dd = randomDatanodeDetails();
HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(
@@ -426,7 +417,6 @@ public void testDuplicateWriteChunkAndPutBlockRequest() throws IOException {
}
} finally {
ContainerMetrics.remove();
- FileUtils.deleteDirectory(new File(testDir));
}
}
@@ -548,11 +538,10 @@ private ContainerCommandRequestProto getReadChunkRequest(
@Test
public void testValidateToken() throws Exception {
- final String testDir = GenericTestUtils.getRandomizedTempPath();
try {
final OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(HDDS_DATANODE_DIR_KEY, testDir);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+ conf.set(HDDS_DATANODE_DIR_KEY, testDir.getPath());
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
final DatanodeDetails dd = randomDatanodeDetails();
final UUID scmId = UUID.randomUUID();
@@ -611,7 +600,6 @@ public void verify(String user, Token> token,
}
} finally {
ContainerMetrics.remove();
- FileUtils.deleteDirectory(new File(testDir));
}
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
index a4568414fc9..43aadc37c04 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
@@ -24,8 +24,8 @@
import org.junit.jupiter.api.Test;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index f05a8c6dfe3..219645c8edc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -43,8 +43,8 @@
import static org.apache.hadoop.ozone.OzoneConsts.GB;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java
index d161f5537ae..ac60fba1ae9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestClosePipelineCommandHandler.java
@@ -45,9 +45,9 @@
import java.util.List;
import java.util.stream.Collectors;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java
index bfce4065d08..4e9005979b9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java
@@ -48,7 +48,7 @@
import java.util.Collections;
import java.util.List;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
index 15ac94056b8..90ed4ca4cc9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
@@ -73,8 +73,8 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index 8b7241f7851..09fa8a99177 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -25,7 +25,7 @@
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
index 9ee0d17dde6..4718df3ae3f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.container.common.volume;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
@@ -29,8 +28,9 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
-import java.io.File;
+import java.nio.file.Path;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
@@ -38,7 +38,6 @@
import java.util.Map;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -52,35 +51,34 @@ public class TestCapacityVolumeChoosingPolicy {
private final List volumes = new ArrayList<>();
private static final OzoneConfiguration CONF = new OzoneConfiguration();
- private static final String BASE_DIR =
- getTestDir(TestCapacityVolumeChoosingPolicy.class.getSimpleName())
- .getAbsolutePath();
- private static final String VOLUME_1 = BASE_DIR + "disk1";
- private static final String VOLUME_2 = BASE_DIR + "disk2";
- private static final String VOLUME_3 = BASE_DIR + "disk3";
+ @TempDir
+ private Path baseDir;
@BeforeEach
public void setup() throws Exception {
+ String volume1 = baseDir + "disk1";
+ String volume2 = baseDir + "disk2";
+ String volume3 = baseDir + "disk3";
policy = new CapacityVolumeChoosingPolicy();
SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100);
SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of(
source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
- HddsVolume vol1 = new HddsVolume.Builder(VOLUME_1)
+ HddsVolume vol1 = new HddsVolume.Builder(volume1)
.conf(CONF)
.usageCheckFactory(factory1)
.build();
SpaceUsageSource source2 = MockSpaceUsageSource.fixed(500, 200);
SpaceUsageCheckFactory factory2 = MockSpaceUsageCheckFactory.of(
source2, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
- HddsVolume vol2 = new HddsVolume.Builder(VOLUME_2)
+ HddsVolume vol2 = new HddsVolume.Builder(volume2)
.conf(CONF)
.usageCheckFactory(factory2)
.build();
SpaceUsageSource source3 = MockSpaceUsageSource.fixed(500, 300);
SpaceUsageCheckFactory factory3 = MockSpaceUsageCheckFactory.of(
source3, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
- HddsVolume vol3 = new HddsVolume.Builder(VOLUME_3)
+ HddsVolume vol3 = new HddsVolume.Builder(volume3)
.conf(CONF)
.usageCheckFactory(factory3)
.build();
@@ -94,9 +92,6 @@ public void setup() throws Exception {
@AfterEach
public void cleanUp() {
volumes.forEach(HddsVolume::shutdown);
- FileUtil.fullyDelete(new File(VOLUME_1));
- FileUtil.fullyDelete(new File(VOLUME_2));
- FileUtil.fullyDelete(new File(VOLUME_3));
}
@Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 72f1d451b52..cc6fe87e19d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -18,12 +18,11 @@
package org.apache.hadoop.ozone.container.common.volume;
-import java.io.File;
+import java.nio.file.Path;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
@@ -35,8 +34,8 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -50,27 +49,26 @@ public class TestRoundRobinVolumeChoosingPolicy {
private final List volumes = new ArrayList<>();
private static final OzoneConfiguration CONF = new OzoneConfiguration();
- private static final String BASE_DIR =
- getTestDir(TestRoundRobinVolumeChoosingPolicy.class.getSimpleName())
- .getAbsolutePath();
- private static final String VOLUME_1 = BASE_DIR + "disk1";
- private static final String VOLUME_2 = BASE_DIR + "disk2";
+ @TempDir
+ private Path baseDir;
@BeforeEach
public void setup() throws Exception {
+ String volume1 = baseDir + "disk1";
+ String volume2 = baseDir + "disk2";
policy = new RoundRobinVolumeChoosingPolicy();
SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100);
SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of(
source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
- HddsVolume vol1 = new HddsVolume.Builder(VOLUME_1)
+ HddsVolume vol1 = new HddsVolume.Builder(volume1)
.conf(CONF)
.usageCheckFactory(factory1)
.build();
SpaceUsageSource source2 = MockSpaceUsageSource.fixed(500, 200);
SpaceUsageCheckFactory factory2 = MockSpaceUsageCheckFactory.of(
source2, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
- HddsVolume vol2 = new HddsVolume.Builder(VOLUME_2)
+ HddsVolume vol2 = new HddsVolume.Builder(volume2)
.conf(CONF)
.usageCheckFactory(factory2)
.build();
@@ -83,8 +81,6 @@ public void setup() throws Exception {
@AfterEach
public void cleanUp() {
volumes.forEach(HddsVolume::shutdown);
- FileUtil.fullyDelete(new File(VOLUME_1));
- FileUtil.fullyDelete(new File(VOLUME_2));
}
@Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 9d90659552e..1159d4277c7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -20,22 +20,18 @@
import java.io.IOException;
import org.apache.commons.io.FileUtils;
-import org.junit.jupiter.api.Timeout;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
.HDDS_VOLUME_DIR;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assumptions.assumeThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -44,9 +40,13 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.lang.reflect.Method;
+import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
@@ -59,12 +59,13 @@ public class TestVolumeSet {
private OzoneConfiguration conf;
private MutableVolumeSet volumeSet;
- private final String baseDir = MiniDFSCluster.getBaseDirectory();
- private final String volume1 = baseDir + "disk1";
- private final String volume2 = baseDir + "disk2";
- private final List volumes = new ArrayList<>();
- private static final String DUMMY_IP_ADDR = "0.0.0.0";
+ @TempDir
+ private Path baseDir;
+
+ private String volume1;
+ private String volume2;
+ private final List volumes = new ArrayList<>();
private void initializeVolumeSet() throws Exception {
volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf,
@@ -74,6 +75,9 @@ private void initializeVolumeSet() throws Exception {
@BeforeEach
public void setup() throws Exception {
conf = new OzoneConfiguration();
+ volume1 = baseDir.resolve("disk1").toString();
+ volume2 = baseDir.resolve("disk2").toString();
+
String dataDirKey = volume1 + "," + volume2;
volumes.add(volume1);
volumes.add(volume2);
@@ -94,8 +98,6 @@ public void shutdown() throws IOException {
FileUtils.deleteDirectory(volume.getStorageDir());
}
volumeSet.shutdown();
-
- FileUtil.fullyDelete(new File(baseDir));
}
private boolean checkVolumeExistsInVolumeSet(String volumeRoot) {
@@ -115,11 +117,11 @@ public void testVolumeSetInitialization() throws Exception {
// VolumeSet initialization should add volume1 and volume2 to VolumeSet
assertEquals(volumesList.size(), volumes.size(),
- "VolumeSet intialization is incorrect");
+ "VolumeSet initialization is incorrect");
assertTrue(checkVolumeExistsInVolumeSet(volume1),
- "VolumeSet not initailized correctly");
+ "VolumeSet not initialized correctly");
assertTrue(checkVolumeExistsInVolumeSet(volume2),
- "VolumeSet not initailized correctly");
+ "VolumeSet not initialized correctly");
}
@Test
@@ -128,7 +130,7 @@ public void testAddVolume() {
assertEquals(2, volumeSet.getVolumesList().size());
// Add a volume to VolumeSet
- String volume3 = baseDir + "disk3";
+ String volume3 = baseDir.resolve("disk3").toString();
boolean success = volumeSet.addVolume(volume3);
assertTrue(success);
@@ -223,31 +225,21 @@ public void testShutdown() throws Exception {
}
@Test
- public void testFailVolumes() throws Exception {
- MutableVolumeSet volSet = null;
- File readOnlyVolumePath = new File(baseDir);
+ void testFailVolumes(@TempDir File readOnlyVolumePath, @TempDir File volumePath) throws Exception {
//Set to readonly, so that this volume will be failed
- readOnlyVolumePath.setReadOnly();
- File volumePath = GenericTestUtils.getRandomizedTestDir();
+ assumeThat(readOnlyVolumePath.setReadOnly()).isTrue();
OzoneConfiguration ozoneConfig = new OzoneConfiguration();
ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath()
+ "," + volumePath.getAbsolutePath());
ozoneConfig.set(HddsConfigKeys.OZONE_METADATA_DIRS,
volumePath.getAbsolutePath());
- volSet = new MutableVolumeSet(UUID.randomUUID().toString(), ozoneConfig,
+ MutableVolumeSet volSet = new MutableVolumeSet(UUID.randomUUID().toString(), ozoneConfig,
null, StorageVolume.VolumeType.DATA_VOLUME, null);
assertEquals(1, volSet.getFailedVolumesList().size());
assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0)
.getStorageDir());
- //Set back to writable
- try {
- readOnlyVolumePath.setWritable(true);
- volSet.shutdown();
- } finally {
- FileUtil.fullyDelete(volumePath);
- }
-
+ volSet.shutdown();
}
@Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 27e1195a24b..e3c610bfe47 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -40,7 +40,6 @@
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
-import org.apache.hadoop.ozone.container.common.TestDatanodeStateMachine;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
@@ -49,7 +48,6 @@
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.ozone.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Timer;
@@ -59,9 +57,11 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -78,6 +78,8 @@
public class TestVolumeSetDiskChecks {
public static final Logger LOG = LoggerFactory.getLogger(
TestVolumeSetDiskChecks.class);
+ @TempDir
+ private File dir;
private OzoneConfiguration conf = null;
@@ -217,21 +219,21 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) {
final OzoneConfiguration ozoneConf = new OzoneConfiguration();
final List dirs = new ArrayList<>();
for (int i = 0; i < numDirs; ++i) {
- dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
+ dirs.add(new File(dir, randomAlphanumeric(10)).toString());
}
ozoneConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY,
String.join(",", dirs));
final List metaDirs = new ArrayList<>();
for (int i = 0; i < numDirs; ++i) {
- metaDirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
+ metaDirs.add(new File(dir, randomAlphanumeric(10)).toString());
}
ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
String.join(",", metaDirs));
final List dbDirs = new ArrayList<>();
for (int i = 0; i < numDirs; ++i) {
- dbDirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
+ dbDirs.add(new File(dir, randomAlphanumeric(10)).toString());
}
ozoneConf.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR,
String.join(",", dbDirs));
@@ -264,8 +266,7 @@ public void testVolumeFailure() throws IOException {
ContainerSet conSet = new ContainerSet(20);
when(ozoneContainer.getContainerSet()).thenReturn(conSet);
- String path = GenericTestUtils
- .getTempPath(TestDatanodeStateMachine.class.getSimpleName());
+ String path = dir.getPath();
File testRoot = new File(path);
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 52316c43264..49ddd5f674d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -28,7 +28,6 @@
import java.util.UUID;
import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -46,7 +45,6 @@
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.ozone.test.GenericTestUtils;
import static java.util.stream.Collectors.toList;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
@@ -60,6 +58,7 @@
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@@ -75,6 +74,7 @@ public class TestKeyValueBlockIterator {
private KeyValueContainerData containerData;
private MutableVolumeSet volumeSet;
private OzoneConfiguration conf;
+ @TempDir
private File testRoot;
private DBHandle db;
private ContainerLayoutVersion layout;
@@ -110,7 +110,6 @@ private static List provideTestData() {
}
public void setup() throws Exception {
- testRoot = GenericTestUtils.getRandomizedTestDir();
conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
volumeSet = new MutableVolumeSet(datanodeID, clusterID, conf, null,
@@ -135,7 +134,6 @@ public void tearDown() throws Exception {
db.cleanup();
BlockUtils.shutdownCache(conf);
volumeSet.shutdown();
- FileUtil.fullyDelete(testRoot);
}
@ParameterizedTest
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index d340ffe77c4..15d0faefdf9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -51,10 +51,8 @@
import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore;
import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
import org.apache.hadoop.ozone.container.replication.CopyContainerCompression;
-import org.apache.ozone.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker;
-import org.assertj.core.api.Fail;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -98,8 +96,8 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.when;
@@ -110,7 +108,7 @@
public class TestKeyValueContainer {
@TempDir
- private Path folder;
+ private File folder;
private String scmId = UUID.randomUUID().toString();
private VolumeSet volumeSet;
@@ -229,7 +227,7 @@ public void testNextVolumeTriedOnWriteFailure(
ContainerTestVersionInfo versionInfo) throws Exception {
init(versionInfo);
String volumeDirPath =
- Files.createDirectory(folder.resolve("volumeDir"))
+ Files.createDirectory(folder.toPath().resolve("volumeDir"))
.toFile().getAbsolutePath();
HddsVolume newVolume = new HddsVolume.Builder(volumeDirPath)
.conf(CONF).datanodeUuid(datanodeId.toString()).build();
@@ -276,7 +274,7 @@ public void testEmptyContainerImportExport(
//destination path
File exportTar = Files.createFile(
- folder.resolve("export.tar")).toFile();
+ folder.toPath().resolve("export.tar")).toFile();
TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
//export the container
try (FileOutputStream fos = new FileOutputStream(exportTar)) {
@@ -309,7 +307,7 @@ public void testUnhealthyContainerImportExport(
keyValueContainer.update(data.getMetadata(), true);
//destination path
- File exportTar = Files.createFile(folder.resolve("export.tar")).toFile();
+ File exportTar = Files.createFile(folder.toPath().resolve("export.tar")).toFile();
TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
//export the container
try (FileOutputStream fos = new FileOutputStream(exportTar)) {
@@ -347,7 +345,7 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
//destination path
File folderToExport = Files.createFile(
- folder.resolve("export.tar")).toFile();
+ folder.toPath().resolve("export.tar")).toFile();
for (CopyContainerCompression compr : CopyContainerCompression.values()) {
TarContainerPacker packer = new TarContainerPacker(compr);
@@ -394,15 +392,14 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
containerData.getBytesUsed());
//Can't overwrite existing container
- try {
+ KeyValueContainer finalContainer = container;
+ assertThrows(IOException.class, () -> {
try (FileInputStream fis = new FileInputStream(folderToExport)) {
- container.importContainerData(fis, packer);
+ finalContainer.importContainerData(fis, packer);
}
- fail("Container is imported twice. Previous files are overwritten");
- } catch (IOException ex) {
- //all good
- assertTrue(container.getContainerFile().exists());
- }
+ }, "Container is imported twice. Previous files are overwritten");
+ //all good
+ assertTrue(container.getContainerFile().exists());
//Import failure should cleanup the container directory
containerData =
@@ -416,18 +413,18 @@ public void testContainerImportExport(ContainerTestVersionInfo versionInfo)
containerVolume = volumeChoosingPolicy.chooseVolume(
StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()), 1);
container.populatePathFields(scmId, containerVolume);
- try {
- FileInputStream fis = new FileInputStream(folderToExport);
- fis.close();
- container.importContainerData(fis, packer);
- fail("Container import should fail");
- } catch (Exception ex) {
- assertInstanceOf(IOException.class, ex);
- } finally {
- File directory =
- new File(container.getContainerData().getContainerPath());
- assertFalse(directory.exists());
- }
+ KeyValueContainer finalContainer1 = container;
+ assertThrows(IOException.class, () -> {
+ try {
+ FileInputStream fis = new FileInputStream(folderToExport);
+ fis.close();
+ finalContainer1.importContainerData(fis, packer);
+ } finally {
+ File directory =
+ new File(finalContainer1.getContainerData().getContainerPath());
+ assertFalse(directory.exists());
+ }
+ });
}
}
@@ -524,7 +521,7 @@ public void concurrentExport(ContainerTestVersionInfo versionInfo)
.mapToObj(i -> new Thread(() -> {
try {
File file = Files.createFile(
- folder.resolve("concurrent" + i + ".tar")).toFile();
+ folder.toPath().resolve("concurrent" + i + ".tar")).toFile();
try (OutputStream out = Files.newOutputStream(file.toPath())) {
keyValueContainer.exportContainerData(out, packer);
}
@@ -814,13 +811,13 @@ public void testKeyValueDataProtoBufMsg(ContainerTestVersionInfo versionInfo)
}
@ContainerTestVersionInfo.ContainerTest
- public void testAutoCompactionSmallSstFile(
+ void testAutoCompactionSmallSstFile(
ContainerTestVersionInfo versionInfo) throws Exception {
init(versionInfo);
assumeTrue(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3));
// Create a new HDDS volume
String volumeDirPath =
- Files.createDirectory(folder.resolve("volumeDir")).toFile()
+ Files.createDirectory(folder.toPath().resolve("volumeDir")).toFile()
.getAbsolutePath();
HddsVolume newVolume = new HddsVolume.Builder(volumeDirPath)
.conf(CONF).datanodeUuid(datanodeId.toString()).build();
@@ -859,7 +856,7 @@ public void testAutoCompactionSmallSstFile(
if (volume == newVolume) {
File folderToExport =
Files.createFile(
- folder.resolve(containerId + "_exported.tar.gz")).toFile();
+ folder.toPath().resolve(containerId + "_exported.tar.gz")).toFile();
TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
//export the container
try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
@@ -905,8 +902,6 @@ public void testAutoCompactionSmallSstFile(
List fileMetaDataList2 =
((RDBStore)(dnStore.getStore())).getDb().getLiveFilesMetaData();
assertThat(fileMetaDataList2.size()).isLessThan(fileMetaDataList1.size());
- } catch (Exception e) {
- Fail.fail("TestAutoCompactionSmallSstFile failed");
} finally {
// clean up
for (KeyValueContainer c : containerList) {
@@ -929,7 +924,7 @@ public void testIsEmptyContainerStateWhileImport(
//destination path
File folderToExport = Files.createFile(
- folder.resolve("export.tar")).toFile();
+ folder.toPath().resolve("export.tar")).toFile();
for (CopyContainerCompression compr : CopyContainerCompression.values()) {
TarContainerPacker packer = new TarContainerPacker(compr);
@@ -978,7 +973,7 @@ public void testIsEmptyContainerStateWhileImportWithoutBlock(
//destination path
File folderToExport = Files.createFile(
- folder.resolve("export.tar")).toFile();
+ folder.toPath().resolve("export.tar")).toFile();
for (CopyContainerCompression compr : CopyContainerCompression.values()) {
TarContainerPacker packer = new TarContainerPacker(compr);
@@ -1022,14 +1017,8 @@ public void testIsEmptyContainerStateWhileImportWithoutBlock(
public void testImportV2ReplicaToV3HddsVolume(
ContainerTestVersionInfo versionInfo) throws Exception {
init(versionInfo);
- final String testDir = GenericTestUtils.getTempPath(
- TestKeyValueContainer.class.getSimpleName() + "-"
- + UUID.randomUUID());
- try {
- testMixedSchemaImport(testDir, false);
- } finally {
- FileUtils.deleteDirectory(new File(testDir));
- }
+ final String testDir = folder.getPath();
+ testMixedSchemaImport(testDir, false);
}
/**
@@ -1039,14 +1028,8 @@ public void testImportV2ReplicaToV3HddsVolume(
public void testImportV3ReplicaToV2HddsVolume(
ContainerTestVersionInfo versionInfo) throws Exception {
init(versionInfo);
- final String testDir = GenericTestUtils.getTempPath(
- TestKeyValueContainer.class.getSimpleName() + "-"
- + UUID.randomUUID());
- try {
- testMixedSchemaImport(testDir, true);
- } finally {
- FileUtils.deleteDirectory(new File(testDir));
- }
+ final String testDir = folder.getPath();
+ testMixedSchemaImport(testDir, true);
}
private void testMixedSchemaImport(String dir,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
index 4145509413d..60dfe8509bd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
@@ -41,8 +41,8 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
index 51ecb322243..9c531069e9c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.container.keyvalue;
import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -34,8 +33,8 @@
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -63,6 +62,7 @@ public class TestKeyValueContainerIntegrityChecks {
private ContainerLayoutTestInfo containerLayoutTestInfo;
private MutableVolumeSet volumeSet;
private OzoneConfiguration conf;
+ @TempDir
private File testRoot;
private ChunkManager chunkManager;
private String clusterID = UUID.randomUUID().toString();
@@ -87,7 +87,6 @@ void initTestData(ContainerTestVersionInfo versionInfo) throws Exception {
private void setup() throws Exception {
LOG.info("Testing layout:{}", containerLayoutTestInfo.getLayout());
- this.testRoot = GenericTestUtils.getRandomizedTestDir();
conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
containerLayoutTestInfo.updateConfig(conf);
@@ -101,7 +100,6 @@ private void setup() throws Exception {
public void teardown() {
BlockUtils.shutdownCache(conf);
volumeSet.shutdown();
- FileUtil.fullyDelete(testRoot);
}
protected ContainerLayoutVersion getChunkLayout() {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
index 0b63ab1796d..61083fa73fa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
@@ -43,8 +43,8 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 28c0a8092a0..2637f1922c6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -62,7 +62,7 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
index 1e326ce3ee0..565c3c94408 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
@@ -50,7 +50,7 @@
import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestBlockID;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.getWriteChunkRequest;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.atMostOnce;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
index 9ebc55a4b80..c44682203e2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
@@ -382,7 +382,7 @@ private File writeDbFile(
private File writeSingleFile(Path parentPath, String fileName,
String content) throws IOException {
- Path path = parentPath.resolve(fileName);
+ Path path = parentPath.resolve(fileName).normalize();
Files.createDirectories(path.getParent());
File file = path.toFile();
FileOutputStream fileStream = new FileOutputStream(file);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
index 14b47a57c3a..1a1158a210f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
@@ -70,7 +70,7 @@ class TestChunkUtils {
private static final Random RANDOM = new Random();
@TempDir
- private Path tempDir;
+ private File tempDir;
static ChunkBuffer readData(File file, long off, long len)
throws StorageContainerException {
@@ -84,7 +84,7 @@ void concurrentReadOfSameFile() throws Exception {
String s = "Hello World";
byte[] array = s.getBytes(UTF_8);
ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
- Path tempFile = tempDir.resolve("concurrent");
+ Path tempFile = tempDir.toPath().resolve("concurrent");
int len = data.limit();
int offset = 0;
File file = tempFile.toFile();
@@ -136,7 +136,7 @@ void concurrentProcessing() throws Exception {
0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
AtomicInteger processed = new AtomicInteger();
for (int i = 0; i < threads; i++) {
- Path path = tempDir.resolve(String.valueOf(i));
+ Path path = tempDir.toPath().resolve(String.valueOf(i));
executor.execute(() -> {
try {
ChunkUtils.processFileExclusively(path, () -> {
@@ -166,7 +166,7 @@ void serialRead() throws IOException {
String s = "Hello World";
byte[] array = s.getBytes(UTF_8);
ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
- Path tempFile = tempDir.resolve("serial");
+ Path tempFile = tempDir.toPath().resolve("serial");
File file = tempFile.toFile();
int len = data.limit();
int offset = 0;
@@ -185,7 +185,7 @@ void serialRead() throws IOException {
@Test
void validateChunkForOverwrite() throws IOException {
- Path tempFile = tempDir.resolve("overwrite");
+ Path tempFile = tempDir.toPath().resolve("overwrite");
FileUtils.write(tempFile.toFile(), "test", UTF_8);
assertTrue(
@@ -226,7 +226,7 @@ void readMissingFile() {
@Test
void testReadData() throws Exception {
- final File dir = GenericTestUtils.getTestDir("testReadData");
+ final File dir = new File(tempDir, "testReadData");
try {
assertTrue(dir.mkdirs());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
index 0ca0d267b88..0c373cb0dbf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
@@ -44,8 +44,8 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
index 38a01e46900..26d959e8860 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java
@@ -45,6 +45,7 @@
import java.util.List;
import java.util.UUID;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
@@ -83,6 +84,7 @@ private void initTest(ContainerTestVersionInfo versionInfo)
this.schemaVersion = versionInfo.getSchemaVersion();
this.config = new OzoneConfiguration();
ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, config);
+ config.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
initilaze();
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
index 304bfa7f206..36d71655192 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
@@ -37,7 +37,7 @@
import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Test for FilePerBlockStrategy.
@@ -48,7 +48,7 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases {
public void testDeletePartialChunkWithOffsetUnsupportedRequest() {
// GIVEN
ChunkManager chunkManager = createTestSubject();
- try {
+ StorageContainerException e = assertThrows(StorageContainerException.class, () -> {
KeyValueContainer container = getKeyValueContainer();
BlockID blockID = getBlockID();
chunkManager.writeChunk(container, blockID,
@@ -58,12 +58,8 @@ public void testDeletePartialChunkWithOffsetUnsupportedRequest() {
// WHEN
chunkManager.deleteChunk(container, blockID, chunkInfo);
-
- // THEN
- fail("testDeleteChunkUnsupportedRequest");
- } catch (StorageContainerException ex) {
- assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult());
- }
+ });
+ assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, e.getResult());
}
/**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
index fb79fafe2ba..e34bfe9396a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerDataScanner.java
@@ -42,8 +42,8 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.atMostOnce;
import static org.mockito.Mockito.never;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
index 302e2d2d138..9bb3a382c60 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestBackgroundContainerMetadataScanner.java
@@ -42,7 +42,7 @@
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.atMostOnce;
import static org.mockito.Mockito.never;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index d48f0d3314e..7f38eab785b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -70,8 +70,8 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -358,7 +358,7 @@ public void testContainerReaderWithInvalidDbPath(
hddsVolume1, containerSet1, conf, true);
containerReader.readVolume(hddsVolume1.getHddsRootDir());
assertEquals(0, containerSet1.containerCount());
- assertTrue(dnLogs.getOutput().contains("Container DB file is missing"));
+ assertThat(dnLogs.getOutput()).contains("Container DB file is missing");
}
@ContainerTestVersionInfo.ContainerTest
@@ -558,8 +558,10 @@ public void testMarkedDeletedContainerCleared(
// add db entry for the container ID 101 for V3
baseCount = addDbEntry(containerData);
}
+
+ // verify container data and perform cleanup
ContainerReader containerReader = new ContainerReader(volumeSet,
- hddsVolume, containerSet, conf, false);
+ hddsVolume, containerSet, conf, true);
containerReader.run();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java
index 3a3a96cc429..d892e916ce6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannersAbstract.java
@@ -45,8 +45,8 @@
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.getUnhealthyScanResult;
import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.CONTAINER_SCAN_MIN_GAP_DEFAULT;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java
index acf04edd643..8334c7b078c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOnDemandContainerDataScanner.java
@@ -46,8 +46,8 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.atMostOnce;
import static org.mockito.Mockito.never;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index af6bd5d17f3..497418dcdcb 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -153,6 +153,7 @@ public void testBuildContainerMap(ContainerTestVersionInfo versionInfo)
OzoneContainer ozoneContainer = ContainerTestUtils
.getOzoneContainer(datanodeDetails, conf);
+ ozoneContainer.buildContainerSet();
ContainerSet containerset = ozoneContainer.getContainerSet();
assertEquals(numTestContainers, containerset.containerCount());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java
index a47dffd6d38..231a90eb0bd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStreamTest.java
@@ -36,7 +36,7 @@
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
index d9ff2364f32..1b989e6bc7f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
@@ -21,7 +21,9 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
+import java.util.HashSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
@@ -41,8 +43,10 @@
import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -52,7 +56,10 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
/**
@@ -138,6 +145,42 @@ void importSameContainerWhenFirstInProgress() throws Exception {
semaphore.release();
}
+ @Test
+ public void testInconsistentChecksumContainerShouldThrowError() throws Exception {
+ // create container
+ long containerId = 1;
+ KeyValueContainerData containerData = spy(new KeyValueContainerData(containerId,
+ ContainerLayoutVersion.FILE_PER_BLOCK, 100, "test", "test"));
+ // mock to return different checksum
+ when(containerData.getChecksum()).thenReturn("checksum1", "checksum2");
+ doNothing().when(containerData).setChecksumTo0ByteArray();
+ // create containerImporter object
+ ContainerController controllerMock = mock(ContainerController.class);
+ ContainerSet containerSet = new ContainerSet(0);
+ MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null,
+ StorageVolume.VolumeType.DATA_VOLUME, null);
+ ContainerImporter containerImporter = spy(new ContainerImporter(conf,
+ containerSet, controllerMock, volumeSet));
+
+ TarContainerPacker packer = mock(TarContainerPacker.class);
+ when(packer.unpackContainerDescriptor(any())).thenReturn("test".getBytes(
+ StandardCharsets.UTF_8));
+ when(containerImporter.getPacker(any())).thenReturn(packer);
+
+ doReturn(containerData).when(containerImporter).getKeyValueContainerData(any(byte[].class));
+ when(containerImporter.getImportContainerProgress()).thenReturn(new HashSet<>());
+
+ File tarFile = File.createTempFile("temp_" + System
+ .currentTimeMillis(), ".tar");
+
+ StorageContainerException scException =
+ assertThrows(StorageContainerException.class,
+ () -> containerImporter.importContainer(containerId,
+ tarFile.toPath(), null, NO_COMPRESSION));
+ Assertions.assertTrue(scException.getMessage().
+ contains("Container checksum error"));
+ }
+
private File containerTarFile(
long containerId, ContainerData containerData) throws IOException {
File yamlFile = new File(tempDir, "container.yaml");
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java
index fb6b8ffa357..2208a4536f8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcContainerUploader.java
@@ -35,7 +35,7 @@
import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java
index b678f6e4042..91fd29681cc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestPushReplicator.java
@@ -39,8 +39,8 @@
import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.toTarget;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index 833b8de4e3f..26c6853b64a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.replication;
+import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -65,8 +66,9 @@
import org.apache.ozone.test.TestClock;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import static com.google.common.util.concurrent.MoreExecutors.newDirectExecutorService;
import static java.util.Collections.emptyList;
@@ -81,9 +83,9 @@
import static org.junit.jupiter.api.Assertions.fail;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -276,7 +278,8 @@ public void slowDownload() {
}
@ContainerLayoutTestInfo.ContainerTest
- public void testDownloadAndImportReplicatorFailure() throws IOException {
+ public void testDownloadAndImportReplicatorFailure(ContainerLayoutVersion layout,
+ @TempDir File tempFile) throws IOException {
OzoneConfiguration conf = new OzoneConfiguration();
ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder()
@@ -294,9 +297,7 @@ public void testDownloadAndImportReplicatorFailure() throws IOException {
any(Path.class), any()))
.thenReturn(res);
- final String testDir = GenericTestUtils.getTempPath(
- TestReplicationSupervisor.class.getSimpleName() +
- "-" + UUID.randomUUID());
+ final String testDir = tempFile.getPath();
MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
when(volumeSet.getVolumesList())
.thenReturn(singletonList(
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
index 160c690bd4a..f054358b35b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
@@ -35,7 +35,7 @@
import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
index 78dc44338d6..d3907a6031d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
@@ -19,12 +19,9 @@
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
-import org.apache.commons.io.FileUtils;
-import org.apache.ozone.test.GenericTestUtils;
-import org.jetbrains.annotations.NotNull;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
@@ -39,18 +36,9 @@
*/
public class TestDirstreamClientHandler {
+ @TempDir
private Path tmpDir;
- @BeforeEach
- public void init() {
- tmpDir = GenericTestUtils.getRandomizedTestDir().toPath();
- }
-
- @AfterEach
- public void destroy() throws IOException {
- FileUtils.deleteDirectory(tmpDir.toFile());
- }
-
@Test
public void oneFileStream() throws IOException {
@@ -129,7 +117,7 @@ public void splitContent() throws IOException {
assertEquals("yyy", getContent("bsd.txt"));
}
- @NotNull
+ @Nonnull
private String getContent(String name) throws IOException {
return new String(Files.readAllBytes(tmpDir.resolve(name)),
StandardCharsets.UTF_8);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
index 2a59eee8db9..383e76dcc72 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
@@ -73,7 +73,7 @@
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
index baa9fa4e67d..3a69c793c26 100644
--- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
+++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml
@@ -209,12 +209,6 @@
-
-
-
-
-
-
diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md
index 1149eba2b0d..47c09a798fc 100644
--- a/hadoop-hdds/docs/content/concept/Datanodes.md
+++ b/hadoop-hdds/docs/content/concept/Datanodes.md
@@ -48,7 +48,7 @@ the key to the Ozone Manager. Ozone manager returns the list of Ozone blocks
that make up that key.
An Ozone block contains the container ID and a local ID. The figure below
-shows the logical layout out of Ozone block.
+shows the logical layout of the Ozone block.

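As an aside for this documentation hunk: the sentence above describes an Ozone block reference as a container ID plus a local ID. The sketch below is only a hypothetical, minimal illustration of that pairing in Java; the class name and fields are invented for this note and are not Ozone's actual client types.

```java
/**
 * Hypothetical sketch only: models the block reference described above,
 * i.e. a container ID plus a local ID within that container.
 * This is not Ozone's real client API.
 */
public final class OzoneBlockRef {
  private final long containerId; // which container holds the block
  private final long localId;     // the block's ID inside that container

  public OzoneBlockRef(long containerId, long localId) {
    this.containerId = containerId;
    this.localId = localId;
  }

  public long getContainerId() {
    return containerId;
  }

  public long getLocalId() {
    return localId;
  }

  @Override
  public String toString() {
    return "container=" + containerId + ", local=" + localId;
  }
}
```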
diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md
index 31f6a428107..f58a4cd8f45 100644
--- a/hadoop-hdds/docs/content/concept/OzoneManager.md
+++ b/hadoop-hdds/docs/content/concept/OzoneManager.md
@@ -31,7 +31,7 @@ Ozone Manager (OM) is the namespace manager for Ozone.
This means that when you want to write some data, you ask Ozone
Manager for a block and Ozone Manager gives you a block and remembers that
information. When you want to read that file back, you need to find the
-address of the block and Ozone Manager returns it you.
+address of the block and the Ozone Manager returns it to you.
Ozone Manager also allows users to organize keys under a volume and bucket.
Volumes and buckets are part of the namespace and managed by Ozone Manager.
diff --git a/hadoop-hdds/docs/content/concept/Recon.md b/hadoop-hdds/docs/content/concept/Recon.md
index 235d6c314fb..608a37c39b4 100644
--- a/hadoop-hdds/docs/content/concept/Recon.md
+++ b/hadoop-hdds/docs/content/concept/Recon.md
@@ -153,7 +153,7 @@ ozone.recon.db.dir | none | Directory where the Recon Server stores its metadata
ozone.recon.om.db.dir | none | Directory where the Recon Server stores its OM snapshot DB.
ozone.recon.om.snapshot.task.interval.delay | 10m | Interval in MINUTES by Recon to request OM DB Snapshot / delta updates.
ozone.recon.task.missingcontainer.interval | 300s | Time interval of the periodic check for Unhealthy Containers in the cluster.
-ozone.recon.task.safemode.wait.threshold | 300s | Max time for Recon to wait before it exit out of safe or warmup mode.
+ozone.recon.task.safemode.wait.threshold | 300s | Max time for Recon to wait before it exits out of safe or warmup mode.
ozone.recon.sql.db.jooq.dialect | DERBY | Please refer to [SQL Dialect](https://www.jooq.org/javadoc/latest/org.jooq/org/jooq/SQLDialect.html) to specify a different dialect.
ozone.recon.sql.db.jdbc.url | jdbc:derby:${ozone.recon.db.dir}/ozone_recon_derby.db | Recon SQL database jdbc url.
ozone.recon.sql.db.username | none | Recon SQL database username.
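As a side note on the row changed above: the same property can also be set programmatically through the OzoneConfiguration API that the test changes in this patch already use. This is only an illustrative sketch; the value shown is simply the documented default.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class ReconSafemodeThresholdExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Illustrative only: 300s is the documented default for this property.
    conf.set("ozone.recon.task.safemode.wait.threshold", "300s");
    System.out.println(conf.get("ozone.recon.task.safemode.wait.threshold"));
  }
}
```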
diff --git a/hadoop-hdds/docs/content/concept/StorageContainerManager.md b/hadoop-hdds/docs/content/concept/StorageContainerManager.md
index 8922f89bc5d..860e69a77da 100644
--- a/hadoop-hdds/docs/content/concept/StorageContainerManager.md
+++ b/hadoop-hdds/docs/content/concept/StorageContainerManager.md
@@ -43,7 +43,7 @@ read and write these blocks directly.
2. SCM keeps track of all the block
replicas. If there is a loss of data node or a disk, SCM
-detects it and instructs data nodes make copies of the
+detects it and instructs data nodes to make copies of the
missing blocks to ensure high availability.
3. **SCM's Certificate Authority** is in
diff --git a/hadoop-hdds/docs/content/feature/Decommission.zh.md b/hadoop-hdds/docs/content/feature/Decommission.zh.md
new file mode 100644
index 00000000000..ad959469b95
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/Decommission.zh.md
@@ -0,0 +1,96 @@
+---
+title: "Decommissioning"
+weight: 1
+menu:
+ main:
+ parent: 特性
+summary: Decommissioning of SCM, OM and Datanode.
+---
+
+
+# DataNode Decommission
+
+DataNode Decommission是从Ozone集群中删除现有DataNode的过程中,同时确保新数据不会被写入正在Decommission的DataNode。当你启动DataNode Decommission的操作时候,Ozone会自动确保在Decommission完成之前,该数据节点上的所有Storage containers都在另一个DataNode上创建了额外的副本。因此,DataNode在Decommission完成后可以继续运行,并可用于读取,但不能用于写入,直到手动停止DataNode的服务。
+
+当我们启动Decommission时,这个操作首先要检查节点的当前状态,理想情况下应该是 "IN_SERVICE",然后将其状态更改为 "DECOMMISSIONING",并启动Decommission的流程:
+
+1. 首先它会触发一个事件,关闭节点上的所有Pipelines,同时关闭所有Containers。
+
+2. 然后获取节点上的Container信息,并检查是否需要新的副本。如果需要,创建新的副本的任务就会被调度起来。
+
+3. 复制任务被调度后,节点仍处于待处理状态,直到复制任务完成。
+
+4. 在此阶段,节点将完成Decommission的过程,然后节点状态将更改为 "DECOMMISSIONED"。
+
+要检查DataNode的当前状态,可以执行以下命令,
+```shell
+ozone admin datanode list
+```
+
+要decommission某台datanode的时候,可以执行下面的命令,
+
+```shell
+ozone admin datanode decommission [-hV] [-id=]
+ [--scm=] [...]
+```
+您可以输入多个主机,以便一起Decommission多个DataNode。
+
+**Note:** 要Recommission某台DataNode的时候,可在命令行执行以下命令,
+```shell
+ozone admin datanode recommission [-hV] [-id=]
+ [--scm=] [...]
+```
+
+# OM Decommission
+
+Ozone Manager(OM)Decommissioning是指从 OM HA Ring 中从容地(gracefully)移除一个 OM 的过程。
+
+To decommission an OM and remove the node from the OM HA ring, the following steps need to be executed.
+1. Add the _OM NodeId_ of the OM node to be decommissioned to the _ozone.om.decommissioned.nodes._ property in _ozone-site.xml_ of all the other OMs (see the illustration after these steps).
+2. Run the following command to decommission the OM node.
+```shell
+ozone admin om decommission -id= -nodeid= -hostname= [optional --force]
+```
+ The _--force_ option will skip checking whether the OM configuration in _ozone-site.xml_ has been updated and the decommissioned node added to the _**ozone.om.decommissioned.nodes**_ property. **Note -** It is recommended to bootstrap another OM node before decommissioning one so that OM high availability (HA) is not compromised.
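+
+For illustration only: assuming the OM node being removed has _NodeId_ `om3`, the entry added to _ozone-site.xml_ on the remaining OMs could look like the sketch below (`om3` is a placeholder; use the exact property name described in step 1).
+
+```XML
+<property>
+  <name>ozone.om.decommissioned.nodes</name>
+  <value>om3</value>
+</property>
+```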
+
+# SCM Decommission
+
+Storage Container Manager (SCM) decommissioning is the process that allows you to gracefully remove one SCM from the SCM HA ring.
+
+To decommission an SCM and remove it from the SCM HA ring, execute the following command.
+```shell
+ozone admin scm decommission [-hV] [--service-id=] -nodeid=
+```
+The "nodeId" can be obtained by running: **"ozone admin scm roles"**
+
+### Leader SCM
+If you need to decommission the **leader** SCM, you must first transfer the leader role to another SCM and then decommission the node.
+
+You can transfer the leader role with the following command:
+```shell
+ozone admin scm transfer [--service-id=] -n=
+```
+After the leader role has been transferred successfully, you can proceed with decommissioning.
+
+### Primordial SCM
+If you want to decommission the **primordial** SCM, you must change the _ozone.scm.primordial.node.id_ property to point to a different SCM before proceeding with decommissioning.
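+
+A minimal illustration, where `scm2` is a placeholder for the node id of another SCM:
+
+```XML
+<property>
+  <name>ozone.scm.primordial.node.id</name>
+  <value>scm2</value>
+</property>
+```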
+
+### Note
+While running SCM decommissioning, the private key of the decommissioned SCM should be removed manually. The private keys can be found under _hdds.metadata.dir_.
+
+Until certificate revocation is supported (HDDS-8399), the certificates on the decommissioned SCM need to be removed manually.
diff --git a/hadoop-hdds/docs/content/feature/OM-HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md
index 3e4048c51c3..3872c387335 100644
--- a/hadoop-hdds/docs/content/feature/OM-HA.md
+++ b/hadoop-hdds/docs/content/feature/OM-HA.md
@@ -23,13 +23,13 @@ summary: HA setup for Ozone Manager to avoid any single point of failure.
limitations under the License.
-->
-Ozone has two metadata-manager nodes (*Ozone Manager* for key space management and *Storage Container Management* for block space management) and multiple storage nodes (Datanode). Data is replicated between Datanodes with the help of RAFT consensus algorithm.
+Ozone has two metadata-manager nodes (*Ozone Manager* for key space management and *Storage Container Manager* for block space management) and multiple storage nodes (Datanode). Data is replicated between Datanodes with the help of RAFT consensus algorithm.
To avoid any single point of failure the metadata-manager nodes also should have a HA setup.
Both Ozone Manager and Storage Container Manager supports HA. In this mode the internal state is replicated via RAFT (with Apache Ratis)
-This document explain the HA setup of Ozone Manager (OM) HA, please check [this page]({{< ref "SCM-HA" >}}) for SCM HA. While they can be setup for HA independently, a reliable, full HA setup requires enabling HA for both services.
+This document explains the HA setup of Ozone Manager (OM); please check [this page]({{< ref "SCM-HA" >}}) for SCM HA. While they can be set up for HA independently, a reliable, full HA setup requires enabling HA for both services.
## Ozone Manager HA
@@ -104,18 +104,18 @@ hdfs dfs -ls ofs://cluster1/volume/bucket/prefix/
Raft can guarantee the replication of any request if the request is persisted to the RAFT log on the majority of the nodes. To achieve high throughput with Ozone Manager, it returns with the response even if the request is persisted only to the RAFT logs.
-RocksDB instance are updated by a background thread with batching transactions (so called "double buffer" as when one of the buffers is used to commit the data the other one collects all the new requests for the next commit.) To make all data available for the next request even if the background process is not yet wrote them the key data is cached in the memory.
+The RocksDB instance is updated by a background thread with batched transactions (the so-called "double buffer": while one of the buffers is used to commit the data, the other one collects all the new requests for the next commit). To make all data available for the next request even if the background process has not yet written it, the key data is cached in memory.

-The details of this approach discussed in a separated [design doc]({{< ref "design/omha.md" >}}) but it's integral part of the OM HA design.
+The details of this approach are discussed in a separate [design doc]({{< ref "design/omha.md" >}}) but it's an integral part of the OM HA design.
## OM Bootstrap
To convert a non-HA OM to be HA or to add new OM nodes to existing HA OM ring, new OM node(s) need to be bootstrapped.
Before bootstrapping a new OM node, all the existing OM's on-disk configuration file (ozone-site.xml) must be updated with the configuration details
-of the new OM such nodeId, address, port etc. Note that the existing OM's need not be restarted. They will reload the configuration from disk when
+of the new OM such as nodeId, address, port etc. Note that the existing OM's need not be restarted. They will reload the configuration from disk when
they receive a bootstrap request from the bootstrapping node.
To bootstrap an OM, the following command needs to be run:
diff --git a/hadoop-hdds/docs/content/feature/Topology.zh.md b/hadoop-hdds/docs/content/feature/Topology.zh.md
new file mode 100644
index 00000000000..a366e3a2473
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/Topology.zh.md
@@ -0,0 +1,108 @@
+---
+title: "拓扑感知能力"
+weight: 2
+menu:
+ main:
+ parent: 特性
+summary: Rack awareness configuration can improve read/write performance
+---
+
+
+Ozone can use topology-related information (for example, rack placement) to optimize read and write pipelines. To get a fully rack-aware cluster, Ozone needs three different configurations.
+
+ 1. The topology information should be configured in Ozone.
+ 2. The topology-related information is used when Ozone chooses three different datanodes for a specific pipeline/container (write path).
+ 3. When Ozone reads a key, it should prefer to read from the closest node.
+
+
+
+Ozone uses RAFT replication for OPEN containers (writes) and asynchronous replication for CLOSED, immutable containers (cold data). Since RAFT requires a low-latency network, topology-aware placement is used only for CLOSED containers. For more information about OPEN vs. CLOSED containers, see the [Containers]({{< ref "concept/Containers.zh.md">}}) page.
+
+
+
+## Topology hierarchy
+
+The topology hierarchy can be configured with the net.topology.node.switch.mapping.impl configuration key. This configuration should define an implementation of org.apache.hadoop.net.CachedDNSToSwitchMapping. As it is a Hadoop class, the configuration is exactly the same as the Hadoop configuration.
+
+### Static list
+
+A static list can be configured with the help of ```TableMapping```:
+
+```XML
+<property>
+  <name>net.topology.node.switch.mapping.impl</name>
+  <value>org.apache.hadoop.net.TableMapping</value>
+</property>
+<property>
+  <name>net.topology.table.file.name</name>
+  <value>/opt/hadoop/compose/ozone-topology/network-config</value>
+</property>
+```
+
+The second configuration option should point to a text file. The file format is a two-column text file, with columns separated by whitespace. The first column is an IP address and the second column specifies the rack where the address maps. If no entry is found for a host in the cluster, /default-rack is used.
+
+### Dynamic list
+
+Rack information can be identified with the help of an external script:
+
+
+```XML
+<property>
+  <name>net.topology.node.switch.mapping.impl</name>
+  <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+</property>
+<property>
+  <name>net.topology.script.file.name</name>
+  <value>/usr/local/bin/rack.sh</value>
+</property>
+```
+
+If an external script is used, it needs to be specified with the net.topology.script.file.name parameter in the configuration files. Unlike the java class option, the external topology script is not included in the Ozone distribution; it is provided by the administrator. When forking the topology script, Ozone sends multiple IP addresses in ARGV. The number of IP addresses sent to the topology script is controlled by net.topology.script.number.args, which defaults to 100. If net.topology.script.number.args is changed to 1, a topology script is forked for each submitted IP address.
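+
+For illustration, the batch size could be lowered as in the sketch below (the value 25 is an arbitrary example, not a recommendation):
+
+```XML
+<property>
+  <name>net.topology.script.number.args</name>
+  <value>25</value>
+</property>
+```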
+
+## Write path
+
+The placement of CLOSED containers can be configured with the `ozone.scm.container.placement.impl` configuration key. The available container placement policies can be found in the `org.apache.hdds.scm.container.placement` [package](https://github.com/apache/ozone/tree/master/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms).
+
+By default, CLOSED containers are placed with the `SCMContainerPlacementRandom` policy, which is not topology-aware. To enable topology awareness, configure `SCMContainerPlacementRackAware` as the placement policy for CLOSED containers:
+
+```XML
+<property>
+  <name>ozone.scm.container.placement.impl</name>
+  <value>org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware</value>
+</property>
+```
+
+This placement policy follows the algorithm used in HDFS. With the default three replicas, two replicas are placed on the same rack and the third one on a different rack.
+
+This implementation works for network topologies like "/rack/node". It is not recommended if the network topology has more levels.
+
+## Read path
+
+Finally, the read path should also be configured to read data from the closest pipeline.
+
+```XML
+<property>
+  <name>ozone.network.topology.aware.read</name>
+  <value>true</value>
+</property>
+```
+
+## References
+
+ * Hadoop documentation about `net.topology.node.switch.mapping.impl`: https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/RackAwareness.html
+ * [Design doc]({{< ref path="design/topology.md" lang="en">}})
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md
new file mode 100644
index 00000000000..cd3eb5fbdc5
--- /dev/null
+++ b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md
@@ -0,0 +1,70 @@
+---
+title: "在DataNode上合并Container的RocksDB"
+weight: 2
+menu:
+ main:
+ parent: 特性
+summary: Introduction to the Ozone DataNode Container Schema V3
+---
+
+
+In Ozone, user data is split into blocks and stored in HDDS containers. Containers are the basic replication unit of Ozone/HDDS. Each container has its own metadata and data; the data is saved as files on disk, while the metadata is saved in RocksDB.
+
+Currently there is one RocksDB for each container on a datanode. As user data keeps growing, there will be hundreds or even thousands of RocksDB instances on a single DataNode. Managing so many RocksDB instances in one JVM is a huge challenge.
+
+Unlike the current approach, the "Merge Container RocksDB in DN" feature uses only one RocksDB per volume and keeps the metadata of all containers in that RocksDB.
+
+## Configuration
+
+This is mainly a DataNode feature and does not require much configuration.
+
+If the mode of one RocksDB per container is preferred, the following configuration can disable the feature described above. Please note that once the feature is enabled, it is strongly recommended not to disable it later.
+
+```XML
+<property>
+  <name>hdds.datanode.container.schema.v3.enabled</name>
+  <value>false</value>
+  <description>Disable or enable this feature.</description>
+</property>
+```
+
+Without any special configuration, a single RocksDB will be created under each data volume configured in "hdds.datanode.dir".
+
+Advanced cluster administrators with high performance requirements can use fast storage to hold the RocksDB instances. In that case, please configure the following two properties.
+
+```XML
+<property>
+  <name>hdds.datanode.container.db.dir</name>
+  <value/>
+  <description>This setting is optional. Specify where the per-disk rocksdb instances will be stored.</description>
+</property>
+<property>
+  <name>hdds.datanode.failed.db.volumes.tolerated</name>
+  <value>-1</value>
+  <description>The number of db volumes that are allowed to fail before a datanode stops offering service.
+  Default -1 means unlimited, but we should have at least one good volume left.</description>
+</property>
+```
+
+### Backward compatibility
+
+Existing containers, each of which has its own RocksDB, will still be accessible after this feature is enabled. All container data will co-exist in an existing Ozone cluster.
+
+## References
+
+ * [Design doc]({{< ref path="design/dn-merge-rocksdb.md" lang="en">}})
\ No newline at end of file
diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml
index a632c65254c..14511a160ce 100644
--- a/hadoop-hdds/erasurecode/pom.xml
+++ b/hadoop-hdds/erasurecode/pom.xml
@@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
index fcdbacbec10..f4e17945194 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
@@ -31,9 +31,10 @@
import java.util.List;
import java.util.Set;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test CodecRegistry.
@@ -44,10 +45,8 @@ public class TestCodecRegistry {
public void testGetCodecs() {
Set codecs = CodecRegistry.getInstance().getCodecNames();
assertEquals(2, codecs.size());
- assertTrue(
- codecs.contains(ECReplicationConfig.EcCodec.RS.name().toLowerCase()));
- assertTrue(
- codecs.contains(ECReplicationConfig.EcCodec.XOR.name().toLowerCase()));
+ assertThat(codecs).contains(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
+ assertThat(codecs).contains(ECReplicationConfig.EcCodec.XOR.name().toLowerCase());
}
@Test
@@ -55,14 +54,14 @@ public void testGetCoders() {
List coders = CodecRegistry.getInstance().
getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
assertEquals(2, coders.size());
- assertTrue(coders.get(0) instanceof NativeRSRawErasureCoderFactory);
- assertTrue(coders.get(1) instanceof RSRawErasureCoderFactory);
+ assertInstanceOf(NativeRSRawErasureCoderFactory.class, coders.get(0));
+ assertInstanceOf(RSRawErasureCoderFactory.class, coders.get(1));
coders = CodecRegistry.getInstance().
getCoders(ECReplicationConfig.EcCodec.XOR.name().toLowerCase());
assertEquals(2, coders.size());
- assertTrue(coders.get(0) instanceof NativeXORRawErasureCoderFactory);
- assertTrue(coders.get(1) instanceof XORRawErasureCoderFactory);
+ assertInstanceOf(NativeXORRawErasureCoderFactory.class, coders.get(0));
+ assertInstanceOf(XORRawErasureCoderFactory.class, coders.get(1));
}
@Test
@@ -108,8 +107,8 @@ public String getCodecName() {
List rsCoders = CodecRegistry.getInstance().
getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
assertEquals(2, rsCoders.size());
- assertTrue(rsCoders.get(0) instanceof NativeRSRawErasureCoderFactory);
- assertTrue(rsCoders.get(1) instanceof RSRawErasureCoderFactory);
+ assertInstanceOf(NativeRSRawErasureCoderFactory.class, rsCoders.get(0));
+ assertInstanceOf(RSRawErasureCoderFactory.class, rsCoders.get(1));
// check RS coder names
String[] rsCoderNames = CodecRegistry.getInstance().
@@ -139,21 +138,21 @@ public void testGetCoderByName() {
RawErasureCoderFactory coder = CodecRegistry.getInstance().
getCoderByName(ECReplicationConfig.EcCodec.RS.name().toLowerCase(),
RSRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof RSRawErasureCoderFactory);
+ assertInstanceOf(RSRawErasureCoderFactory.class, coder);
coder = CodecRegistry.getInstance()
.getCoderByName(ECReplicationConfig.EcCodec.RS.name().toLowerCase(),
NativeRSRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof NativeRSRawErasureCoderFactory);
+ assertInstanceOf(NativeRSRawErasureCoderFactory.class, coder);
coder = CodecRegistry.getInstance()
.getCoderByName(ECReplicationConfig.EcCodec.XOR.name().toLowerCase(),
XORRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof XORRawErasureCoderFactory);
+ assertInstanceOf(XORRawErasureCoderFactory.class, coder);
coder = CodecRegistry.getInstance()
.getCoderByName(ECReplicationConfig.EcCodec.XOR.name().toLowerCase(),
NativeXORRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof NativeXORRawErasureCoderFactory);
+ assertInstanceOf(NativeXORRawErasureCoderFactory.class, coder);
}
}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
index 1f8cff43385..8bea0bf7b9c 100644
--- a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
@@ -24,9 +24,9 @@
import java.io.IOException;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -93,13 +93,9 @@ protected void testCoding(boolean usingDirectBuffer) {
protected void testCodingWithBadInput(boolean usingDirectBuffer) {
this.usingDirectBuffer = usingDirectBuffer;
prepareCoders(true);
-
- try {
- performTestCoding(baseChunkSize, false, true, false);
- fail("Encoding test with bad input should fail");
- } catch (Exception e) {
- // Expected
- }
+ assertThrows(Exception.class,
+ () -> performTestCoding(baseChunkSize, false, true, false),
+ "Encoding test with bad input should fail");
}
/**
@@ -109,13 +105,9 @@ protected void testCodingWithBadInput(boolean usingDirectBuffer) {
protected void testCodingWithBadOutput(boolean usingDirectBuffer) {
this.usingDirectBuffer = usingDirectBuffer;
prepareCoders(true);
-
- try {
- performTestCoding(baseChunkSize, false, false, true);
- fail("Decoding test with bad output should fail");
- } catch (Exception e) {
- // Expected
- }
+ assertThrows(Exception.class,
+ () -> performTestCoding(baseChunkSize, false, false, true),
+ "Decoding test with bad output should fail");
}
/**
@@ -132,30 +124,19 @@ void testAfterRelease() throws Exception {
final ECChunk[] parity = prepareParityChunksForEncoding();
IOException ioException = assertThrows(IOException.class,
() -> encoder.encode(data, parity));
- assertTrue(ioException.getMessage().contains("closed"));
+ assertThat(ioException.getMessage()).contains("closed");
decoder.release();
final ECChunk[] in = prepareInputChunksForDecoding(data, parity);
final ECChunk[] out = prepareOutputChunksForDecoding();
ioException = assertThrows(IOException.class,
() -> decoder.decode(in, getErasedIndexesForDecoding(), out));
- assertTrue(ioException.getMessage().contains("closed"));
+ assertThat(ioException.getMessage()).contains("closed");
}
@Test
public void testCodingWithErasingTooMany() {
- try {
- testCoding(true);
- fail("Decoding test erasing too many should fail");
- } catch (Exception e) {
- // Expected
- }
-
- try {
- testCoding(false);
- fail("Decoding test erasing too many should fail");
- } catch (Exception e) {
- // Expected
- }
+ assertThrows(Exception.class, () -> testCoding(true), "Decoding test erasing too many should fail");
+ assertThrows(Exception.class, () -> testCoding(false), "Decoding test erasing too many should fail");
}
@Test
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index 8ad0d11d021..5ead355066d 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
@@ -113,7 +112,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.bouncycastle</groupId>
- <artifactId>bcprov-jdk15on</artifactId>
+ <artifactId>bcprov-jdk18on</artifactId>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 330cfae30b2..84a0fa4886c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto;
@@ -55,6 +56,9 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainersOnDecomNodeProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineBatchRequestProto;
@@ -89,6 +93,8 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerRequestProto;
@@ -114,6 +120,7 @@
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer;
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ProtobufUtils;
import java.io.Closeable;
import java.io.IOException;
@@ -123,6 +130,7 @@
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
+import java.util.UUID;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerResponseProto.Status.CONTAINER_ALREADY_CLOSED;
@@ -455,6 +463,23 @@ public void deleteContainer(long containerID)
}
+ @Override
+ public Map<String, List<ContainerID>> getContainersOnDecomNode(DatanodeDetails dn) throws IOException {
+ GetContainersOnDecomNodeRequestProto request = GetContainersOnDecomNodeRequestProto.newBuilder()
+ .setDatanodeDetails(dn.getProtoBufMessage()).build();
+ GetContainersOnDecomNodeResponseProto response = submitRequest(Type.GetContainersOnDecomNode,
+ builder -> builder.setGetContainersOnDecomNodeRequest(request)).getGetContainersOnDecomNodeResponse();
+ Map<String, List<ContainerID>> containerMap = new HashMap<>();
+ for (ContainersOnDecomNodeProto containersProto : response.getContainersOnDecomNodeList()) {
+ List<ContainerID> containerIds = new ArrayList<>();
+ for (HddsProtos.ContainerID id : containersProto.getIdList()) {
+ containerIds.add(ContainerID.getFromProtobuf(id));
+ }
+ containerMap.put(containersProto.getName(), containerIds);
+ }
+ return containerMap;
+ }
+
/**
* Queries a list of Nodes based on their operational state or health state.
* Passing a null for either value acts as a wildcard for that state.
@@ -486,6 +511,18 @@ public List queryNode(
return response.getDatanodesList();
}
+ @Override
+ public HddsProtos.Node queryNode(UUID uuid) throws IOException {
+ SingleNodeQueryRequestProto request = SingleNodeQueryRequestProto.newBuilder()
+ .setUuid(ProtobufUtils.toProtobuf(uuid))
+ .build();
+ SingleNodeQueryResponseProto response =
+ submitRequest(Type.SingleNodeQuery,
+ builder -> builder.setSingleNodeQueryRequest(request))
+ .getSingleNodeQueryResponse();
+ return response.getDatanode();
+ }
+
/**
* Attempts to decommission the list of nodes.
* @param nodes The list of hostnames or hostname:ports to decommission
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
index 08ed39d7f4b..5bc9cd9d06c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
@@ -19,7 +19,7 @@
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import java.util.UUID;
/**
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java
index 3a1df1bd865..b78604643e5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
import org.apache.hadoop.hdds.utils.db.Proto2Codec;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.io.Serializable;
import java.security.cert.X509Certificate;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java
index d565eedae43..765cf96e2f9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/profile/DefaultProfile.java
@@ -46,7 +46,6 @@
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import static org.apache.hadoop.hdds.function.Predicates.yesBi;
import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_clientAuth;
import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_serverAuth;
@@ -61,9 +60,9 @@ public class DefaultProfile implements PKIProfile {
private static final BiPredicate
VALIDATE_KEY_USAGE = DefaultProfile::validateKeyUsage;
private static final BiPredicate
- VALIDATE_AUTHORITY_KEY_IDENTIFIER = yesBi();
- private static final BiPredicate VALIDATE_LOGO_TYPE =
- yesBi();
+ VALIDATE_AUTHORITY_KEY_IDENTIFIER = (t, u) -> true;
+ private static final BiPredicate VALIDATE_LOGO_TYPE
+ = (t, u) -> true;
private static final Logger LOG =
LoggerFactory.getLogger(DefaultProfile.class);
private static final BiPredicate
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index 403295aebf2..d3db81c71b6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -245,8 +245,11 @@ private synchronized void readCertificateFile(Path filePath) {
updateCachedData(fileName, CAType.SUBORDINATE, this::updateCachedSubCAId);
updateCachedData(fileName, CAType.ROOT, this::updateCachedRootCAId);
- getLogger().info("Added certificate {} from file: {}.", cert,
+ getLogger().info("Added certificate {} from file: {}.", readCertSerialId,
filePath.toAbsolutePath());
+ if (getLogger().isDebugEnabled()) {
+ getLogger().debug("Certificate: {}", cert);
+ }
} catch (java.security.cert.CertificateException
| IOException | IndexOutOfBoundsException e) {
getLogger().error("Error reading certificate from file: {}.",
@@ -487,7 +490,6 @@ private X509Certificate getCertificateFromScm(String certId)
* @param data - Data to sign.
* @throws CertificateException - on Error.
*/
- @Override
public byte[] signData(byte[] data) throws CertificateException {
try {
Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(),
@@ -582,7 +584,6 @@ public CertificateSignRequest.Builder getCSRBuilder()
* @param caType - Is CA certificate.
* @throws CertificateException - on Error.
*/
- @Override
public void storeCertificate(String pemEncodedCert,
CAType caType) throws CertificateException {
CertificateCodec certificateCodec = new CertificateCodec(securityConfig,
@@ -992,7 +993,6 @@ public List getCAList() {
}
}
- @Override
public List listCA() throws IOException {
pemEncodedCACertsLock.lock();
try {
@@ -1024,8 +1024,7 @@ public List updateCAList() throws IOException {
public synchronized KeyStoresFactory getServerKeyStoresFactory()
throws CertificateException {
if (serverKeyStoresFactory == null) {
- serverKeyStoresFactory = SecurityUtil.getServerKeyStoresFactory(
- securityConfig, this, true);
+ serverKeyStoresFactory = SecurityUtil.getServerKeyStoresFactory(this, true);
}
return serverKeyStoresFactory;
}
@@ -1034,8 +1033,7 @@ public synchronized KeyStoresFactory getServerKeyStoresFactory()
public KeyStoresFactory getClientKeyStoresFactory()
throws CertificateException {
if (clientKeyStoresFactory == null) {
- clientKeyStoresFactory = SecurityUtil.getClientKeyStoresFactory(
- securityConfig, this, true);
+ clientKeyStoresFactory = SecurityUtil.getClientKeyStoresFactory(this, true);
}
return clientKeyStoresFactory;
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
index 02a9d12ebda..134c841e697 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
import org.apache.hadoop.hdds.utils.db.Proto2Codec;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import java.security.cert.CRLException;
import java.security.cert.X509CRL;
@@ -139,7 +139,7 @@ public Instant getRevocationTime() {
* from being compared to this object.
*/
@Override
- public int compareTo(@NotNull CRLInfo o) {
+ public int compareTo(@Nonnull CRLInfo o) {
return this.compare(this, o);
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
index 5f34e8dfe03..96fb2a7fd91 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java
@@ -95,10 +95,10 @@ public static PublicKey getPublicKey(byte[] encodedKey,
}
public static KeyStoresFactory getServerKeyStoresFactory(
- SecurityConfig securityConfig, CertificateClient client,
+ CertificateClient client,
boolean requireClientAuth) throws CertificateException {
PemFileBasedKeyStoresFactory factory =
- new PemFileBasedKeyStoresFactory(securityConfig, client);
+ new PemFileBasedKeyStoresFactory(client);
try {
factory.init(KeyStoresFactory.Mode.SERVER, requireClientAuth);
} catch (IOException | GeneralSecurityException e) {
@@ -109,10 +109,10 @@ public static KeyStoresFactory getServerKeyStoresFactory(
}
public static KeyStoresFactory getClientKeyStoresFactory(
- SecurityConfig securityConfig, CertificateClient client,
+ CertificateClient client,
boolean requireClientAuth) throws CertificateException {
PemFileBasedKeyStoresFactory factory =
- new PemFileBasedKeyStoresFactory(securityConfig, client);
+ new PemFileBasedKeyStoresFactory(client);
try {
factory.init(KeyStoresFactory.Mode.CLIENT, requireClientAuth);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java
index 9928d90570f..997bdf6cf2e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/ByteStringCodec.java
@@ -20,7 +20,7 @@
import com.google.protobuf.ByteString;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
/**
* Codec to serialize/deserialize a {@link ByteString}.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index fe495e7b061..32fcbfec6e4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -83,6 +83,9 @@ public final class DBStoreBuilder {
// The column family options that will be used for any column families
// added by name only (without specifying options).
private ManagedColumnFamilyOptions defaultCfOptions;
+ // Initialize the Statistics instance if ROCKSDB_STATISTICS enabled
+ private ManagedStatistics statistics;
+
private String dbname;
private Path dbPath;
private String dbJmxBeanNameName;
@@ -188,6 +191,11 @@ private void setDBOptionsProps(ManagedDBOptions dbOptions) {
if (maxNumberOfOpenFiles != null) {
dbOptions.setMaxOpenFiles(maxNumberOfOpenFiles);
}
+ if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
+ statistics = new ManagedStatistics();
+ statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+ dbOptions.setStatistics(statistics);
+ }
}
/**
@@ -217,7 +225,7 @@ public DBStore build() throws IOException {
throw new IOException("The DB destination directory should exist.");
}
- return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
+ return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs,
registry.build(), openReadOnly, maxFSSnapshots, dbJmxBeanNameName,
enableCompactionDag, maxDbUpdatesSizeThreshold, createCheckpointDirs,
configuration, threadNamePrefix);
@@ -413,13 +421,6 @@ protected void log(InfoLogLevel infoLogLevel, String s) {
dbOptions.setWalTtlSeconds(rocksDBConfiguration.getWalTTL());
dbOptions.setWalSizeLimitMB(rocksDBConfiguration.getWalSizeLimit());
- // Create statistics.
- if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
- ManagedStatistics statistics = new ManagedStatistics();
- statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
- dbOptions.setStatistics(statistics);
- }
-
return dbOptions;
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 71cd3716e56..6760eb47f48 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -26,7 +26,6 @@
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.HashMap;
import java.util.Map;
import java.util.Set;
@@ -37,6 +36,7 @@
import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics;
import org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
@@ -78,10 +78,11 @@ public class RDBStore implements DBStore {
// number in request to avoid increase in heap memory.
private final long maxDbUpdatesSizeThreshold;
private final ManagedDBOptions dbOptions;
+ private final ManagedStatistics statistics;
private final String threadNamePrefix;
@SuppressWarnings("parameternumber")
- public RDBStore(File dbFile, ManagedDBOptions dbOptions,
+ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics,
ManagedWriteOptions writeOptions, Set families,
CodecRegistry registry, boolean readOnly, int maxFSSnapshots,
String dbJmxBeanName, boolean enableCompactionDag,
@@ -98,6 +99,7 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions,
codecRegistry = registry;
dbLocation = dbFile;
this.dbOptions = dbOptions;
+ this.statistics = statistics;
try {
if (enableCompactionDag) {
@@ -120,8 +122,8 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions,
if (dbJmxBeanName == null) {
dbJmxBeanName = dbFile.getName();
}
- metrics = RocksDBStoreMetrics.create(dbOptions.statistics(), db,
- dbJmxBeanName);
+ // Use statistics instead of dbOptions.statistics() to avoid repeated init.
+ metrics = RocksDBStoreMetrics.create(statistics, db, dbJmxBeanName);
if (metrics == null) {
LOG.warn("Metrics registration failed during RocksDB init, " +
"db path :{}", dbJmxBeanName);
@@ -198,6 +200,7 @@ public String getSnapshotsParentDir() {
return snapshotsParentDir;
}
+ @Override
public RocksDBCheckpointDiffer getRocksDBCheckpointDiffer() {
return rocksDBCheckpointDiffer;
}
@@ -231,6 +234,9 @@ public void close() throws IOException {
RocksDBCheckpointDifferHolder
.invalidateCacheEntry(rocksDBCheckpointDiffer.getMetadataDir());
}
+ if (statistics != null) {
+ IOUtils.close(LOG, statistics);
+ }
IOUtils.close(LOG, db);
}
@@ -344,13 +350,7 @@ public File getDbLocation() {
@Override
public Map getTableNames() {
- Map tableNames = new HashMap<>();
- StringCodec stringCodec = StringCodec.get();
-
- for (ColumnFamily columnFamily : getColumnFamilies()) {
- tableNames.put(columnFamily.getID(), columnFamily.getName(stringCodec));
- }
- return tableNames;
+ return db.getColumnFamilyNames();
}
public Collection getColumnFamilies() {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 4dd1042fde2..19f60d914f3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
import org.apache.ozone.rocksdiff.RocksDiffUtils;
import org.apache.ratis.util.UncheckedAutoCloseable;
+import org.apache.ratis.util.MemoizedSupplier;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.Holder;
@@ -51,7 +52,6 @@
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
@@ -66,7 +66,6 @@
import java.util.stream.Stream;
import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.hadoop.hdds.StringUtils.bytes2String;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions.closeDeeply;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator.managed;
import static org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator.managed;
@@ -86,10 +85,14 @@ public final class RocksDatabase implements Closeable {
}
private static final ManagedReadOptions DEFAULT_READ_OPTION =
new ManagedReadOptions();
- private static Map> dbNameToCfHandleMap =
- new HashMap<>();
- private final StackTraceElement[] stackTrace;
+ static String bytes2String(byte[] bytes) {
+ return StringCodec.get().fromPersistedFormat(bytes);
+ }
+
+ static String bytes2String(ByteBuffer bytes) {
+ return StringCodec.get().decode(bytes);
+ }
static IOException toIOException(Object name, String op, RocksDBException e) {
return HddsServerUtil.toIOException(name + ": Failed to " + op, e);
@@ -158,15 +161,7 @@ static RocksDatabase open(File dbFile, ManagedDBOptions dbOptions,
db = ManagedRocksDB.open(dbOptions, dbFile.getAbsolutePath(),
descriptors, handles);
}
- dbNameToCfHandleMap.put(db.get().getName(), handles);
- // init a column family map.
- AtomicLong counter = new AtomicLong(0);
- for (ColumnFamilyHandle h : handles) {
- final ColumnFamily f = new ColumnFamily(h, counter);
- columnFamilies.put(f.getName(), f);
- }
- return new RocksDatabase(dbFile, db, dbOptions, writeOptions,
- descriptors, Collections.unmodifiableMap(columnFamilies), counter);
+ return new RocksDatabase(dbFile, db, dbOptions, writeOptions, descriptors, handles);
} catch (RocksDBException e) {
close(columnFamilies, db, descriptors, writeOptions, dbOptions);
throw toIOException(RocksDatabase.class, "open " + dbFile, e);
@@ -260,17 +255,13 @@ public void close() throws IOException {
*
* @see ColumnFamilyHandle
*/
- public static final class ColumnFamily {
+ public final class ColumnFamily {
private final byte[] nameBytes;
- private AtomicLong counter;
private final String name;
private final ColumnFamilyHandle handle;
- private AtomicBoolean isClosed = new AtomicBoolean(false);
- public ColumnFamily(ColumnFamilyHandle handle, AtomicLong counter)
- throws RocksDBException {
+ private ColumnFamily(ColumnFamilyHandle handle) throws RocksDBException {
this.nameBytes = handle.getName();
- this.counter = counter;
this.name = bytes2String(nameBytes);
this.handle = handle;
LOG.debug("new ColumnFamily for {}", name);
@@ -289,10 +280,6 @@ public ColumnFamilyHandle getHandle() {
return handle;
}
- public int getID() {
- return getHandle().getID();
- }
-
public void batchDelete(ManagedWriteBatch writeBatch, byte[] key)
throws IOException {
try (UncheckedAutoCloseable ignored = acquire()) {
@@ -331,10 +318,6 @@ public void batchPut(ManagedWriteBatch writeBatch, ByteBuffer key,
}
}
- public void markClosed() {
- isClosed.set(true);
- }
-
private UncheckedAutoCloseable acquire() throws IOException {
if (isClosed.get()) {
throw new IOException("Rocks Database is closed");
@@ -353,27 +336,49 @@ public String toString() {
}
private final String name;
+ private final Throwable creationStackTrace = new Throwable("Object creation stack trace");
+
private final ManagedRocksDB db;
private final ManagedDBOptions dbOptions;
private final ManagedWriteOptions writeOptions;
private final List descriptors;
+ /** column family names -> {@link ColumnFamily}. */
private final Map columnFamilies;
+ /** {@link ColumnFamilyHandle#getID()} -> column family names. */
+ private final Supplier
- false
8
8
+ https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz
+ https://zlib.net/fossils/zlib-${zlib.version}.tar.gz
@@ -134,7 +135,7 @@
wget
- https://zlib.net/fossils/zlib-${zlib.version}.tar.gz
+ ${zlib.url}
zlib-${zlib.version}.tar.gz
${project.build.directory}/zlib
@@ -146,7 +147,7 @@
wget
- https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz
+ ${bzip2.url}
bzip2-v${bzip2.version}.tar.gz
${project.build.directory}/bzip2
@@ -220,6 +221,7 @@
+
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
index ed5873770e3..8fc4e83e7a1 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java
@@ -33,12 +33,12 @@
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
import static org.apache.hadoop.hdds.utils.NativeLibraryLoader.NATIVE_LIB_TMP_DIR;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.same;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.CALLS_REAL_METHODS;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.same;
import static org.mockito.Mockito.mockStatic;
/**
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
index d8fefeb9b75..d2796c19fc5 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
@@ -54,9 +54,9 @@
import java.util.stream.Stream;
import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
@@ -234,7 +234,7 @@ protected KeyValue getTransformedValue(
ManagedSSTDumpIterator.KeyValue r = iterator.next();
String key = new String(r.getKey(), StandardCharsets.UTF_8);
Pair recordKey = Pair.of(key, r.getType());
- assertTrue(expectedKeys.containsKey(recordKey));
+ assertThat(expectedKeys).containsKey(recordKey);
assertEquals(Optional.ofNullable(expectedKeys
.get(recordKey)).orElse(""),
new String(r.getValue(), StandardCharsets.UTF_8));
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
index 3d5967e9c0c..829c0d6ac36 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
@@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index e830106e570..97d015fb239 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -172,7 +172,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
private ColumnFamilyHandle snapshotInfoTableCFHandle;
private final AtomicInteger tarballRequestCount;
- private final String dagPruningServiceName = "CompactionDagPruningService";
+ private static final String DAG_PRUNING_SERVICE_NAME = "CompactionDagPruningService";
private AtomicBoolean suspended;
private ColumnFamilyHandle compactionLogTableCFHandle;
@@ -230,7 +230,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
TimeUnit.MILLISECONDS);
if (pruneCompactionDagDaemonRunIntervalInMs > 0) {
- this.scheduler = new Scheduler(dagPruningServiceName,
+ this.scheduler = new Scheduler(DAG_PRUNING_SERVICE_NAME,
true, 1);
this.scheduler.scheduleWithFixedDelay(
@@ -307,7 +307,7 @@ public void close() throws Exception {
if (!closed) {
closed = true;
if (scheduler != null) {
- LOG.info("Shutting down {}.", dagPruningServiceName);
+ LOG.info("Shutting down {}.", DAG_PRUNING_SERVICE_NAME);
scheduler.close();
}
}
@@ -1421,16 +1421,21 @@ public String getCompactionLogDir() {
* those are not needed to generate snapshot diff. These files are basically
* non-leaf nodes of the DAG.
*/
- public synchronized void pruneSstFiles() {
+ public void pruneSstFiles() {
if (!shouldRun()) {
return;
}
Set nonLeafSstFiles;
- nonLeafSstFiles = forwardCompactionDAG.nodes().stream()
- .filter(node -> !forwardCompactionDAG.successors(node).isEmpty())
- .map(node -> node.getFileName())
- .collect(Collectors.toSet());
+ // This is synchronized because compaction thread can update the compactionDAG and can be in situation
+ // when nodes are added to the graph, but arcs are still in progress.
+ // Hence, the lock is taken.
+ synchronized (this) {
+ nonLeafSstFiles = forwardCompactionDAG.nodes().stream()
+ .filter(node -> !forwardCompactionDAG.successors(node).isEmpty())
+ .map(node -> node.getFileName())
+ .collect(Collectors.toSet());
+ }
if (CollectionUtils.isNotEmpty(nonLeafSstFiles)) {
LOG.info("Removing SST files: {} as part of SST file pruning.",
@@ -1448,8 +1453,13 @@ public void incrementTarballRequestCount() {
tarballRequestCount.incrementAndGet();
}
- public void decrementTarballRequestCount() {
- tarballRequestCount.decrementAndGet();
+ public void decrementTarballRequestCountAndNotify() {
+ // Synchronized block is used to ensure that lock is on the same instance notifyAll is being called.
+ synchronized (this) {
+ tarballRequestCount.decrementAndGet();
+ // Notify compaction threads to continue.
+ notifyAll();
+ }
}
public boolean shouldRun() {
@@ -1517,8 +1527,7 @@ public static RocksDBCheckpointDiffer getInstance(
* for cache.
*/
public static void invalidateCacheEntry(String cacheKey) {
- IOUtils.closeQuietly(INSTANCE_MAP.get(cacheKey));
- INSTANCE_MAP.remove(cacheKey);
+ IOUtils.close(LOG, INSTANCE_MAP.remove(cacheKey));
}
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index f70b85daebb..b01e4cc2e30 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -96,6 +96,7 @@
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_DAG_LIVE_NODES;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_READ_ALL_DB_KEYS;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -125,10 +126,10 @@ public class TestRocksDBCheckpointDiffer {
private final List> colHandles = new ArrayList<>();
- private final String activeDbDirName = "./rocksdb-data";
- private final String metadataDirName = "./metadata";
- private final String compactionLogDirName = "compaction-log";
- private final String sstBackUpDirName = "compaction-sst-backup";
+ private static final String ACTIVE_DB_DIR_NAME = "./rocksdb-data";
+ private static final String METADATA_DIR_NAME = "./metadata";
+ private static final String COMPACTION_LOG_DIR_NAME = "compaction-log";
+ private static final String SST_BACK_UP_DIR_NAME = "compaction-sst-backup";
private File activeDbDir;
private File metadataDirDir;
private File compactionLogDir;
@@ -149,17 +150,17 @@ public void init() throws RocksDBException {
// Test class log level. Set to DEBUG for verbose output
GenericTestUtils.setLogLevel(TestRocksDBCheckpointDiffer.LOG, Level.INFO);
- activeDbDir = new File(activeDbDirName);
- createDir(activeDbDir, activeDbDirName);
+ activeDbDir = new File(ACTIVE_DB_DIR_NAME);
+ createDir(activeDbDir, ACTIVE_DB_DIR_NAME);
- metadataDirDir = new File(metadataDirName);
- createDir(metadataDirDir, metadataDirName);
+ metadataDirDir = new File(METADATA_DIR_NAME);
+ createDir(metadataDirDir, METADATA_DIR_NAME);
- compactionLogDir = new File(metadataDirName, compactionLogDirName);
- createDir(compactionLogDir, metadataDirName + "/" + compactionLogDirName);
+ compactionLogDir = new File(METADATA_DIR_NAME, COMPACTION_LOG_DIR_NAME);
+ createDir(compactionLogDir, METADATA_DIR_NAME + "/" + COMPACTION_LOG_DIR_NAME);
- sstBackUpDir = new File(metadataDirName, sstBackUpDirName);
- createDir(sstBackUpDir, metadataDirName + "/" + sstBackUpDirName);
+ sstBackUpDir = new File(METADATA_DIR_NAME, SST_BACK_UP_DIR_NAME);
+ createDir(sstBackUpDir, METADATA_DIR_NAME + "/" + SST_BACK_UP_DIR_NAME);
config = mock(ConfigurationSource.class);
@@ -173,10 +174,10 @@ public void init() throws RocksDBException {
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS)).thenReturn(0L);
- rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(metadataDirName,
- sstBackUpDirName,
- compactionLogDirName,
- activeDbDirName,
+ rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME,
+ SST_BACK_UP_DIR_NAME,
+ COMPACTION_LOG_DIR_NAME,
+ ACTIVE_DB_DIR_NAME,
config);
ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
@@ -188,7 +189,7 @@ public void init() throws RocksDBException {
.setCreateMissingColumnFamilies(true);
rocksDBCheckpointDiffer.setRocksDBForCompactionTracking(dbOptions);
- activeRocksDB = RocksDB.open(dbOptions, activeDbDirName, cfDescriptors,
+ activeRocksDB = RocksDB.open(dbOptions, ACTIVE_DB_DIR_NAME, cfDescriptors,
cfHandles);
keyTableCFHandle = cfHandles.get(1);
directoryTableCFHandle = cfHandles.get(2);
@@ -517,7 +518,7 @@ public void testGetSSTDiffListWithoutDB(String description,
@Test
void testDifferWithDB() throws Exception {
writeKeysAndCheckpointing();
- readRocksDBInstance(activeDbDirName, activeRocksDB, null,
+ readRocksDBInstance(ACTIVE_DB_DIR_NAME, activeRocksDB, null,
rocksDBCheckpointDiffer);
if (LOG.isDebugEnabled()) {
@@ -613,7 +614,7 @@ private void createCheckpoint(RocksDB rocksDB) throws RocksDBException {
}
cpDirList.add(dir);
- createCheckPoint(activeDbDirName, cpPath, rocksDB);
+ createCheckPoint(ACTIVE_DB_DIR_NAME, cpPath, rocksDB);
final UUID snapshotId = UUID.randomUUID();
List colHandle = new ArrayList<>();
colHandles.add(colHandle);
@@ -1272,7 +1273,7 @@ public void testPruneOlderSnapshotsWithCompactionHistory(
if (compactionLogs != null) {
for (int i = 0; i < compactionLogs.size(); i++) {
- String compactionFileName = metadataDirName + "/" + compactionLogDirName
+ String compactionFileName = METADATA_DIR_NAME + "/" + COMPACTION_LOG_DIR_NAME
+ "/0000" + i + COMPACTION_LOG_FILE_NAME_SUFFIX;
File compactionFile = new File(compactionFileName);
Files.write(compactionFile.toPath(),
@@ -1349,10 +1350,10 @@ private void waitForLock(RocksDBCheckpointDiffer differ,
});
// Confirm that the consumer doesn't finish with lock taken.
assertThrows(TimeoutException.class,
- () -> future.get(5000, TimeUnit.MILLISECONDS));
+ () -> future.get(1000, TimeUnit.MILLISECONDS));
}
// Confirm consumer finishes when unlocked.
- assertTrue(future.get(1000, TimeUnit.MILLISECONDS));
+ assertTrue(future.get(100, TimeUnit.MILLISECONDS));
}
private static Stream sstFilePruningScenarios() {
@@ -1490,8 +1491,8 @@ public void testSstFilePruning(
Path compactionLogFilePath = null;
if (compactionLog != null) {
- String compactionLogFileName = metadataDirName + "/" +
- compactionLogDirName + "/compaction_log" +
+ String compactionLogFileName = METADATA_DIR_NAME + "/" +
+ COMPACTION_LOG_DIR_NAME + "/compaction_log" +
COMPACTION_LOG_FILE_NAME_SUFFIX;
compactionLogFilePath = new File(compactionLogFileName).toPath();
createFileWithContext(compactionLogFileName, compactionLog);
@@ -1511,7 +1512,7 @@ public void testSstFilePruning(
Set actualFileSetAfterPruning;
try (Stream pathStream = Files.list(
- Paths.get(metadataDirName + "/" + sstBackUpDirName))
+ Paths.get(METADATA_DIR_NAME + "/" + SST_BACK_UP_DIR_NAME))
.filter(e -> e.toString().toLowerCase()
.endsWith(SST_FILE_EXTENSION))
.sorted()) {
@@ -1867,7 +1868,7 @@ public void testDagOnlyContainsDesiredCfh()
createKeys(compactionLogTableCFHandle, "logName-", "logValue-", 100);
// Make sures that some compaction happened.
- assertFalse(rocksDBCheckpointDiffer.getCompactionNodeMap().isEmpty());
+ assertThat(rocksDBCheckpointDiffer.getCompactionNodeMap()).isNotEmpty();
List compactionNodes = rocksDBCheckpointDiffer.
getCompactionNodeMap().values().stream()
@@ -1877,7 +1878,7 @@ public void testDagOnlyContainsDesiredCfh()
// CompactionNodeMap should not contain any node other than 'keyTable',
// 'directoryTable' and 'fileTable' column families nodes.
- assertTrue(compactionNodes.isEmpty());
+ assertThat(compactionNodes).isEmpty();
// Assert that only 'keyTable', 'directoryTable' and 'fileTable'
// column families SST files are backed-up.
@@ -1889,7 +1890,7 @@ public void testDagOnlyContainsDesiredCfh()
fileReader.open(path.toAbsolutePath().toString());
String columnFamily = StringUtils.bytes2String(
fileReader.getTableProperties().getColumnFamilyName());
- assertTrue(COLUMN_FAMILIES_TO_TRACK_IN_DAG.contains(columnFamily));
+ assertThat(COLUMN_FAMILIES_TO_TRACK_IN_DAG).contains(columnFamily);
} catch (RocksDBException rocksDBException) {
fail("Failed to read file: " + path.toAbsolutePath());
}
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index b42262fbabd..bb2bdec1405 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
@@ -68,7 +67,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.bouncycastle</groupId>
- <artifactId>bcprov-jdk15on</artifactId>
+ <artifactId>bcprov-jdk18on</artifactId>
io.dropwizard.metrics
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java
similarity index 86%
rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java
index 76439a78464..e1d0fdd35aa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hdds.scm;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import java.util.List;
@@ -26,6 +27,15 @@
*/
public interface PipelineChoosePolicy {
+ /**
+ * Initializes the policy with the given NodeManager, if the policy needs one.
+ * @return the initialized policy.
+ */
+ default PipelineChoosePolicy init(final NodeManager nodeManager) {
+ // override if the policy requires nodeManager
+ return this;
+ }
+
/**
* Given an initial list of pipelines, return one of the pipelines.
*
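
The new default init(NodeManager) hook lets a policy opt in to node awareness without forcing every implementation to change. A minimal sketch of an opting-in policy follows; NodeAwarePolicyBase is a hypothetical name, not part of this patch, and only the init override is shown since the pipeline-selection method's full signature is not visible in this hunk.

    import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
    import org.apache.hadoop.hdds.scm.node.NodeManager;

    // Hypothetical sketch (not part of this patch): a policy base class that
    // keeps the NodeManager handed in through the new default init() hook.
    public abstract class NodeAwarePolicyBase implements PipelineChoosePolicy {
      protected NodeManager nodeManager;

      @Override
      public PipelineChoosePolicy init(final NodeManager nodeManager) {
        this.nodeManager = nodeManager;
        // Returning 'this' allows fluent wiring: policy = somePolicy.init(nm).
        return this;
      }
    }
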
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
index 27a97a0349d..cc6147c7a64 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.ha.ConfUtils;
import org.apache.hadoop.util.StringUtils;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -204,7 +204,7 @@ public static boolean shouldRemovePeers(final ConfigurationSource conf) {
ScmConfigKeys.OZONE_SCM_DATANODE_DISALLOW_SAME_PEERS_DEFAULT));
}
- @NotNull
+ @Nonnull
public static List> initContainerReportQueue(
OzoneConfiguration configuration) {
int threadPoolSize = configuration.getInt(getContainerReportConfPrefix()
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 1260ea6a006..5f42fb00e45 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -97,12 +97,9 @@ public BlockManagerImpl(final ConfigurationSource conf,
// SCM block deleting transaction log and deleting service.
deletedBlockLog = new DeletedBlockLogImpl(conf,
+ scm,
scm.getContainerManager(),
- scm.getScmHAManager().getRatisServer(),
- scm.getScmMetadataStore().getDeletedBlocksTXTable(),
scm.getScmHAManager().getDBTransactionBuffer(),
- scm.getScmContext(),
- scm.getSequenceIdGen(),
metrics);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index ac64f6e973e..9d5377b9e3e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -46,9 +46,9 @@
import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
-import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -95,14 +95,10 @@ public class DeletedBlockLogImpl
private static final int LIST_ALL_FAILED_TRANSACTIONS = -1;
- @SuppressWarnings("parameternumber")
public DeletedBlockLogImpl(ConfigurationSource conf,
+ StorageContainerManager scm,
ContainerManager containerManager,
- SCMRatisServer ratisServer,
- Table deletedBlocksTXTable,
DBTransactionBuffer dbTxBuffer,
- SCMContext scmContext,
- SequenceIdGenerator sequenceIdGen,
ScmBlockDeletingServiceMetrics metrics) {
maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT);
@@ -112,17 +108,17 @@ public DeletedBlockLogImpl(ConfigurationSource conf,
this.deletedBlockLogStateManager = DeletedBlockLogStateManagerImpl
.newBuilder()
.setConfiguration(conf)
- .setDeletedBlocksTable(deletedBlocksTXTable)
+ .setDeletedBlocksTable(scm.getScmMetadataStore().getDeletedBlocksTXTable())
.setContainerManager(containerManager)
- .setRatisServer(ratisServer)
+ .setRatisServer(scm.getScmHAManager().getRatisServer())
.setSCMDBTransactionBuffer(dbTxBuffer)
.build();
- this.scmContext = scmContext;
- this.sequenceIdGen = sequenceIdGen;
+ this.scmContext = scm.getScmContext();
+ this.sequenceIdGen = scm.getSequenceIdGen();
this.metrics = metrics;
this.transactionStatusManager =
new SCMDeletedBlockTransactionStatusManager(deletedBlockLogStateManager,
- containerManager, scmContext, metrics, scmCommandTimeoutMs);
+ containerManager, this.scmContext, metrics, scmCommandTimeoutMs);
}
@Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java
index b43e91e0592..d0306211350 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java
@@ -201,7 +201,7 @@ protected void onSent(UUID dnId, long scmCmdId) {
}
protected void onDatanodeDead(UUID dnId) {
- LOG.info("Clean SCMCommand record for DN: {}", dnId);
+ LOG.info("Clean SCMCommand record for Datanode: {}", dnId);
scmCmdStatusRecord.remove(dnId);
}
@@ -252,12 +252,14 @@ private void updateStatus(UUID dnId, long scmCmdId,
CommandStatus.Status newStatus) {
Map recordForDn = scmCmdStatusRecord.get(dnId);
if (recordForDn == null) {
- LOG.warn("Unknown Datanode: {} scmCmdId {} newStatus {}",
+ LOG.warn("Unknown Datanode: {} Scm Command ID: {} report status {}",
dnId, scmCmdId, newStatus);
return;
}
if (recordForDn.get(scmCmdId) == null) {
- LOG.warn("Unknown SCM Command: {} Datanode {} newStatus {}",
+ // Because of delays in the DN report, the DN sometimes reports obsolete
+ // command statuses that have already been cleared by the SCM.
+ LOG.debug("Unknown SCM Command ID: {} Datanode: {} report status {}",
scmCmdId, dnId, newStatus);
return;
}
@@ -293,22 +295,23 @@ private void updateStatus(UUID dnId, long scmCmdId,
// which should not normally occur.
LOG.error("Received {} status for a command marked TO_BE_SENT. " +
"This indicates a potential issue in command handling. " +
- "SCM Command ID: {}, Datanode ID: {}, Current Status: {}",
+ "SCM Command ID: {}, Datanode: {}, Current status: {}",
newStatus, scmCmdId, dnId, oldStatus);
removeScmCommand(dnId, scmCmdId);
changed = true;
}
break;
default:
- LOG.error("Can not update to Unknown new Status: {}", newStatus);
+ LOG.error("Unexpected status from Datanode: {}. SCM Command ID: {} with status: {}.",
+ dnId, scmCmdId, newStatus);
break;
}
if (!changed) {
- LOG.warn("Cannot update illegal status for DN: {} ScmCommandId {} " +
- "Status From {} to {}", dnId, scmCmdId, oldStatus, newStatus);
+ LOG.warn("Cannot update illegal status for Datanode: {} SCM Command ID: {} " +
+ "status {} by DN report status {}", dnId, scmCmdId, oldStatus, newStatus);
} else {
- LOG.debug("Successful update DN: {} ScmCommandId {} Status From {} to" +
- " {}", dnId, scmCmdId, oldStatus, newStatus);
+ LOG.debug("Successful update Datanode: {} SCM Command ID: {} status From {} to" +
+ " {}, DN report status {}", dnId, scmCmdId, oldStatus, statusData.getStatus(), newStatus);
}
}
@@ -320,11 +323,8 @@ private void removeTimeoutScmCommand(UUID dnId,
if (updateTime != null &&
Duration.between(updateTime, now).toMillis() > timeoutMs) {
CmdStatusData state = removeScmCommand(dnId, scmCmdId);
- LOG.warn("Remove Timeout SCM BlockDeletionCommand {} for DN {} " +
- "after without update {}ms}", state, dnId, timeoutMs);
- } else {
- LOG.warn("Timeout SCM scmCmdIds {} for DN {} " +
- "after without update {}ms}", scmCmdIds, dnId, timeoutMs);
+ LOG.warn("SCM BlockDeletionCommand {} for Datanode: {} was removed after {}ms without update",
+ state, dnId, timeoutMs);
}
}
}
@@ -335,7 +335,7 @@ private CmdStatusData removeScmCommand(UUID dnId, long scmCmdId) {
return null;
}
CmdStatusData statusData = record.remove(scmCmdId);
- LOG.debug("Remove ScmCommand {} for DN: {} ", statusData, dnId);
+ LOG.debug("Remove ScmCommand {} for Datanode: {} ", statusData, dnId);
return statusData;
}
@@ -483,7 +483,7 @@ public void commitTransactions(
// Mostly likely it's a retried delete command response.
if (LOG.isDebugEnabled()) {
LOG.debug(
- "Transaction txId={} commit by dnId={} for containerID={}"
+ "Transaction txId: {} commit by Datanode: {} for ContainerId: {}"
+ " failed. Corresponding entry not found.", txID, dnId,
containerId);
}
@@ -508,13 +508,13 @@ public void commitTransactions(
transactionToDNsCommitMap.remove(txID);
transactionToRetryCountMap.remove(txID);
if (LOG.isDebugEnabled()) {
- LOG.debug("Purging txId={} from block deletion log", txID);
+ LOG.debug("Purging txId: {} from block deletion log", txID);
}
txIDsToBeDeleted.add(txID);
}
}
if (LOG.isDebugEnabled()) {
- LOG.debug("Datanode txId={} containerId={} committed by dnId={}",
+ LOG.debug("Datanode txId: {} ContainerId: {} committed by Datanode: {}",
txID, containerId, dnId);
}
} catch (IOException e) {
@@ -557,7 +557,7 @@ private void processSCMCommandStatus(List deleteBlockStatus,
lastStatus.put(cmdStatus.getCmdId(), cmdStatus);
summary.put(cmdStatus.getCmdId(), cmdStatus.getStatus());
});
- LOG.debug("CommandStatus {} from Datanode {} ", summary, dnID);
+ LOG.debug("CommandStatus {} from Datanode: {} ", summary, dnID);
for (Map.Entry entry : lastStatus.entrySet()) {
CommandStatus.Status status = entry.getValue().getStatus();
scmDeleteBlocksCommandStatusManager.updateStatusByDNCommandStatus(
@@ -568,11 +568,11 @@ private void processSCMCommandStatus(List deleteBlockStatus,
private boolean isTransactionFailed(DeleteBlockTransactionResult result) {
if (LOG.isDebugEnabled()) {
LOG.debug(
- "Got block deletion ACK from datanode, TXIDs={}, " + "success={}",
+ "Got block deletion ACK from datanode, TXIDs {}, " + "success {}",
result.getTxID(), result.getSuccess());
}
if (!result.getSuccess()) {
- LOG.warn("Got failed ACK for TXID={}, prepare to resend the "
+ LOG.warn("Got failed ACK for TXID {}, prepare to resend the "
+ "TX in next interval", result.getTxID());
return true;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index e9d260f743a..2da19b4ef20 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -20,7 +20,7 @@
import java.util.List;
import java.util.stream.Collectors;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
index 56da38ed577..78ebfd311dd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
@@ -39,33 +39,21 @@ public final class ContainerReplica implements Comparable {
private final UUID placeOfBirth;
private final int replicaIndex;
- private Long sequenceId;
+ private final Long sequenceId;
private final long keyCount;
private final long bytesUsed;
private final boolean isEmpty;
- @SuppressWarnings("parameternumber")
- private ContainerReplica(
- final ContainerID containerID,
- final ContainerReplicaProto.State state,
- final int replicaIndex,
- final DatanodeDetails datanode,
- final UUID originNodeId,
- long keyNum,
- long dataSize,
- boolean isEmpty) {
- this.containerID = containerID;
- this.state = state;
- this.datanodeDetails = datanode;
- this.placeOfBirth = originNodeId;
- this.keyCount = keyNum;
- this.bytesUsed = dataSize;
- this.replicaIndex = replicaIndex;
- this.isEmpty = isEmpty;
- }
-
- private void setSequenceId(Long seqId) {
- sequenceId = seqId;
+ private ContainerReplica(ContainerReplicaBuilder b) {
+ containerID = b.containerID;
+ state = b.state;
+ datanodeDetails = b.datanode;
+ placeOfBirth = Optional.ofNullable(b.placeOfBirth).orElse(datanodeDetails.getUuid());
+ keyCount = b.keyCount;
+ bytesUsed = b.bytesUsed;
+ replicaIndex = b.replicaIndex;
+ isEmpty = b.isEmpty;
+ sequenceId = b.sequenceId;
}
/**
@@ -299,12 +287,7 @@ public ContainerReplica build() {
"Container state can't be null");
Preconditions.checkNotNull(datanode,
"DatanodeDetails can't be null");
- ContainerReplica replica = new ContainerReplica(
- containerID, state, replicaIndex, datanode,
- Optional.ofNullable(placeOfBirth).orElse(datanode.getUuid()),
- keyCount, bytesUsed, isEmpty);
- Optional.ofNullable(sequenceId).ifPresent(replica::setSequenceId);
- return replica;
+ return new ContainerReplica(this);
}
}
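
ContainerReplica (and, further down, BackgroundSCMService, SCMContext and SCMNodeDetails) now pass the Builder directly to the private constructor instead of a long parameter list, which also lets sequenceId become final. A generic sketch of the pattern, using a hypothetical Widget class rather than any Ozone type:

    // Hypothetical illustration of the "constructor takes the builder" pattern.
    public final class Widget {
      private final String name;
      private final int size;

      private Widget(Builder b) {
        // Fields are copied once from the builder, so they can all be final.
        name = b.name;
        size = b.size;
      }

      public static final class Builder {
        private String name;
        private int size;

        public Builder setName(String name) {
          this.name = name;
          return this;
        }

        public Builder setSize(int size) {
          this.size = size;
          return this;
        }

        public Widget build() {
          return new Widget(this);
        }
      }
    }
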
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
index 72d90abe1f4..cf5975d05eb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
@@ -251,16 +251,12 @@ private void initialize() throws IOException {
pipelineManager.addContainerToPipelineSCMStart(
container.getPipelineID(), container.containerID());
} catch (PipelineNotFoundException ex) {
+ // We are ignoring this here. The container will be moved to the
+ // CLOSING state by ReplicationManager's OpenContainerHandler.
+ // For more info see HDDS-10231.
LOG.warn("Found container {} which is in OPEN state with " +
- "pipeline {} that does not exist. Marking container for " +
- "closing.", container, container.getPipelineID());
- try {
- updateContainerState(container.containerID().getProtobuf(),
- LifeCycleEvent.FINALIZE);
- } catch (InvalidStateTransitionException e) {
- // This cannot happen.
- LOG.warn("Unable to finalize Container {}.", container);
- }
+ "pipeline {} that does not exist.",
+ container, container.getPipelineID());
}
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
index 660452b2d8b..5416a9ff1c3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
@@ -96,7 +96,7 @@ private void setConfiguration(ContainerBalancerConfiguration conf) {
* Find a {@link ContainerMoveSelection} consisting of a target and
* container to move for a source datanode. Favours more under-utilized nodes.
* @param source Datanode to find a target for
- * @param candidateContainers Set of candidate containers satisfying
+ * @param container candidate container satisfying
* selection criteria
* {@link ContainerBalancerSelectionCriteria}
* (DatanodeDetails, Long) method returns true if the size specified in the
@@ -105,29 +105,27 @@ private void setConfiguration(ContainerBalancerConfiguration conf) {
*/
@Override
public ContainerMoveSelection findTargetForContainerMove(
- DatanodeDetails source, Set candidateContainers) {
+ DatanodeDetails source, ContainerID container) {
sortTargetForSource(source);
for (DatanodeUsageInfo targetInfo : potentialTargets) {
DatanodeDetails target = targetInfo.getDatanodeDetails();
- for (ContainerID container : candidateContainers) {
- Set replicas;
- ContainerInfo containerInfo;
- try {
- replicas = containerManager.getContainerReplicas(container);
- containerInfo = containerManager.getContainer(container);
- } catch (ContainerNotFoundException e) {
- logger.warn("Could not get Container {} from Container Manager for " +
- "obtaining replicas in Container Balancer.", container, e);
- continue;
- }
+ Set<ContainerReplica> replicas;
+ ContainerInfo containerInfo;
+ try {
+ replicas = containerManager.getContainerReplicas(container);
+ containerInfo = containerManager.getContainer(container);
+ } catch (ContainerNotFoundException e) {
+ logger.warn("Could not get Container {} from Container Manager for " +
+ "obtaining replicas in Container Balancer.", container, e);
+ return null;
+ }
- if (replicas.stream().noneMatch(
- replica -> replica.getDatanodeDetails().equals(target)) &&
- containerMoveSatisfiesPlacementPolicy(container, replicas, source,
- target) &&
- canSizeEnterTarget(target, containerInfo.getUsedBytes())) {
- return new ContainerMoveSelection(target, container);
- }
+ if (replicas.stream().noneMatch(
+ replica -> replica.getDatanodeDetails().equals(target)) &&
+ containerMoveSatisfiesPlacementPolicy(container, replicas, source,
+ target) &&
+ canSizeEnterTarget(target, containerInfo.getUsedBytes())) {
+ return new ContainerMoveSelection(target, container);
}
}
logger.info("Container Balancer could not find a target for " +
@@ -228,6 +226,9 @@ public void increaseSizeEntering(DatanodeDetails target, long size) {
if (totalEnteringSize < config.getMaxSizeEnteringTarget()) {
//reorder
potentialTargets.add(nodeManager.getUsageInfo(target));
+ } else {
+ logger.debug("Datanode {} removed from the list of potential targets. The total size of data entering it in " +
+ "this iteration is {}.", target, totalEnteringSize);
}
return;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
index 8f9332e2d3c..7e2ba2fd012 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBalancerConfigurationProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -472,8 +472,8 @@ ContainerBalancerConfigurationProto.Builder toProtobufBuilder() {
}
static ContainerBalancerConfiguration fromProtobuf(
- @NotNull ContainerBalancerConfigurationProto proto,
- @NotNull OzoneConfiguration ozoneConfiguration) {
+ @Nonnull ContainerBalancerConfigurationProto proto,
+ @Nonnull OzoneConfiguration ozoneConfiguration) {
ContainerBalancerConfiguration config =
ozoneConfiguration.getObject(ContainerBalancerConfiguration.class);
if (proto.hasUtilizationThreshold()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java
index 8171320a54f..d9102a88329 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerSelectionCriteria.java
@@ -31,8 +31,11 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.Collections;
import java.util.Comparator;
+import java.util.HashMap;
import java.util.HashSet;
+import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
import java.util.TreeSet;
@@ -52,6 +55,7 @@ public class ContainerBalancerSelectionCriteria {
private Set selectedContainers;
private Set excludeContainers;
private FindSourceStrategy findSourceStrategy;
+ private Map<DatanodeDetails, NavigableSet<ContainerID>> setMap;
public ContainerBalancerSelectionCriteria(
ContainerBalancerConfiguration balancerConfiguration,
@@ -66,6 +70,7 @@ public ContainerBalancerSelectionCriteria(
selectedContainers = new HashSet<>();
excludeContainers = balancerConfiguration.getExcludeContainers();
this.findSourceStrategy = findSourceStrategy;
+ this.setMap = new HashMap<>();
}
/**
@@ -79,38 +84,20 @@ private boolean isContainerReplicatingOrDeleting(ContainerID containerID) {
}
/**
- * Gets containers that are suitable for moving based on the following
- * required criteria:
- * 1. Container must not be undergoing replication.
- * 2. Container must not already be selected for balancing.
- * 3. Container size should be closer to 5GB.
- * 4. Container must not be in the configured exclude containers list.
- * 5. Container should be closed.
- * 6. If the {@link LegacyReplicationManager} is enabled, then the container should not be an EC container.
- * @param node DatanodeDetails for which to find candidate containers.
- * @return NavigableSet of candidate containers that satisfy the criteria.
+ * Gets the ContainerID set for the given Datanode, returned as a NavigableSet.
+ * Since sorting is time-consuming, the set is cached per Datanode.
+ *
+ * @param node source datanode
+ * @return cached navigable ContainerID set
*/
- public NavigableSet getCandidateContainers(
- DatanodeDetails node, long sizeMovedAlready) {
- NavigableSet containerIDSet =
- new TreeSet<>(orderContainersByUsedBytes().reversed());
- try {
- containerIDSet.addAll(nodeManager.getContainers(node));
- } catch (NodeNotFoundException e) {
- LOG.warn("Could not find Datanode {} while selecting candidate " +
- "containers for Container Balancer.", node.toString(), e);
- return containerIDSet;
+ public Set<ContainerID> getContainerIDSet(DatanodeDetails node) {
+ // Check if the node is registered at the beginning
+ if (!nodeManager.isNodeRegistered(node)) {
+ return Collections.emptySet();
}
- if (excludeContainers != null) {
- containerIDSet.removeAll(excludeContainers);
- }
- if (selectedContainers != null) {
- containerIDSet.removeAll(selectedContainers);
- }
-
- containerIDSet.removeIf(
- containerID -> shouldBeExcluded(containerID, node, sizeMovedAlready));
- return containerIDSet;
+ Set<ContainerID> containers = setMap.computeIfAbsent(node,
+ this::getCandidateContainers);
+ return containers != null ? containers : Collections.emptySet();
}
/**
@@ -165,7 +152,19 @@ private boolean isECContainerAndLegacyRMEnabled(ContainerInfo container) {
&& replicationManager.getConfig().isLegacyEnabled();
}
- private boolean shouldBeExcluded(ContainerID containerID,
+ /**
+ * Checks whether a container should be excluded from balancing. A container
+ * is suitable for moving only if it satisfies the following criteria:
+ * 1. Container must not be undergoing replication.
+ * 2. Container must not already be selected for balancing.
+ * 3. Container size should be closer to 5GB.
+ * 4. Container must not be in the configured exclude containers list.
+ * 5. Container should be closed.
+ * 6. If the {@link LegacyReplicationManager} is enabled, then the container should not be an EC container.
+ * @param node source DatanodeDetails from which the container may be moved.
+ * @return true if the container should be excluded, else false
+ */
+ public boolean shouldBeExcluded(ContainerID containerID,
DatanodeDetails node, long sizeMovedAlready) {
ContainerInfo container;
try {
@@ -175,7 +174,8 @@ private boolean shouldBeExcluded(ContainerID containerID,
"candidate container. Excluding it.", containerID);
return true;
}
- return !isContainerClosed(container, node) || isECContainerAndLegacyRMEnabled(container) ||
+ return excludeContainers.contains(containerID) || selectedContainers.contains(containerID) ||
+ !isContainerClosed(container, node) || isECContainerAndLegacyRMEnabled(container) ||
isContainerReplicatingOrDeleting(containerID) ||
!findSourceStrategy.canSizeLeaveSource(node, container.getUsedBytes())
|| breaksMaxSizeToMoveLimit(container.containerID(),
@@ -242,4 +242,24 @@ public void setSelectedContainers(
this.selectedContainers = selectedContainers;
}
+
+ private NavigableSet<ContainerID> getCandidateContainers(DatanodeDetails node) {
+ NavigableSet<ContainerID> newSet =
+ new TreeSet<>(orderContainersByUsedBytes().reversed());
+ try {
+ Set<ContainerID> idSet = nodeManager.getContainers(node);
+ if (excludeContainers != null) {
+ idSet.removeAll(excludeContainers);
+ }
+ if (selectedContainers != null) {
+ idSet.removeAll(selectedContainers);
+ }
+ newSet.addAll(idSet);
+ return newSet;
+ } catch (NodeNotFoundException e) {
+ LOG.warn("Could not find Datanode {} while selecting candidate " +
+ "containers for Container Balancer.", node, e);
+ return null;
+ }
+ }
}
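
The balancer now looks up each datanode's candidate ContainerID set once and caches it in setMap through computeIfAbsent, instead of rebuilding and re-sorting it for every match attempt. A standalone sketch of the caching idiom, with hypothetical names, is below; note that computeIfAbsent does not store a null result, which is why getContainerIDSet above falls back to Collections.emptySet() when the node is unknown.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.NavigableSet;
    import java.util.function.Function;

    // Hypothetical sketch of the per-key caching idiom used by setMap above.
    final class CandidateCache<K, V> {
      private final Map<K, NavigableSet<V>> cache = new HashMap<>();

      NavigableSet<V> get(K key, Function<K, NavigableSet<V>> loader) {
        // The (expensive) loader runs only on the first lookup for a key;
        // later lookups return the cached, already-sorted set. A null result
        // is not cached, so a failed load is retried on the next call.
        return cache.computeIfAbsent(key, loader);
      }
    }
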
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index abbc50ac86a..94e8cfd04a1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -50,7 +50,6 @@
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.NavigableSet;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
@@ -692,11 +691,10 @@ private long cancelMovesThatExceedTimeoutDuration() {
* @return ContainerMoveSelection containing the selected target and container
*/
private ContainerMoveSelection matchSourceWithTarget(DatanodeDetails source) {
- NavigableSet candidateContainers =
- selectionCriteria.getCandidateContainers(source,
- sizeScheduledForMoveInLatestIteration);
+ Set<ContainerID> sourceContainerIDSet =
+ selectionCriteria.getContainerIDSet(source);
- if (candidateContainers.isEmpty()) {
+ if (sourceContainerIDSet.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("ContainerBalancer could not find any candidate containers " +
"for datanode {}", source.getUuidString());
@@ -708,9 +706,23 @@ private ContainerMoveSelection matchSourceWithTarget(DatanodeDetails source) {
LOG.debug("ContainerBalancer is finding suitable target for source " +
"datanode {}", source.getUuidString());
}
- ContainerMoveSelection moveSelection =
- findTargetStrategy.findTargetForContainerMove(
- source, candidateContainers);
+
+ ContainerMoveSelection moveSelection = null;
+ Set<ContainerID> toRemoveContainerIds = new HashSet<>();
+ for (ContainerID containerId: sourceContainerIDSet) {
+ if (selectionCriteria.shouldBeExcluded(containerId, source,
+ sizeScheduledForMoveInLatestIteration)) {
+ toRemoveContainerIds.add(containerId);
+ continue;
+ }
+ moveSelection = findTargetStrategy.findTargetForContainerMove(source,
+ containerId);
+ if (moveSelection != null) {
+ break;
+ }
+ }
+ // Update cached containerIDSet in setMap
+ sourceContainerIDSet.removeAll(toRemoveContainerIds);
if (moveSelection == null) {
if (LOG.isDebugEnabled()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
index 4f5868f2456..6350c3c7619 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -72,7 +72,7 @@ private void setLowerLimit(Double lowerLimit) {
* {@inheritDoc}
*/
public void resetPotentialSources(
- @NotNull Collection sources) {
+ @Nonnull Collection<DatanodeDetails> sources) {
List usageInfos = new ArrayList<>(sources.size());
sources.forEach(source -> usageInfos.add(nodeManager.getUsageInfo(source)));
resetSources(usageInfos);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java
index e2716304839..236bdfd98d4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceStrategy.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.util.Collection;
import java.util.List;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java
index 87feae4981d..393b44d44bf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByNetworkTopology.java
@@ -25,7 +25,8 @@
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
+import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
@@ -41,6 +42,8 @@
*/
public class FindTargetGreedyByNetworkTopology
extends AbstractFindTargetGreedy {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(FindTargetGreedyByNetworkTopology.class);
private NetworkTopology networkTopology;
private List potentialTargets;
@@ -51,7 +54,7 @@ public FindTargetGreedyByNetworkTopology(
NodeManager nodeManager,
NetworkTopology networkTopology) {
super(containerManager, placementPolicyValidateProxy, nodeManager);
- setLogger(LoggerFactory.getLogger(FindTargetGreedyByNetworkTopology.class));
+ setLogger(LOG);
potentialTargets = new LinkedList<>();
setPotentialTargets(potentialTargets);
this.networkTopology = networkTopology;
@@ -87,7 +90,7 @@ public void sortTargetForSource(DatanodeDetails source) {
*/
@Override
public void resetPotentialTargets(
- @NotNull Collection targets) {
+ @Nonnull Collection<DatanodeDetails> targets) {
// create DatanodeUsageInfo from DatanodeDetails
List usageInfos = new ArrayList<>(targets.size());
targets.forEach(datanodeDetails -> usageInfos.add(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java
index 71a338db922..6e0c923b926 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetGreedyByUsageInfo.java
@@ -24,7 +24,8 @@
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
+import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
@@ -37,12 +38,15 @@
* target with the lowest space usage.
*/
public class FindTargetGreedyByUsageInfo extends AbstractFindTargetGreedy {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(FindTargetGreedyByUsageInfo.class);
+
public FindTargetGreedyByUsageInfo(
ContainerManager containerManager,
PlacementPolicyValidateProxy placementPolicyValidateProxy,
NodeManager nodeManager) {
super(containerManager, placementPolicyValidateProxy, nodeManager);
- setLogger(LoggerFactory.getLogger(FindTargetGreedyByUsageInfo.class));
+ setLogger(LOG);
setPotentialTargets(new TreeSet<>((a, b) -> compareByUsage(a, b)));
}
@@ -63,7 +67,7 @@ public void sortTargetForSource(DatanodeDetails source) {
*/
@Override
public void resetPotentialTargets(
- @NotNull Collection targets) {
+ @Nonnull Collection<DatanodeDetails> targets) {
// create DatanodeUsageInfo from DatanodeDetails
List usageInfos = new ArrayList<>(targets.size());
targets.forEach(datanodeDetails -> usageInfos.add(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java
index 17f6aa329dc..a9f2ee00a2d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindTargetStrategy.java
@@ -22,10 +22,9 @@
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.util.Collection;
import java.util.List;
-import java.util.Set;
/**
* This interface can be used to implement strategies to find a target for a
@@ -40,7 +39,7 @@ public interface FindTargetStrategy {
* enter a potential target.
*
* @param source Datanode to find a target for
- * @param candidateContainers Set of candidate containers satisfying
+ * @param candidateContainer candidate container satisfying
* selection criteria
* {@link ContainerBalancerSelectionCriteria}
* (DatanodeDetails, Long) method returns true if the size specified in the
@@ -49,7 +48,7 @@ public interface FindTargetStrategy {
* selected container
*/
ContainerMoveSelection findTargetForContainerMove(
- DatanodeDetails source, Set candidateContainers);
+ DatanodeDetails source, ContainerID candidateContainer);
/**
* increase the Entering size of a candidate target data node.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
index 330bf67416a..094e535dcbd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
@@ -23,7 +23,8 @@
/**
* SCM Node Metric that is used in the placement classes.
*/
-public class SCMNodeMetric implements DatanodeMetric {
+public class SCMNodeMetric implements DatanodeMetric,
+ Comparable<SCMNodeMetric> {
private SCMNodeStat stat;
/**
@@ -195,12 +196,12 @@ public void subtract(SCMNodeStat value) {
* @throws ClassCastException if the specified object's type prevents it
* from being compared to this object.
*/
- //@Override
- public int compareTo(SCMNodeStat o) {
- if (isEqual(o)) {
+ @Override
+ public int compareTo(SCMNodeMetric o) {
+ if (isEqual(o.get())) {
return 0;
}
- if (isGreater(o)) {
+ if (isGreater(o.get())) {
return 1;
} else {
return -1;
@@ -225,4 +226,9 @@ public boolean equals(Object o) {
public int hashCode() {
return stat != null ? stat.hashCode() : 0;
}
+
+ @Override
+ public String toString() {
+ return "SCMNodeMetric{" + stat.toString() + '}';
+ }
}
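
With compareTo now a real override of Comparable<SCMNodeMetric>, metrics can be ordered directly; a hedged usage sketch follows (the helper class is hypothetical, only SCMNodeMetric itself comes from this patch). Keeping compareTo consistent with the stat-based equals() and hashCode() shown above is what makes this safe to use in sorted collections.

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;

    // Hypothetical helper: sort node metrics using the new compareTo
    // (isEqual -> 0, isGreater -> 1, otherwise -1).
    final class NodeMetricSorting {
      static void sortAscending(List<SCMNodeMetric> metrics) {
        Collections.sort(metrics);
      }
    }
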
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index 2a848a04eff..5456e6ee527 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -174,4 +174,13 @@ public int hashCode() {
return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^
committed.get() ^ freeSpaceToSpare.get());
}
+
+ @Override
+ public String toString() {
+ return "SCMNodeStat{" +
+ "capacity=" + capacity.get() +
+ ", scmUsed=" + scmUsed.get() +
+ ", remaining=" + remaining.get() +
+ '}';
+ }
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index 979cff799fa..a3661243be6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -61,6 +61,7 @@
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
@@ -1545,5 +1546,14 @@ private int getRemainingMaintenanceRedundancy(boolean isEC) {
private static boolean isEC(ReplicationConfig replicationConfig) {
return replicationConfig.getReplicationType() == EC;
}
+
+ public boolean hasHealthyPipeline(ContainerInfo container) {
+ try {
+ return scmContext.getScm().getPipelineManager()
+ .getPipeline(container.getPipelineID()) != null;
+ } catch (PipelineNotFoundException e) {
+ return false;
+ }
+ }
}
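
hasHealthyPipeline simply reports whether the container's pipeline is still known to the PipelineManager. A hedged caller-side sketch, mirroring what OpenContainerHandler does in the next hunk (replicationManager and containerInfo are assumed to be in scope):

    // Hypothetical caller sketch: if the pipeline is already gone, the open
    // container can never be written again, so trigger the close path directly.
    if (!replicationManager.hasHealthyPipeline(containerInfo)) {
      replicationManager.sendCloseContainerEvent(containerInfo.containerID());
    }
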
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java
index 2c0b405db97..21c3c76d3e9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java
@@ -53,20 +53,26 @@ public boolean handle(ContainerCheckRequest request) {
if (containerInfo.getState() == HddsProtos.LifeCycleState.OPEN) {
LOG.debug("Checking open container {} in OpenContainerHandler",
containerInfo);
- if (!isOpenContainerHealthy(
- containerInfo, request.getContainerReplicas())) {
- // This is an unhealthy open container, so we need to trigger the
- // close process on it.
- LOG.debug("Container {} is open but unhealthy. Triggering close.",
- containerInfo);
- request.getReport().incrementAndSample(
- ReplicationManagerReport.HealthState.OPEN_UNHEALTHY,
+ final boolean noPipeline = !replicationManager.hasHealthyPipeline(containerInfo);
+ // Minor optimization. If noPipeline is true, isOpenContainerHealthy will not
+ // be called.
+ final boolean unhealthy = noPipeline || !isOpenContainerHealthy(containerInfo,
+ request.getContainerReplicas());
+ if (unhealthy) {
+ // For an OPEN container, we close the container
+ // if the container has no Pipeline or if the container is unhealthy.
+ LOG.info("Container {} is open but {}. Triggering close.",
+ containerInfo, noPipeline ? "has no Pipeline" : "unhealthy");
+
+ request.getReport().incrementAndSample(noPipeline ?
+ ReplicationManagerReport.HealthState.OPEN_WITHOUT_PIPELINE :
+ ReplicationManagerReport.HealthState.OPEN_UNHEALTHY,
containerInfo.containerID());
+
if (!request.isReadOnly()) {
replicationManager
.sendCloseContainerEvent(containerInfo.containerID());
}
- return true;
}
// For open containers we do not want to do any further processing in RM
// so return true to stop the command chain.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java
index 03e2a15938e..f28fcc7423b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/BackgroundSCMService.java
@@ -45,17 +45,14 @@ public final class BackgroundSCMService implements SCMService {
private final Runnable periodicalTask;
private volatile boolean runImmediately = false;
- private BackgroundSCMService(
- final Clock clock, final SCMContext scmContext,
- final String serviceName, final long intervalInMillis,
- final long waitTimeInMillis, final Runnable task) {
- this.scmContext = scmContext;
- this.clock = clock;
- this.periodicalTask = task;
- this.serviceName = serviceName;
- this.log = LoggerFactory.getLogger(serviceName);
- this.intervalInMillis = intervalInMillis;
- this.waitTimeInMillis = waitTimeInMillis;
+ private BackgroundSCMService(Builder b) {
+ scmContext = b.scmContext;
+ clock = b.clock;
+ periodicalTask = b.periodicalTask;
+ serviceName = b.serviceName;
+ log = LoggerFactory.getLogger(serviceName);
+ intervalInMillis = b.intervalInMillis;
+ waitTimeInMillis = b.waitTimeInMillis;
start();
}
@@ -206,8 +203,7 @@ public BackgroundSCMService build() {
Preconditions.assertNotNull(clock, "clock is null");
Preconditions.assertNotNull(serviceName, "serviceName is null");
- return new BackgroundSCMService(clock, scmContext, serviceName,
- intervalInMillis, waitTimeInMillis, periodicalTask);
+ return new BackgroundSCMService(this);
}
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
index 08ee20f5af7..b5f926638d4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMContext.java
@@ -27,7 +27,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.Optional;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -78,17 +77,13 @@ public static SCMContext emptyContext() {
*/
private volatile FinalizationCheckpoint finalizationCheckpoint;
- private SCMContext(boolean isLeader, long term,
- final SafeModeStatus safeModeStatus,
- final FinalizationCheckpoint finalizationCheckpoint,
- final OzoneStorageContainerManager scm, String threadNamePrefix) {
- this.isLeader = isLeader;
- this.term = term;
- this.safeModeStatus = safeModeStatus;
- this.finalizationCheckpoint = finalizationCheckpoint;
- this.scm = scm;
- this.isLeaderReady = false;
- this.threadNamePrefix = threadNamePrefix;
+ private SCMContext(Builder b) {
+ isLeader = b.isLeader;
+ term = b.term;
+ safeModeStatus = new SafeModeStatus(b.isInSafeMode, b.isPreCheckComplete);
+ finalizationCheckpoint = b.finalizationCheckpoint;
+ scm = b.scm;
+ threadNamePrefix = b.threadNamePrefix;
}
/**
@@ -104,9 +99,9 @@ public void updateLeaderAndTerm(boolean leader, long newTerm) {
isLeader = leader;
// If it is not leader, set isLeaderReady to false.
if (!isLeader) {
- isLeaderReady = false;
LOG.info("update from <{}> to <{}>", isLeaderReady,
false);
+ isLeaderReady = false;
}
term = newTerm;
} finally {
@@ -285,7 +280,7 @@ public static class Builder {
private boolean isInSafeMode = false;
private boolean isPreCheckComplete = true;
private OzoneStorageContainerManager scm = null;
- private FinalizationCheckpoint finalizationCheckpoint;
+ private FinalizationCheckpoint finalizationCheckpoint = FinalizationCheckpoint.FINALIZATION_COMPLETE;
private String threadNamePrefix = "";
public Builder setLeader(boolean leader) {
@@ -335,13 +330,7 @@ public SCMContext build() {
*/
@VisibleForTesting
SCMContext buildMaybeInvalid() {
- return new SCMContext(
- isLeader,
- term,
- new SafeModeStatus(isInSafeMode, isPreCheckComplete),
- Optional.ofNullable(finalizationCheckpoint).orElse(
- FinalizationCheckpoint.FINALIZATION_COMPLETE),
- scm, threadNamePrefix);
+ return new SCMContext(this);
}
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java
index 1194a5260ce..2f2e1c4c1ff 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMGrpcOutputStream.java
@@ -30,7 +30,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.io.OutputStream;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java
index 17901ecfde5..bd65d384844 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java
@@ -19,8 +19,6 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdds.NodeDetails;
-import org.apache.ratis.protocol.RaftGroup;
-import org.apache.ratis.protocol.RaftPeerId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -30,38 +28,26 @@
* Construct SCM node details.
*/
public final class SCMNodeDetails extends NodeDetails {
- private InetSocketAddress blockProtocolServerAddress;
- private String blockProtocolServerAddressKey;
- private InetSocketAddress clientProtocolServerAddress;
- private String clientProtocolServerAddressKey;
- private InetSocketAddress datanodeProtocolServerAddress;
- private String datanodeAddressKey;
- private int grpcPort;
+ private final InetSocketAddress blockProtocolServerAddress;
+ private final String blockProtocolServerAddressKey;
+ private final InetSocketAddress clientProtocolServerAddress;
+ private final String clientProtocolServerAddressKey;
+ private final InetSocketAddress datanodeProtocolServerAddress;
+ private final String datanodeAddressKey;
+ private final int grpcPort;
+
public static final Logger LOG =
LoggerFactory.getLogger(SCMNodeDetails.class);
- /**
- * Constructs SCMNodeDetails object.
- */
- @SuppressWarnings("checkstyle:ParameterNumber")
- private SCMNodeDetails(String serviceId, String nodeId,
- InetSocketAddress rpcAddr, int ratisPort, int grpcPort,
- String httpAddress, String httpsAddress,
- InetSocketAddress blockProtocolServerAddress,
- InetSocketAddress clientProtocolServerAddress,
- InetSocketAddress datanodeProtocolServerAddress, RaftGroup group,
- RaftPeerId selfPeerId, String datanodeAddressKey,
- String blockProtocolServerAddressKey,
- String clientProtocolServerAddressAddressKey) {
- super(serviceId, nodeId, rpcAddr, ratisPort,
- httpAddress, httpsAddress);
- this.grpcPort = grpcPort;
- this.blockProtocolServerAddress = blockProtocolServerAddress;
- this.clientProtocolServerAddress = clientProtocolServerAddress;
- this.datanodeProtocolServerAddress = datanodeProtocolServerAddress;
- this.datanodeAddressKey = datanodeAddressKey;
- this.blockProtocolServerAddressKey = blockProtocolServerAddressKey;
- this.clientProtocolServerAddressKey = clientProtocolServerAddressAddressKey;
+ private SCMNodeDetails(Builder b) {
+ super(b.scmServiceId, b.scmNodeId, b.rpcAddress, b.ratisPort, b.httpAddr, b.httpsAddr);
+ grpcPort = b.grpcPort;
+ blockProtocolServerAddress = b.blockProtocolServerAddress;
+ clientProtocolServerAddress = b.clientProtocolServerAddress;
+ datanodeProtocolServerAddress = b.datanodeProtocolServerAddress;
+ datanodeAddressKey = b.datanodeAddressKey;
+ blockProtocolServerAddressKey = b.blockProtocolServerAddressKey;
+ clientProtocolServerAddressKey = b.clientProtocolServerAddressKey;
}
@Override
@@ -96,8 +82,6 @@ public static class Builder {
private String clientProtocolServerAddressKey;
private InetSocketAddress datanodeProtocolServerAddress;
private String datanodeAddressKey;
- private RaftGroup raftGroup;
- private RaftPeerId selfPeerId;
public Builder setDatanodeAddressKey(String addressKey) {
this.datanodeAddressKey = addressKey;
@@ -129,16 +113,6 @@ public Builder setDatanodeProtocolServerAddress(InetSocketAddress address) {
return this;
}
- public Builder setRaftGroup(RaftGroup group) {
- this.raftGroup = group;
- return this;
- }
-
- public Builder setSelfPeerId(RaftPeerId peerId) {
- this.selfPeerId = peerId;
- return this;
- }
-
public Builder setRpcAddress(InetSocketAddress rpcAddr) {
this.rpcAddress = rpcAddr;
return this;
@@ -175,11 +149,7 @@ public Builder setHttpsAddress(String httpsAddress) {
}
public SCMNodeDetails build() {
- return new SCMNodeDetails(scmServiceId, scmNodeId, rpcAddress,
- ratisPort, grpcPort, httpAddr, httpsAddr, blockProtocolServerAddress,
- clientProtocolServerAddress, datanodeProtocolServerAddress,
- raftGroup, selfPeerId, datanodeAddressKey,
- blockProtocolServerAddressKey, clientProtocolServerAddressKey);
+ return new SCMNodeDetails(this);
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
index c7ab2cb2e8c..3e7db16c2a0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
@@ -32,7 +32,7 @@
import org.apache.hadoop.hdds.utils.io.LengthOutputStream;
import org.apache.ratis.util.function.CheckedFunction;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
/**
* Codec to serialize/deserialize {@link X509Certificate}.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java
index e0b4c3ce543..fbfbb49c252 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java
@@ -18,7 +18,11 @@
package org.apache.hadoop.hdds.scm.node;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import java.util.List;
+import java.util.Map;
import java.util.Set;
/**
@@ -31,4 +35,6 @@ public interface DatanodeAdminMonitor extends Runnable {
void stopMonitoring(DatanodeDetails dn);
Set getTrackedNodes();
void setMetrics(NodeDecommissionMetrics metrics);
+ Map<String, List<ContainerID>> getContainersReplicatedOnNode(DatanodeDetails dn)
+ throws NodeNotFoundException;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index 51c6d12dea9..d7975ff1e58 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -96,8 +96,8 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
public static final class TrackedNode {
private DatanodeDetails datanodeDetails;
-
private long startTime = 0L;
+ private Map<String, List<ContainerID>> containersReplicatedOnNode = new ConcurrentHashMap<>();
public TrackedNode(DatanodeDetails datanodeDetails, long startTime) {
this.datanodeDetails = datanodeDetails;
@@ -122,6 +122,15 @@ public DatanodeDetails getDatanodeDetails() {
public long getStartTime() {
return startTime;
}
+
+ public Map<String, List<ContainerID>> getContainersReplicatedOnNode() {
+ return containersReplicatedOnNode;
+ }
+
+ public void setContainersReplicatedOnNode(List<ContainerID> underReplicated, List<ContainerID> unClosed) {
+ this.containersReplicatedOnNode.put("UnderReplicated", Collections.unmodifiableList(underReplicated));
+ this.containersReplicatedOnNode.put("UnClosed", Collections.unmodifiableList(unClosed));
+ }
}
private Map containerStateByHost;
@@ -423,9 +432,7 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn)
boolean isHealthy = replicaSet.isHealthyEnoughForOffline();
if (!isHealthy) {
- if (LOG.isDebugEnabled()) {
- unClosedIDs.add(cid);
- }
+ unClosedIDs.add(cid);
if (unclosed < containerDetailsLoggingLimit
|| LOG.isDebugEnabled()) {
LOG.info("Unclosed Container {} {}; {}", cid, replicaSet, replicaDetails(replicaSet.getReplicas()));
@@ -448,20 +455,18 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn)
replicationManager.checkContainerStatus(replicaSet.getContainer(), report);
replicatedOK = report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED) == 0;
}
-
if (replicatedOK) {
sufficientlyReplicated++;
} else {
- if (LOG.isDebugEnabled()) {
- underReplicatedIDs.add(cid);
- }
+ underReplicatedIDs.add(cid);
if (underReplicated < containerDetailsLoggingLimit || LOG.isDebugEnabled()) {
LOG.info("Under Replicated Container {} {}; {}", cid, replicaSet, replicaDetails(replicaSet.getReplicas()));
}
underReplicated++;
}
} catch (ContainerNotFoundException e) {
- LOG.warn("ContainerID {} present in node list for {} but not found in containerManager", cid, dn);
+ LOG.warn("ContainerID {} present in node list for {} but not found in containerManager", cid,
+ dn.getDatanodeDetails());
}
}
LOG.info("{} has {} sufficientlyReplicated, {} deleting, {} " +
@@ -485,9 +490,21 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn)
unclosed, unClosedIDs.stream().map(
Object::toString).collect(Collectors.joining(", ")));
}
+ dn.setContainersReplicatedOnNode(underReplicatedIDs, unClosedIDs);
return underReplicated == 0 && unclosed == 0;
}
+ public Map<String, List<ContainerID>> getContainersReplicatedOnNode(DatanodeDetails dn) {
+ Iterator<TrackedNode> iterator = trackedNodes.iterator();
+ while (iterator.hasNext()) {
+ TrackedNode trackedNode = iterator.next();
+ if (trackedNode.equals(new TrackedNode(dn, 0L))) {
+ return trackedNode.getContainersReplicatedOnNode();
+ }
+ }
+ return new HashMap<>();
+ }
+
private String replicaDetails(Collection replicas) {
StringBuilder sb = new StringBuilder();
sb.append("Replicas{");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
index 3c40437d7f6..19ed24fbcaf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
@@ -42,7 +42,7 @@
import com.google.common.base.Preconditions;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index c98cc63c466..38e59b89e76 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.scm.DatanodeAdminError;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -40,6 +41,7 @@
import java.util.Comparator;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
@@ -292,6 +294,11 @@ public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm,
TimeUnit.SECONDS);
}
+ public Map<String, List<ContainerID>> getContainersReplicatedOnNode(DatanodeDetails dn)
+ throws NodeNotFoundException {
+ return getMonitor().getContainersReplicatedOnNode(dn);
+ }
+
@VisibleForTesting
public DatanodeAdminMonitor getMonitor() {
return monitor;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 399a7ef952e..21bcd1f78a2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -40,7 +40,7 @@
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import java.io.Closeable;
import java.util.List;
import java.util.Map;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index e9b7d220e1f..a149998db8b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -69,7 +69,9 @@
import javax.management.ObjectName;
import java.io.IOException;
+import java.math.RoundingMode;
import java.net.InetAddress;
+import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -138,9 +140,11 @@ public class SCMNodeManager implements NodeManager {
* consistent view of the node state.
*/
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
- private final String opeState = "OPSTATE";
- private final String comState = "COMSTATE";
- private final String lastHeartbeat = "LASTHEARTBEAT";
+ private static final String OPESTATE = "OPSTATE";
+ private static final String COMSTATE = "COMSTATE";
+ private static final String LASTHEARTBEAT = "LASTHEARTBEAT";
+ private static final String USEDSPACEPERCENT = "USEDSPACEPERCENT";
+ private static final String TOTALCAPACITY = "CAPACITY";
/**
* Constructs SCM machine Manager.
*/
@@ -268,7 +272,7 @@ public int getNodeCount(NodeStatus nodeStatus) {
* Returns the Number of Datanodes by State they are in. Passing null for
* either of the states acts like a wildcard for that state.
*
- * @parem nodeOpState - The Operational State of the node
+ * @param nodeOpState - The Operational State of the node
* @param health - The health of the node
* @return count
*/
@@ -507,19 +511,15 @@ private boolean updateDnsToUuidMap(
* Send heartbeat to indicate the datanode is alive and doing well.
*
* @param datanodeDetails - DatanodeDetailsProto.
- * @param layoutInfo - Layout Version Proto.
* @return SCMheartbeat response.
*/
@Override
public List processHeartbeat(DatanodeDetails datanodeDetails,
- LayoutVersionProto layoutInfo,
- CommandQueueReportProto queueReport) {
+ CommandQueueReportProto queueReport) {
Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " +
"DatanodeDetails.");
try {
nodeStateManager.updateLastHeartbeatTime(datanodeDetails);
- nodeStateManager.updateLastKnownLayoutVersion(datanodeDetails,
- layoutInfo);
metrics.incNumHBProcessed();
updateDatanodeOpState(datanodeDetails);
} catch (NodeNotFoundException e) {
@@ -682,6 +682,15 @@ public void processLayoutVersionReport(DatanodeDetails datanodeDetails,
layoutVersionReport.toString().replaceAll("\n", "\\\\n"));
}
+ try {
+ nodeStateManager.updateLastKnownLayoutVersion(datanodeDetails,
+ layoutVersionReport);
+ } catch (NodeNotFoundException e) {
+ LOG.error("SCM trying to process Layout Version from an " +
+ "unregistered node {}.", datanodeDetails);
+ return;
+ }
+
// Software layout version is hardcoded to the SCM.
int scmSlv = scmLayoutVersionManager.getSoftwareLayoutVersion();
int dnSlv = layoutVersionReport.getSoftwareLayoutVersion();
@@ -1103,9 +1112,9 @@ public Map<String, Map<String, String>> getNodeStatusInfo() {
heartbeatTimeDiff = getLastHeartbeatTimeDiff(dni.getLastHeartbeatTime());
}
Map<String, String> map = new HashMap<>();
- map.put(opeState, opstate);
- map.put(comState, healthState);
- map.put(lastHeartbeat, heartbeatTimeDiff);
+ map.put(OPESTATE, opstate);
+ map.put(COMSTATE, healthState);
+ map.put(LASTHEARTBEAT, heartbeatTimeDiff);
if (httpPort != null) {
map.put(httpPort.getName().toString(), httpPort.getValue().toString());
}
@@ -1113,11 +1122,97 @@ public Map<String, Map<String, String>> getNodeStatusInfo() {
map.put(httpsPort.getName().toString(),
httpsPort.getValue().toString());
}
+ String capacity = calculateStorageCapacity(dni.getStorageReports());
+ map.put(TOTALCAPACITY, capacity);
+ String[] storagePercentage = calculateStoragePercentage(
+ dni.getStorageReports());
+ String scmUsedPerc = storagePercentage[0];
+ String nonScmUsedPerc = storagePercentage[1];
+ map.put(USEDSPACEPERCENT,
+ "Ozone: " + scmUsedPerc + "%, other: " + nonScmUsedPerc + "%");
nodes.put(hostName, map);
}
return nodes;
}
+ /**
+ * Calculate the total storage capacity of a datanode.
+ * @param storageReports the storage reports of the datanode's volumes
+ * @return the total capacity as a human-readable string, e.g. "1.5TB"
+ */
+ public static String calculateStorageCapacity(
+ List<StorageReportProto> storageReports) {
+ long capacityByte = 0;
+ if (storageReports != null && !storageReports.isEmpty()) {
+ for (StorageReportProto storageReport : storageReports) {
+ capacityByte += storageReport.getCapacity();
+ }
+ }
+
+ double ua = capacityByte;
+ StringBuilder unit = new StringBuilder("B");
+ if (ua > 1024) {
+ ua = ua / 1024;
+ unit.replace(0, 1, "KB");
+ }
+ if (ua > 1024) {
+ ua = ua / 1024;
+ unit.replace(0, 2, "MB");
+ }
+ if (ua > 1024) {
+ ua = ua / 1024;
+ unit.replace(0, 2, "GB");
+ }
+ if (ua > 1024) {
+ ua = ua / 1024;
+ unit.replace(0, 2, "TB");
+ }
+
+ DecimalFormat decimalFormat = new DecimalFormat("#0.0");
+ decimalFormat.setRoundingMode(RoundingMode.HALF_UP);
+ String capacity = decimalFormat.format(ua);
+ return capacity + unit.toString();
+ }
+
+ /**
+ * Calculate the storage usage percentages of a datanode.
+ * @param storageReports the storage reports of the datanode's volumes
+ * @return a two-element array holding the percentage of capacity used by
+ * Ozone and the percentage used by other data, or "N/A" when no
+ * storage reports are available
+ */
+ public static String[] calculateStoragePercentage(
+ List<StorageReportProto> storageReports) {
+ String[] storagePercentage = new String[2];
+ String usedPercentage = "N/A";
+ String nonUsedPercentage = "N/A";
+ if (storageReports != null && !storageReports.isEmpty()) {
+ long capacity = 0;
+ long scmUsed = 0;
+ long remaining = 0;
+ for (StorageReportProto storageReport : storageReports) {
+ capacity += storageReport.getCapacity();
+ scmUsed += storageReport.getScmUsed();
+ remaining += storageReport.getRemaining();
+ }
+ long scmNonUsed = capacity - scmUsed - remaining;
+
+ DecimalFormat decimalFormat = new DecimalFormat("#0.00");
+ decimalFormat.setRoundingMode(RoundingMode.HALF_UP);
+
+ double usedPerc = ((double) scmUsed / capacity) * 100;
+ usedPerc = usedPerc > 100.0 ? 100.0 : usedPerc;
+ double nonUsedPerc = ((double) scmNonUsed / capacity) * 100;
+ nonUsedPerc = nonUsedPerc > 100.0 ? 100.0 : nonUsedPerc;
+ usedPercentage = decimalFormat.format(usedPerc);
+ nonUsedPercentage = decimalFormat.format(nonUsedPerc);
+ }
+
+ storagePercentage[0] = usedPercentage;
+ storagePercentage[1] = nonUsedPercentage;
+ return storagePercentage;
+ }
+
/**
* Based on the current time and the last heartbeat, calculate the time difference
* and get a string of the relative value. E.g. "2s ago", "1m 2s ago", etc.
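The two helpers above produce the values surfaced on the SCM web UI: calculateStorageCapacity folds raw bytes into a one-decimal figure with a B/KB/MB/GB/TB suffix, and calculateStoragePercentage reports the share of capacity used by Ozone and by other data. A standalone sketch of the same unit-folding conversion, independent of the Ozone classes (CapacityFormatSketch and its method are illustrative names, not part of the patch):

import java.math.RoundingMode;
import java.text.DecimalFormat;

// Divide by 1024 while the value exceeds 1024, then format with one decimal
// place using HALF_UP rounding, mirroring calculateStorageCapacity above.
public final class CapacityFormatSketch {
  static String format(long bytes) {
    double value = bytes;
    String[] units = {"B", "KB", "MB", "GB", "TB"};
    int i = 0;
    while (value > 1024 && i < units.length - 1) {
      value /= 1024;
      i++;
    }
    DecimalFormat df = new DecimalFormat("#0.0");
    df.setRoundingMode(RoundingMode.HALF_UP);
    return df.format(value) + units[i];
  }

  public static void main(String[] args) {
    System.out.println(format(5_000_000_000L)); // prints roughly "4.7GB"
  }
}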
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
index f9fc651f2fa..99a58f690c2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
@@ -32,7 +32,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java
new file mode 100644
index 00000000000..a95a473de6d
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Deque;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Pipeline choose policy that randomly chooses a pipeline with relatively
+ * lower utilization.
+ *
+ * The algorithm is as follows: pick two random pipelines from the given pool
+ * and then pick the one with lower utilization. This gives pipelines with
+ * lower utilization a higher probability of being picked.
+ *
+ * For background on why two pipelines are sampled at random and the less
+ * utilized one is chosen, see the links to the original papers in HDFS-11564.
+ * The same algorithm is used by SCMContainerPlacementCapacity.
+ *
+ */
+public class CapacityPipelineChoosePolicy implements PipelineChoosePolicy {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(PipelineChoosePolicy.class);
+
+ private NodeManager nodeManager;
+
+ private final PipelineChoosePolicy healthPolicy;
+
+ public CapacityPipelineChoosePolicy() {
+ healthPolicy = new HealthyPipelineChoosePolicy();
+ }
+
+ @Override
+ public PipelineChoosePolicy init(final NodeManager scmNodeManager) {
+ this.nodeManager = scmNodeManager;
+ return this;
+ }
+
+ @Override
+ public Pipeline choosePipeline(List<Pipeline> pipelineList,
+ PipelineRequestInformation pri) {
+ Pipeline pipeline1 = healthPolicy.choosePipeline(pipelineList, pri);
+ Pipeline pipeline2 = healthPolicy.choosePipeline(pipelineList, pri);
+
+ int result = new CapacityPipelineComparator(this)
+ .compare(pipeline1, pipeline2);
+
+ LOG.debug("Chosen the {} pipeline", result <= 0 ? "first" : "second");
+ return result <= 0 ? pipeline1 : pipeline2;
+ }
+
+ @Override
+ public int choosePipelineIndex(List<Pipeline> pipelineList,
+ PipelineRequestInformation pri) {
+ List<Pipeline> mutableList = new ArrayList<>(pipelineList);
+ Pipeline pipeline = choosePipeline(mutableList, pri);
+ return pipelineList.indexOf(pipeline);
+ }
+
+ /**
+ * Return the SCMNodeMetrics of the datanodes in the pipeline, sorted in
+ * descending order of SCM used storage.
+ * @param pipeline the pipeline to inspect
+ * @return sorted SCMNodeMetrics corresponding to the pipeline
+ */
+ private Deque<SCMNodeMetric> getSortedNodeFromPipeline(Pipeline pipeline) {
+ Deque<SCMNodeMetric> sortedNodeStack = new ArrayDeque<>();
+ pipeline.getNodes().stream()
+ .map(nodeManager::getNodeStat)
+ .filter(Objects::nonNull)
+ .sorted()
+ .forEach(sortedNodeStack::push);
+ return sortedNodeStack;
+ }
+
+ static class CapacityPipelineComparator implements Comparator<Pipeline> {
+ private final CapacityPipelineChoosePolicy policy;
+
+ CapacityPipelineComparator(CapacityPipelineChoosePolicy policy) {
+ this.policy = policy;
+ }
+ @Override
+ public int compare(Pipeline p1, Pipeline p2) {
+ if (p1.getId().equals(p2.getId())) {
+ LOG.debug("Compare the same pipeline {}", p1);
+ return 0;
+ }
+ Deque<SCMNodeMetric> sortedNodes1 = policy.getSortedNodeFromPipeline(p1);
+ Deque<SCMNodeMetric> sortedNodes2 = policy.getSortedNodeFromPipeline(p2);
+
+ // Compare the scmUsed weight of the nodes in the two sorted node stacks
+ LOG.debug("Compare scmUsed weight in pipelines, first : {}, second : {}",
+ sortedNodes1, sortedNodes2);
+ int result = 0;
+ int count = 0;
+ while (result == 0 &&
+ !sortedNodes1.isEmpty() && !sortedNodes2.isEmpty()) {
+ count++;
+ LOG.debug("Compare {} round", count);
+ result = sortedNodes1.pop().compareTo(sortedNodes2.pop());
+ }
+ return result;
+ }
+ }
+
+}
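The policy added above is a "power of two choices" selector: two pipelines are drawn at random through the healthy-pipeline policy, and the one whose most heavily used datanode has less SCM-used space wins the comparison. A self-contained sketch of the core idea, reduced to a list of per-pipeline utilization figures (the class and method names below are illustrative, not the patch's API):

import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

// Sample two candidate indices at random and keep the one with lower
// utilization; ties (including picking the same index twice) fall back to
// the first candidate, mirroring the "result <= 0 ? pipeline1 : pipeline2"
// check in CapacityPipelineChoosePolicy.
public final class TwoChoicesSketch {
  static int pickLowerUtilization(List<Long> usedBytesPerPipeline) {
    ThreadLocalRandom rnd = ThreadLocalRandom.current();
    int first = rnd.nextInt(usedBytesPerPipeline.size());
    int second = rnd.nextInt(usedBytesPerPipeline.size());
    return usedBytesPerPipeline.get(first) <= usedBytesPerPipeline.get(second)
        ? first : second;
  }

  public static void main(String[] args) {
    // Pipelines with lower used bytes are returned more often.
    System.out.println(pickLowerUtilization(List.of(10L, 500L, 20L, 300L)));
  }
}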
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
index d040dbe2bca..90736a01813 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,14 +49,14 @@ public final class PipelineChoosePolicyFactory {
private PipelineChoosePolicyFactory() {
}
- public static PipelineChoosePolicy getPolicy(
+ public static PipelineChoosePolicy getPolicy(final NodeManager nodeManager,
ScmConfig scmConfig, boolean forEC) throws SCMException {
Class<? extends PipelineChoosePolicy> policyClass = null;
String policyName = forEC ? scmConfig.getECPipelineChoosePolicyName() :
scmConfig.getPipelineChoosePolicyName();
try {
policyClass = getClass(policyName, PipelineChoosePolicy.class);
- return createPipelineChoosePolicyFromClass(policyClass);
+ return createPipelineChoosePolicyFromClass(nodeManager, policyClass);
} catch (Exception e) {
Class<? extends PipelineChoosePolicy> defaultPolicy = forEC ?
OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT :
@@ -64,13 +65,14 @@ public static PipelineChoosePolicy getPolicy(
LOG.error("Met an exception while create pipeline choose policy "
+ "for the given class {}. Fallback to the default pipeline "
+ " choose policy {}", policyName, defaultPolicy, e);
- return createPipelineChoosePolicyFromClass(defaultPolicy);
+ return createPipelineChoosePolicyFromClass(nodeManager, defaultPolicy);
}
throw e;
}
}
private static PipelineChoosePolicy createPipelineChoosePolicyFromClass(
+ final NodeManager nodeManager,
Class<? extends PipelineChoosePolicy> policyClass) throws SCMException {
Constructor<? extends PipelineChoosePolicy> constructor;
try {
@@ -86,7 +88,7 @@ private static PipelineChoosePolicy createPipelineChoosePolicyFromClass(
}
try {
- return constructor.newInstance();
+ return constructor.newInstance().init(nodeManager);
} catch (Exception e) {
throw new RuntimeException("Failed to instantiate class " +
policyClass.getCanonicalName() + " for " + e.getMessage());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 7738d0e3907..f402b9309fe 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipRequestProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipResponseProto;
@@ -51,6 +52,9 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainersOnDecomNodeProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto;
@@ -92,6 +96,8 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto;
@@ -120,6 +126,7 @@
import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
+import org.apache.hadoop.util.ProtobufUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -460,6 +467,13 @@ public ScmContainerLocationResponse processRequest(
.setNodeQueryResponse(queryNode(request.getNodeQueryRequest(),
request.getVersion()))
.build();
+ case SingleNodeQuery:
+ return ScmContainerLocationResponse.newBuilder()
+ .setCmdType(request.getCmdType())
+ .setStatus(Status.OK)
+ .setSingleNodeQueryResponse(querySingleNode(request
+ .getSingleNodeQueryRequest()))
+ .build();
case CloseContainer:
return ScmContainerLocationResponse.newBuilder()
.setCmdType(request.getCmdType())
@@ -604,6 +618,12 @@ public ScmContainerLocationResponse processRequest(
.setDecommissionNodesResponse(decommissionNodes(
request.getDecommissionNodesRequest()))
.build();
+ case GetContainersOnDecomNode:
+ return ScmContainerLocationResponse.newBuilder()
+ .setCmdType(request.getCmdType())
+ .setStatus(Status.OK)
+ .setGetContainersOnDecomNodeResponse(getContainersOnDecomNode(request.getGetContainersOnDecomNodeRequest()))
+ .build();
case RecommissionNodes:
return ScmContainerLocationResponse.newBuilder()
.setCmdType(request.getCmdType())
@@ -866,6 +886,16 @@ public NodeQueryResponseProto queryNode(
.build();
}
+ public SingleNodeQueryResponseProto querySingleNode(
+ SingleNodeQueryRequestProto request)
+ throws IOException {
+
+ HddsProtos.Node datanode = impl.queryNode(ProtobufUtils.fromProtobuf(request.getUuid()));
+ return SingleNodeQueryResponseProto.newBuilder()
+ .setDatanode(datanode)
+ .build();
+ }
+
public SCMCloseContainerResponseProto closeContainer(
SCMCloseContainerRequestProto request)
throws IOException {
@@ -1140,6 +1170,22 @@ public DecommissionNodesResponseProto decommissionNodes(
return response.build();
}
+ public GetContainersOnDecomNodeResponseProto getContainersOnDecomNode(GetContainersOnDecomNodeRequestProto request)
+ throws IOException {
+ Map<String, List<ContainerID>> containerMap = impl.getContainersOnDecomNode(
+ DatanodeDetails.getFromProtoBuf(request.getDatanodeDetails()));
+ List<ContainersOnDecomNodeProto> containersProtoList = new ArrayList<>();
+ for (Map.Entry<String, List<ContainerID>> containerList : containerMap.entrySet()) {
+ List<HddsProtos.ContainerID> containerIdsProto = new ArrayList<>();
+ for (ContainerID id : containerList.getValue()) {
+ containerIdsProto.add(id.getProtobuf());
+ }
+ containersProtoList.add(ContainersOnDecomNodeProto.newBuilder().setName(containerList.getKey())
+ .addAllId(containerIdsProto).build());
+ }
+ return GetContainersOnDecomNodeResponseProto.newBuilder().addAllContainersOnDecomNode(containersProtoList).build();
+ }
+
public RecommissionNodesResponseProto recommissionNodes(
RecommissionNodesRequestProto request) throws IOException {
List<DatanodeAdminError> errors =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java
index c1f52914f4f..6b77350cc8c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java
@@ -61,7 +61,6 @@ public class SecretKeyManagerService implements SCMService, Runnable {
private final ScheduledExecutorService scheduler;
- @SuppressWarnings("parameternumber")
public SecretKeyManagerService(SCMContext scmContext,
ConfigurationSource conf,
SCMRatisServer ratisServer) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java
index bffddff87b3..2748f9c3a49 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ContainerReportQueue.java
@@ -32,8 +32,8 @@
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor.IQueueMetrics;
import org.apache.hadoop.util.Time;
-import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
+import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
/**
* Customized queue to handle FCR and ICR from datanode optimally,
@@ -159,7 +159,7 @@ private ContainerReport getReport(String uuid) {
return null;
}
- public boolean addValue(@NotNull ContainerReport value) {
+ public boolean addValue(@Nonnull ContainerReport value) {
synchronized (this) {
if (remainingCapacity() == 0) {
return false;
@@ -177,7 +177,7 @@ public boolean addValue(@NotNull ContainerReport value) {
}
@Override
- public boolean add(@NotNull ContainerReport value) {
+ public boolean add(@Nonnull ContainerReport value) {
Objects.requireNonNull(value);
synchronized (this) {
if (remainingCapacity() == 0) {
@@ -189,7 +189,7 @@ public boolean add(@NotNull ContainerReport value) {
}
@Override
- public boolean offer(@NotNull ContainerReport value) {
+ public boolean offer(@Nonnull ContainerReport value) {
Objects.requireNonNull(value);
synchronized (this) {
return addValue(value);
@@ -229,7 +229,7 @@ public ContainerReport peek() {
}
@Override
- public void put(@NotNull ContainerReport value) throws InterruptedException {
+ public void put(@Nonnull ContainerReport value) throws InterruptedException {
Objects.requireNonNull(value);
while (!addValue(value)) {
Thread.currentThread().sleep(10);
@@ -238,7 +238,7 @@ public void put(@NotNull ContainerReport value) throws InterruptedException {
@Override
public boolean offer(ContainerReport value, long timeout,
- @NotNull TimeUnit unit) throws InterruptedException {
+ @Nonnull TimeUnit unit) throws InterruptedException {
Objects.requireNonNull(value);
long timeoutMillis = unit.toMillis(timeout);
while (timeoutMillis > 0) {
@@ -253,7 +253,7 @@ public boolean offer(ContainerReport value, long timeout,
return false;
}
- @NotNull
+ @Nonnull
@Override
public ContainerReport take() throws InterruptedException {
String uuid = orderingQueue.take();
@@ -264,7 +264,7 @@ public ContainerReport take() throws InterruptedException {
@Nullable
@Override
- public ContainerReport poll(long timeout, @NotNull TimeUnit unit)
+ public ContainerReport poll(long timeout, @Nonnull TimeUnit unit)
throws InterruptedException {
String uuid = orderingQueue.poll(timeout, unit);
synchronized (this) {
@@ -286,25 +286,25 @@ public boolean remove(Object o) {
}
@Override
- public boolean containsAll(@NotNull Collection<?> c) {
+ public boolean containsAll(@Nonnull Collection<?> c) {
// no need support this
throw new UnsupportedOperationException("not supported");
}
@Override
- public boolean addAll(@NotNull Collection<? extends ContainerReport> c) {
+ public boolean addAll(@Nonnull Collection<? extends ContainerReport> c) {
// no need support this
throw new UnsupportedOperationException("not supported");
}
@Override
- public boolean removeAll(@NotNull Collection<?> c) {
+ public boolean removeAll(@Nonnull Collection<?> c) {
// no need support this
throw new UnsupportedOperationException("not supported");
}
@Override
- public boolean retainAll(@NotNull Collection<?> c) {
+ public boolean retainAll(@Nonnull Collection<?> c) {
// no need support this
throw new UnsupportedOperationException("not supported");
}
@@ -336,35 +336,35 @@ public boolean contains(Object o) {
throw new UnsupportedOperationException("not supported");
}
- @NotNull
+ @Nonnull
@Override
public Iterator<ContainerReport> iterator() {
// no need support this
throw new UnsupportedOperationException("not supported");
}
- @NotNull
+ @Nonnull
@Override
public Object[] toArray() {
// no need support this
throw new UnsupportedOperationException("not supported");
}
- @NotNull
+ @Nonnull
@Override
- public <T> T[] toArray(@NotNull T[] a) {
+ public <T> T[] toArray(@Nonnull T[] a) {
// no need support this
throw new UnsupportedOperationException("not supported");
}
@Override
- public int drainTo(@NotNull Collection<? super ContainerReport> c) {
+ public int drainTo(@Nonnull Collection<? super ContainerReport> c) {
// no need support this
throw new UnsupportedOperationException("not supported");
}
@Override
- public int drainTo(@NotNull Collection<? super ContainerReport> c,
+ public int drainTo(@Nonnull Collection<? super ContainerReport> c,
int maxElements) {
// no need support this
throw new UnsupportedOperationException("not supported");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 3d38fdbe819..13bef8590b7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -109,6 +109,7 @@
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import java.util.UUID;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService.newReflectiveBlockingService;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
@@ -587,6 +588,15 @@ public void deleteContainer(long containerID) throws IOException {
}
}
+ @Override
+ public Map<String, List<ContainerID>> getContainersOnDecomNode(DatanodeDetails dn) throws IOException {
+ try {
+ return scm.getScmDecommissionManager().getContainersReplicatedOnNode(dn);
+ } catch (NodeNotFoundException e) {
+ throw new IOException("Failed to get containers list. Unable to find required node", e);
+ }
+ }
+
@Override
public List<HddsProtos.Node> queryNode(
HddsProtos.NodeOperationalState opState, HddsProtos.NodeState state,
@@ -613,6 +623,27 @@ public List queryNode(
return result;
}
+ @Override
+ public HddsProtos.Node queryNode(UUID uuid)
+ throws IOException {
+ HddsProtos.Node result = null;
+ try {
+ DatanodeDetails node = scm.getScmNodeManager().getNodeByUuid(uuid);
+ if (node != null) {
+ NodeStatus ns = scm.getScmNodeManager().getNodeStatus(node);
+ result = HddsProtos.Node.newBuilder()
+ .setNodeID(node.getProtoBufMessage())
+ .addNodeStates(ns.getHealth())
+ .addNodeOperationalStates(ns.getOperationalState())
+ .build();
+ }
+ } catch (NodeNotFoundException e) {
+ throw new IOException(
+ "An unexpected error occurred querying the NodeStatus", e);
+ }
+ return result;
+ }
+
@Override
public List<DatanodeAdminError> decommissionNodes(List<String> nodes)
throws IOException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 38db618ef53..b6dc6f599bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -130,8 +130,7 @@ public List dispatch(SCMHeartbeatRequestProto heartbeat) {
commandQueueReport = heartbeat.getCommandQueueReport();
}
// should we dispatch heartbeat through eventPublisher?
- commands = nodeManager.processHeartbeat(datanodeDetails,
- layoutVersion, commandQueueReport);
+ commands = nodeManager.processHeartbeat(datanodeDetails, commandQueueReport);
if (heartbeat.hasNodeReport()) {
LOG.debug("Dispatching Node Report.");
eventPublisher.fireEvent(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
index bad326cad1e..40431330d11 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
@@ -83,7 +83,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
import static org.apache.hadoop.hdds.scm.ScmUtils.checkIfCertSignRequestAllowed;
import static org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator.CERTIFICATE_ID;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 1a3ea2515f2..11fdc0d16d7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -450,7 +450,6 @@ private StorageContainerManager(OzoneConfiguration conf,
moveManager = new MoveManager(replicationManager, containerManager);
containerReplicaPendingOps.registerSubscriber(moveManager);
containerBalancer = new ContainerBalancer(this);
- LOG.info(containerBalancer.toString());
// Emit initial safe mode status, as now handlers are registered.
scmSafeModeManager.emitSafeModeStatus();
@@ -804,9 +803,9 @@ private void initializeSystemManagers(OzoneConfiguration conf,
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
pipelineChoosePolicy = PipelineChoosePolicyFactory
- .getPolicy(scmConfig, false);
+ .getPolicy(scmNodeManager, scmConfig, false);
ecPipelineChoosePolicy = PipelineChoosePolicyFactory
- .getPolicy(scmConfig, true);
+ .getPolicy(scmNodeManager, scmConfig, true);
if (configurator.getWritableContainerFactory() != null) {
writableContainerFactory = configurator.getWritableContainerFactory();
} else {
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
index 214a2ad7868..fdd8de15b6a 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
@@ -48,6 +48,10 @@ Node Status
<th>HostName</th>
+ <th>Used Space Percent</th>
+ <th>Capacity</th>
<th>Operational State</th>
element.key === "USEDSPACEPERCENT").value,
+ capacity: value && value.find((element) => element.key === "CAPACITY").value,
comstate: value && value.find((element) => element.key === "COMSTATE").value,
lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value,
port: portSpec.port,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
index 6c651cbfacd..634a723f289 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
@@ -37,7 +37,9 @@
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
@@ -72,9 +74,9 @@ public class TestSCMCommonPlacementPolicy {
private OzoneConfiguration conf;
@BeforeEach
- public void setup() {
+ void setup(@TempDir File testDir) {
nodeManager = new MockNodeManager(true, 10);
- conf = SCMTestUtils.getConf();
+ conf = SCMTestUtils.getConf(testDir);
}
@Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 0aa2aacf9d1..754fab6d1b1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -24,7 +24,6 @@
import java.net.URL;
import java.net.URLConnection;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
@@ -33,10 +32,10 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
@@ -46,8 +45,8 @@
* Test http server os SCM with various HTTP option.
*/
public class TestStorageContainerManagerHttpServer {
- private static final String BASEDIR = GenericTestUtils
- .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName());
+ @TempDir
+ private static File baseDir;
private static String keystoresDir;
private static String sslConfDir;
private static OzoneConfiguration conf;
@@ -55,12 +54,10 @@ public class TestStorageContainerManagerHttpServer {
@BeforeAll
public static void setUp() throws Exception {
- File base = new File(BASEDIR);
- FileUtil.fullyDelete(base);
- File ozoneMetadataDirectory = new File(BASEDIR, "metadata");
+ File ozoneMetadataDirectory = new File(baseDir, "metadata");
ozoneMetadataDirectory.mkdirs();
conf = new OzoneConfiguration();
- keystoresDir = new File(BASEDIR).getAbsolutePath();
+ keystoresDir = baseDir.getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(
TestStorageContainerManagerHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
@@ -77,7 +74,6 @@ public static void setUp() throws Exception {
@AfterAll
public static void tearDown() throws Exception {
connectionFactory.destroy();
- FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index 9d852a15446..6438b6f8d49 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -17,10 +17,10 @@
package org.apache.hadoop.hdds.scm.block;
+import java.io.File;
import java.io.IOException;
import java.time.Clock;
import java.time.ZoneId;
-import java.nio.file.Path;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
@@ -83,7 +83,6 @@
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
@@ -114,14 +113,13 @@ public class TestBlockManager {
private ReplicationConfig replicationConfig;
@BeforeEach
- public void setUp(@TempDir Path tempDir) throws Exception {
- conf = SCMTestUtils.getConf();
+ void setUp(@TempDir File tempDir) throws Exception {
+ conf = SCMTestUtils.getConf(tempDir);
numContainerPerOwnerInPipeline = conf.getInt(
ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.toString());
conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 5,
TimeUnit.SECONDS);
@@ -249,7 +247,7 @@ public void testAllocateBlockWithExclusion() throws Exception {
}
@Test
- public void testAllocateBlockInParallel() {
+ void testAllocateBlockInParallel() throws Exception {
int threadCount = 20;
List<ExecutorService> executors = new ArrayList<>(threadCount);
for (int i = 0; i < threadCount; i++) {
@@ -273,17 +271,14 @@ public void testAllocateBlockInParallel() {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture
- .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
- .get();
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+
+ CompletableFuture
+ .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
+ .get();
}
@Test
- public void testBlockDistribution() throws Exception {
+ void testBlockDistribution() throws Exception {
int threadCount = numContainerPerOwnerInPipeline *
numContainerPerOwnerInPipeline;
nodeManager.setNumPipelinePerDatanode(1);
@@ -323,24 +318,19 @@ public void testBlockDistribution() throws Exception {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture.allOf(futureList.toArray(
- new CompletableFuture[0])).get();
-
- assertEquals(1, pipelineManager.getPipelines(replicationConfig).size());
- assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.size());
- assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.values().size());
- allocatedBlockMap.values().forEach(v -> {
- assertEquals(numContainerPerOwnerInPipeline, v.size());
- });
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+ CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).get();
+
+ assertEquals(1, pipelineManager.getPipelines(replicationConfig).size());
+ assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.size());
+ assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.values().size());
+ allocatedBlockMap.values().forEach(v -> {
+ assertEquals(numContainerPerOwnerInPipeline, v.size());
+ });
}
@Test
- public void testBlockDistributionWithMultipleDisks() throws Exception {
+ void testBlockDistributionWithMultipleDisks() throws Exception {
int threadCount = numContainerPerOwnerInPipeline *
numContainerPerOwnerInPipeline;
nodeManager.setNumHealthyVolumes(numContainerPerOwnerInPipeline);
@@ -381,30 +371,26 @@ public void testBlockDistributionWithMultipleDisks() throws Exception {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture
- .allOf(futureList.toArray(
- new CompletableFuture[futureList.size()])).get();
- assertEquals(1,
- pipelineManager.getPipelines(replicationConfig).size());
- Pipeline pipeline =
- pipelineManager.getPipelines(replicationConfig).get(0);
- // total no of containers to be created will be number of healthy
- // volumes * number of numContainerPerOwnerInPipeline which is equal to
- // the thread count
- assertEquals(threadCount, pipelineManager.getNumberOfContainers(pipeline.getId()));
- assertEquals(threadCount, allocatedBlockMap.size());
- assertEquals(threadCount, allocatedBlockMap.values().size());
- allocatedBlockMap.values().forEach(v -> {
- assertEquals(1, v.size());
- });
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+ CompletableFuture
+ .allOf(futureList.toArray(
+ new CompletableFuture[futureList.size()])).get();
+ assertEquals(1,
+ pipelineManager.getPipelines(replicationConfig).size());
+ Pipeline pipeline =
+ pipelineManager.getPipelines(replicationConfig).get(0);
+ // total no of containers to be created will be number of healthy
+ // volumes * number of numContainerPerOwnerInPipeline which is equal to
+ // the thread count
+ assertEquals(threadCount, pipelineManager.getNumberOfContainers(pipeline.getId()));
+ assertEquals(threadCount, allocatedBlockMap.size());
+ assertEquals(threadCount, allocatedBlockMap.values().size());
+ allocatedBlockMap.values().forEach(v -> {
+ assertEquals(1, v.size());
+ });
}
@Test
- public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception {
+ void testBlockDistributionWithMultipleRaftLogDisks() throws Exception {
int threadCount = numContainerPerOwnerInPipeline *
numContainerPerOwnerInPipeline;
int numMetaDataVolumes = 2;
@@ -446,25 +432,20 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture
- .allOf(futureList.toArray(
- new CompletableFuture[futureList.size()])).get();
- assertEquals(1,
- pipelineManager.getPipelines(replicationConfig).size());
- Pipeline pipeline =
- pipelineManager.getPipelines(replicationConfig).get(0);
- // the pipeline per raft log disk config is set to 1 by default
- int numContainers = (int)Math.ceil((double)
- (numContainerPerOwnerInPipeline *
- numContainerPerOwnerInPipeline) / numMetaDataVolumes);
- assertEquals(numContainers, pipelineManager.
- getNumberOfContainers(pipeline.getId()));
- assertEquals(numContainers, allocatedBlockMap.size());
- assertEquals(numContainers, allocatedBlockMap.values().size());
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+ CompletableFuture
+ .allOf(futureList.toArray(
+ new CompletableFuture[futureList.size()])).get();
+ assertEquals(1,
+ pipelineManager.getPipelines(replicationConfig).size());
+ Pipeline pipeline =
+ pipelineManager.getPipelines(replicationConfig).get(0);
+ // the pipeline per raft log disk config is set to 1 by default
+ int numContainers = (int)Math.ceil((double)
+ (numContainerPerOwnerInPipeline *
+ numContainerPerOwnerInPipeline) / numMetaDataVolumes);
+ assertEquals(numContainers, pipelineManager.getNumberOfContainers(pipeline.getId()));
+ assertEquals(numContainers, allocatedBlockMap.size());
+ assertEquals(numContainers, allocatedBlockMap.values().size());
}
@Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index be57aa8ea6a..03500529ff9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.block;
-import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
@@ -54,10 +54,10 @@
import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -70,7 +70,6 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
@@ -81,7 +80,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
@@ -95,6 +94,7 @@ public class TestDeletedBlockLog {
private DeletedBlockLogImpl deletedBlockLog;
private static final int BLOCKS_PER_TXN = 5;
private OzoneConfiguration conf;
+ @TempDir
private File testDir;
private ContainerManager containerManager;
private Table containerTable;
@@ -111,8 +111,6 @@ public class TestDeletedBlockLog {
@BeforeEach
public void setup() throws Exception {
- testDir = GenericTestUtils.getTestDir(
- TestDeletedBlockLog.class.getSimpleName());
conf = new OzoneConfiguration();
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
@@ -128,12 +126,9 @@ public void setup() throws Exception {
new SCMHADBTransactionBufferStub(scm.getScmMetadataStore().getStore());
metrics = mock(ScmBlockDeletingServiceMetrics.class);
deletedBlockLog = new DeletedBlockLogImpl(conf,
+ scm,
containerManager,
- scm.getScmHAManager().getRatisServer(),
- scm.getScmMetadataStore().getDeletedBlocksTXTable(),
scmHADBTransactionBuffer,
- scm.getScmContext(),
- scm.getSequenceIdGen(),
metrics);
dnList = new ArrayList<>(3);
setupContainerManager();
@@ -208,7 +203,6 @@ public void tearDown() throws Exception {
deletedBlockLog.close();
scm.stop();
scm.join();
- FileUtils.deleteDirectory(testDir);
}
private Map<Long, List<Long>> generateData(int dataSize) throws IOException {
@@ -218,9 +212,8 @@ private Map<Long, List<Long>> generateData(int dataSize,
private Map<Long, List<Long>> generateData(int dataSize,
HddsProtos.LifeCycleState state) throws IOException {
Map<Long, List<Long>> blockMap = new HashMap<>();
- Random random = new Random(1);
- int continerIDBase = random.nextInt(100);
- int localIDBase = random.nextInt(1000);
+ int continerIDBase = RandomUtils.nextInt(0, 100);
+ int localIDBase = RandomUtils.nextInt(0, 1000);
for (int i = 0; i < dataSize; i++) {
long containerID = continerIDBase + i;
updateContainerMetadata(containerID, state);
@@ -692,13 +685,12 @@ public void testInadequateReplicaCommit() throws Exception {
@Test
public void testRandomOperateTransactions() throws Exception {
mockContainerHealthResult(true);
- Random random = new Random();
int added = 0, committed = 0;
List<DeletedBlocksTransaction> blocks = new ArrayList<>();
List<Long> txIDs;
// Randomly add/get/commit/increase transactions.
for (int i = 0; i < 100; i++) {
- int state = random.nextInt(4);
+ int state = RandomUtils.nextInt(0, 4);
if (state == 0) {
addTransactions(generateData(10), true);
added += 10;
@@ -736,12 +728,9 @@ public void testPersistence() throws Exception {
// transactions are stored persistently.
deletedBlockLog.close();
deletedBlockLog = new DeletedBlockLogImpl(conf,
+ scm,
containerManager,
- scm.getScmHAManager().getRatisServer(),
- scm.getScmMetadataStore().getDeletedBlocksTXTable(),
scmHADBTransactionBuffer,
- scm.getScmContext(),
- scm.getSequenceIdGen(),
metrics);
List<DeletedBlocksTransaction> blocks =
getTransactions(10 * BLOCKS_PER_TXN * THREE);
@@ -755,12 +744,9 @@ public void testPersistence() throws Exception {
// currentTxnID = 50
deletedBlockLog.close();
new DeletedBlockLogImpl(conf,
+ scm,
containerManager,
- scm.getScmHAManager().getRatisServer(),
- scm.getScmMetadataStore().getDeletedBlocksTXTable(),
scmHADBTransactionBuffer,
- scm.getScmContext(),
- scm.getSequenceIdGen(),
metrics);
blocks = getTransactions(40 * BLOCKS_PER_TXN * THREE);
assertEquals(0, blocks.size());
@@ -803,8 +789,7 @@ public void testDeletedBlockTransactions()
// add two transactions for same container
containerID = blocks.get(0).getContainerID();
Map<Long, List<Long>> deletedBlocksMap = new HashMap<>();
- Random random = new Random();
- long localId = random.nextLong();
+ long localId = RandomUtils.nextLong();
deletedBlocksMap.put(containerID, new LinkedList<>(
Collections.singletonList(localId)));
addTransactions(deletedBlocksMap, true);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java
index 3bd7ad00f6a..1fd6a2277c7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java
@@ -49,10 +49,10 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anySet;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anySet;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 794dedceef0..84f3684ab7c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -772,13 +772,11 @@ private synchronized void addEntryTodnsToUuidMap(
* Send heartbeat to indicate the datanode is alive and doing well.
*
* @param datanodeDetails - Datanode ID.
- * @param layoutInfo - DataNode Layout info
* @param commandQueueReportProto - Command Queue Report Proto
* @return SCMheartbeat response list
*/
@Override
public List processHeartbeat(DatanodeDetails datanodeDetails,
- LayoutVersionProto layoutInfo,
CommandQueueReportProto commandQueueReportProto) {
return null;
}
@@ -786,7 +784,7 @@ public List processHeartbeat(DatanodeDetails datanodeDetails,
@Override
public Boolean isNodeRegistered(
DatanodeDetails datanodeDetails) {
- return false;
+ return healthyNodes.contains(datanodeDetails);
}
@Override
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index 2bd13d4489e..9649159de3f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -428,7 +428,6 @@ public RegisteredCommand register(DatanodeDetails datanodeDetails,
@Override
public List processHeartbeat(DatanodeDetails datanodeDetails,
- LayoutVersionProto layoutInfo,
CommandQueueReportProto commandQueueReportProto) {
return null;
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 97fda58163b..6891d316142 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -52,10 +52,10 @@
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.verify;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
index a5150f3c952..25a4a80f233 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -21,11 +21,8 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -45,10 +42,10 @@
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
@@ -66,6 +63,7 @@
*/
public class TestContainerManagerImpl {
+ @TempDir
private File testDir;
private DBStore dbStore;
private ContainerManager containerManager;
@@ -75,11 +73,8 @@ public class TestContainerManagerImpl {
private ContainerReplicaPendingOps pendingOpsMock;
@BeforeEach
- public void setUp() throws Exception {
- final OzoneConfiguration conf = SCMTestUtils.getConf();
- testDir = GenericTestUtils.getTestDir(
- TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+ void setUp() throws Exception {
+ final OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
scmhaManager = SCMHAManagerStub.getInstance(true);
@@ -105,8 +100,6 @@ public void cleanup() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
@Test
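
The setUp/tearDown changes above follow one recurring pattern: a manually created test directory (GenericTestUtils.getTestDir plus FileUtil.fullyDelete in cleanup) is replaced by a JUnit 5 @TempDir, which Jupiter creates before each test and deletes afterwards. A minimal sketch of the field-injection form, independent of the Ozone classes involved:

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.File;
    import java.io.IOException;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class TempDirFieldExample {

      // Jupiter injects a fresh directory per test and removes it afterwards,
      // so no explicit tearDown or fullyDelete is needed.
      @TempDir
      File testDir;

      @Test
      void directoryIsUsableWithoutManualCleanup() throws IOException {
        File metadata = new File(testDir, "scm-meta");
        assertTrue(metadata.mkdirs());
        assertTrue(new File(metadata, "VERSION").createNewFile());
      }
    }
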
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index 53512528a0d..695c88d11a3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -16,8 +16,6 @@
*/
package org.apache.hadoop.hdds.scm.container;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -48,10 +46,10 @@
import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -63,7 +61,6 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -89,20 +86,17 @@ public class TestContainerReportHandler {
private ContainerManager containerManager;
private ContainerStateManager containerStateManager;
private EventPublisher publisher;
+ @TempDir
private File testDir;
private DBStore dbStore;
private SCMHAManager scmhaManager;
private PipelineManager pipelineManager;
@BeforeEach
- public void setup() throws IOException, InvalidStateTransitionException,
- TimeoutException {
- final OzoneConfiguration conf = SCMTestUtils.getConf();
+ void setup() throws IOException, InvalidStateTransitionException {
+ final OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
nodeManager = new MockNodeManager(true, 10);
containerManager = mock(ContainerManager.class);
- testDir = GenericTestUtils.getTestDir(
- TestContainerReportHandler.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
scmhaManager = SCMHAManagerStub.getInstance(true);
@@ -165,8 +159,6 @@ public void tearDown() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
private void testReplicaIndexUpdate(ContainerInfo container,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index c3dd608ab28..27505c6dd3b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -23,10 +23,8 @@
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Set;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -46,10 +44,10 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
@@ -64,6 +62,7 @@ public class TestContainerStateManager {
private ContainerStateManager containerStateManager;
private PipelineManager pipelineManager;
private SCMHAManager scmhaManager;
+ @TempDir
private File testDir;
private DBStore dbStore;
private Pipeline pipeline;
@@ -72,8 +71,6 @@ public class TestContainerStateManager {
public void init() throws IOException, TimeoutException {
OzoneConfiguration conf = new OzoneConfiguration();
scmhaManager = SCMHAManagerStub.getInstance(true);
- testDir = GenericTestUtils.getTestDir(
- TestContainerStateManager.class.getSimpleName() + UUID.randomUUID());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
@@ -106,8 +103,6 @@ public void tearDown() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
@Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 8cbfdd9c788..dbcccce598c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdds.scm.container;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -55,10 +54,10 @@
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -70,7 +69,6 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@@ -106,6 +104,7 @@ public class TestIncrementalContainerReportHandler {
private HDDSLayoutVersionManager versionManager;
private SCMContext scmContext = SCMContext.emptyContext();
private PipelineManager pipelineManager;
+ @TempDir
private File testDir;
private DBStore dbStore;
private SCMHAManager scmhaManager;
@@ -114,9 +113,7 @@ public class TestIncrementalContainerReportHandler {
public void setup() throws IOException, InvalidStateTransitionException,
TimeoutException {
final OzoneConfiguration conf = new OzoneConfiguration();
- final String path =
- GenericTestUtils.getTempPath(UUID.randomUUID().toString());
- Path scmPath = Paths.get(path, "scm-meta");
+ Path scmPath = Paths.get(testDir.getPath(), "scm-meta");
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
this.containerManager = mock(ContainerManager.class);
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
@@ -129,9 +126,6 @@ public void setup() throws IOException, InvalidStateTransitionException,
new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
scmContext, versionManager);
scmhaManager = SCMHAManagerStub.getInstance(true);
- testDir = GenericTestUtils.getTestDir(
- TestIncrementalContainerReportHandler.class.getSimpleName()
- + UUID.randomUUID());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
@@ -200,8 +194,6 @@ public void tearDown() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
index 72df033ae81..9ea4ea45b56 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
@@ -27,10 +27,7 @@
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
-import java.util.UUID;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -55,10 +52,10 @@
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
/**
* Test container deletion behaviour of unknown containers
@@ -71,18 +68,16 @@ public class TestUnknownContainerReport {
private ContainerStateManager containerStateManager;
private EventPublisher publisher;
private PipelineManager pipelineManager;
+ @TempDir
private File testDir;
private DBStore dbStore;
private SCMHAManager scmhaManager;
@BeforeEach
public void setup() throws IOException {
- final OzoneConfiguration conf = SCMTestUtils.getConf();
+ final OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
this.nodeManager = new MockNodeManager(true, 10);
this.containerManager = mock(ContainerManager.class);
- testDir = GenericTestUtils.getTestDir(
- TestUnknownContainerReport.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
scmhaManager = SCMHAManagerStub.getInstance(true);
@@ -107,8 +102,6 @@ public void tearDown() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
@Test
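
Several of these setups now call SCMTestUtils.getConf(testDir) instead of getConf() followed by conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, ...). Judging only from the lines the overload replaces, it presumably folds the metadata-dir setting into the helper; a hypothetical sketch of such an overload (not the actual SCMTestUtils code):

    import java.io.File;

    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class ConfWithMetadataDirSketch {

      private ConfWithMetadataDirSketch() {
      }

      // Hypothetical equivalent of SCMTestUtils.getConf(File), inferred from the
      // conf.set(OZONE_METADATA_DIRS, ...) lines it replaces in these tests.
      public static OzoneConfiguration getConf(File testDir) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
        return conf;
      }
    }
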
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
index 3b1d4db0659..3bed3878123 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
@@ -53,7 +53,6 @@
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.ozone.test.GenericTestUtils;
-import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
@@ -1048,7 +1047,6 @@ public void checkIterationResultException()
stopBalancer();
}
- @Unhealthy("HDDS-8941")
@Test
public void testDelayedStart() throws InterruptedException, TimeoutException {
conf.setTimeDuration("hdds.scm.wait.time.after.safemode.exit", 10,
@@ -1066,7 +1064,7 @@ public void testDelayedStart() throws InterruptedException, TimeoutException {
This is the delay before it starts balancing.
*/
GenericTestUtils.waitFor(
- () -> balancingThread.getState() == Thread.State.TIMED_WAITING, 1, 20);
+ () -> balancingThread.getState() == Thread.State.TIMED_WAITING, 1, 40);
assertEquals(Thread.State.TIMED_WAITING,
balancingThread.getState());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 03ba2c54845..34678a301eb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -43,7 +43,7 @@
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 39e19135efa..cbe513eef82 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -19,9 +19,11 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.Random;
import java.util.stream.IntStream;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.StringUtils;
+
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -48,8 +50,6 @@
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
-import org.apache.commons.lang3.StringUtils;
-
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
@@ -64,8 +64,8 @@
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -356,12 +356,11 @@ public void testNoFallback(int datanodeCount) {
setup(datanodeCount);
// 5 replicas. there are only 3 racks. policy prohibit fallback should fail.
int nodeNum = 5;
- try {
- policyNoFallback.chooseDatanodes(null, null, nodeNum, 0, 15);
- fail("Fallback prohibited, this call should fail");
- } catch (Exception e) {
- assertEquals("SCMException", e.getClass().getSimpleName());
- }
+ Exception e =
+ assertThrows(Exception.class,
+ () -> policyNoFallback.chooseDatanodes(null, null, nodeNum, 0, 15),
+ "Fallback prohibited, this call should fail");
+ assertEquals("SCMException", e.getClass().getSimpleName());
// get metrics
long totalRequest = metrics.getDatanodeRequestCount();
@@ -425,13 +424,12 @@ public void testNoInfiniteLoop(int datanodeCount) {
setup(datanodeCount);
int nodeNum = 1;
- try {
- // request storage space larger than node capability
- policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15);
- fail("Storage requested exceeds capacity, this call should fail");
- } catch (Exception e) {
- assertEquals("SCMException", e.getClass().getSimpleName());
- }
+ // request storage space larger than node capability
+ Exception e =
+ assertThrows(Exception.class,
+ () -> policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15),
+ "Storage requested exceeds capacity, this call should fail");
+ assertEquals("SCMException", e.getClass().getSimpleName());
// get metrics
long totalRequest = metrics.getDatanodeRequestCount();
@@ -625,7 +623,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) {
for (int i = 0; i < 10; i++) {
// Set a random DN to in_service and ensure it is always picked
- int index = new Random().nextInt(dnInfos.size());
+ int index = RandomUtils.nextInt(0, dnInfos.size());
dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
try {
List<DatanodeDetails> datanodeDetails =
@@ -830,12 +828,11 @@ public void testNoFallbackWithUsedNodes(int datanodeCount) {
// 5 replicas. there are only 3 racks. policy prohibit fallback should fail.
int nodeNum = 5;
- try {
- policyNoFallback.chooseDatanodes(usedNodes, null, null, nodeNum, 0, 15);
- fail("Fallback prohibited, this call should fail");
- } catch (Exception e) {
- assertEquals("SCMException", e.getClass().getSimpleName());
- }
+ Exception e =
+ assertThrows(Exception.class,
+ () -> policyNoFallback.chooseDatanodes(usedNodes, null, null, nodeNum, 0, 15),
+ "Fallback prohibited, this call should fail");
+ assertEquals("SCMException", e.getClass().getSimpleName());
// get metrics
long totalRequest = metrics.getDatanodeRequestCount();
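
The same file also swaps java.util.Random for commons-lang3 RandomUtils, whose static helpers avoid constructing and seeding a Random per call. A small standalone sketch of the replacement (the node array and byte count are illustrative):

    import org.apache.commons.lang3.RandomUtils;

    public final class RandomUtilsExample {

      private RandomUtilsExample() {
      }

      public static void main(String[] args) {
        String[] nodes = {"dn1", "dn2", "dn3", "dn4"};
        // nextInt(startInclusive, endExclusive) replaces new Random().nextInt(bound).
        int index = RandomUtils.nextInt(0, nodes.length);
        System.out.println("picked " + nodes[index]);
        // nextBytes(count) replaces filling a byte[] from a seeded Random.
        byte[] payload = RandomUtils.nextBytes(16);
        System.out.println("payload length = " + payload.length);
      }
    }
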
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java
index 5bf59b27b8c..faccfa67a58 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java
@@ -70,7 +70,6 @@
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -456,14 +455,12 @@ public void chooseNodeWithFavoredNodes(int datanodeCount)
public void testNoInfiniteLoop(int datanodeCount) {
setup(datanodeCount);
int nodeNum = 1;
-
- try {
- // request storage space larger than node capability
- policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15);
- fail("Storage requested exceeds capacity, this call should fail");
- } catch (Exception e) {
- assertEquals("SCMException", e.getClass().getSimpleName());
- }
+ // request storage space larger than node capability
+ Exception e =
+ assertThrows(Exception.class,
+ () -> policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 0, 15),
+ "Storage requested exceeds capacity, this call should fail");
+ assertEquals("SCMException", e.getClass().getSimpleName());
// get metrics
long totalRequest = metrics.getDatanodeRequestCount();
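
The try/fail conversions in these two placement-policy tests use the standard JUnit 5 idiom: assertThrows runs the lambda, asserts that it throws, and returns the exception so the old catch-block assertions can follow it directly. A minimal standalone sketch (the choose method is illustrative, not the Ozone placement policy):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    class AssertThrowsStyleExample {

      private static void choose(int requested, int available) throws IOException {
        if (requested > available) {
          throw new IOException("not enough nodes: " + requested + " > " + available);
        }
      }

      @Test
      void failureIsAssertedWithoutTryCatchFail() {
        // Replaces try { choose(...); fail(...); } catch (Exception e) { assertEquals(...); }
        IOException e = assertThrows(IOException.class, () -> choose(5, 3),
            "call should fail when more nodes are requested than available");
        assertEquals("not enough nodes: 5 > 3", e.getMessage());
      }
    }
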
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java
index 21f756f9a0b..29dd24f11ca 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestDatanodeCommandCountUpdatedHandler.java
@@ -24,7 +24,7 @@
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.eq;
/**
* Tests for DatanodeCommandCountUpdatedHandler.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
index 6f320830334..ff0b838bd8b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -524,7 +524,7 @@ public void testMissingNonMaintenanceReplicasPendingAdd() {
assertEquals(0, rcnt.unavailableIndexes(true).size());
}
- @NotNull
+ @Nonnull
private List<ContainerReplicaOp> getContainerReplicaOps(
List<Integer> addIndexes, List<Integer> deleteIndexes) {
List<ContainerReplicaOp> pending = new ArrayList<>();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java
index b0a77f3a7c0..73f6edb468e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECMisReplicationHandler.java
@@ -31,9 +31,11 @@
import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
+import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
@@ -62,10 +64,10 @@ public class TestECMisReplicationHandler extends TestMisReplicationHandler {
@BeforeEach
- public void setup() throws NodeNotFoundException,
+ void setup(@TempDir File testDir) throws NodeNotFoundException,
CommandTargetOverloadedException, NotLeaderException {
ECReplicationConfig repConfig = new ECReplicationConfig(DATA, PARITY);
- setup(repConfig);
+ setup(repConfig, testDir);
}
@ParameterizedTest
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java
index 7021b956250..50cead87e03 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java
@@ -41,8 +41,10 @@
import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.mockito.stubbing.Answer;
+import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
@@ -63,10 +65,10 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doAnswer;
@@ -82,7 +84,7 @@ public class TestECOverReplicationHandler {
private Set<Pair<DatanodeDetails, SCMCommand<?>>> commandsSent;
@BeforeEach
- public void setup() throws NodeNotFoundException, NotLeaderException,
+ void setup(@TempDir File testDir) throws NodeNotFoundException, NotLeaderException,
CommandTargetOverloadedException {
staleNode = null;
@@ -103,7 +105,7 @@ public void setup() throws NodeNotFoundException, NotLeaderException,
commandsSent);
NodeManager nodeManager = new MockNodeManager(true, 10);
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
ECReplicationConfig repConfig = new ECReplicationConfig(3, 2);
container = ReplicationTestUtil
.createContainer(HddsProtos.LifeCycleState.CLOSED, repConfig);
@@ -311,13 +313,8 @@ public void testDeleteThrottling() throws IOException {
ECOverReplicationHandler ecORH =
new ECOverReplicationHandler(policy, replicationManager);
- try {
- ecORH.processAndSendCommands(availableReplicas, ImmutableList.of(),
- health, 1);
- fail("Expected CommandTargetOverloadedException");
- } catch (CommandTargetOverloadedException e) {
- // This is expected.
- }
+ assertThrows(CommandTargetOverloadedException.class,
+ () -> ecORH.processAndSendCommands(availableReplicas, ImmutableList.of(), health, 1));
assertEquals(1, commandsSent.size());
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
index 0d09e26b27c..22c3630e0c6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
@@ -49,9 +49,11 @@
import org.assertj.core.util.Lists;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
+import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -86,16 +88,16 @@
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isNull;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.clearInvocations;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.isNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
@@ -125,7 +127,7 @@ public class TestECUnderReplicationHandler {
= new AtomicBoolean(false);
@BeforeEach
- public void setup() throws NodeNotFoundException,
+ void setup(@TempDir File testDir) throws NodeNotFoundException,
CommandTargetOverloadedException, NotLeaderException {
nodeManager = new MockNodeManager(true, 10) {
@Override
@@ -159,7 +161,7 @@ public NodeStatus getNodeStatus(DatanodeDetails dd) {
replicationManager, commandsSent,
throwOverloadedExceptionOnReconstruction);
- conf = SCMTestUtils.getConf();
+ conf = SCMTestUtils.getConf(testDir);
repConfig = new ECReplicationConfig(DATA, PARITY);
container = createContainer(HddsProtos.LifeCycleState.CLOSED, repConfig);
policy = ReplicationTestUtil
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java
index 1dcf15ed65b..8aac64de702 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestLegacyReplicationManager.java
@@ -77,10 +77,12 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.junit.jupiter.params.provider.ValueSource;
+import java.io.File;
import java.io.IOException;
import java.time.Clock;
import java.time.Instant;
@@ -155,15 +157,18 @@ public class TestLegacyReplicationManager {
private DBStore dbStore;
private ContainerReplicaPendingOps containerReplicaPendingOps;
+ @TempDir
+ private File tempDir;
+
int getInflightCount(InflightType type) {
return replicationManager.getLegacyReplicationManager()
.getInflightCount(type);
}
@BeforeEach
- void setup() throws IOException, InterruptedException,
+ void setup(@TempDir File testDir) throws IOException, InterruptedException,
NodeNotFoundException, InvalidStateTransitionException {
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
conf.setTimeDuration(
HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
0, TimeUnit.SECONDS);
@@ -260,10 +265,10 @@ private void createReplicationManager(ReplicationManagerConfiguration rmConf)
createReplicationManager(rmConf, null);
}
- void createReplicationManager(ReplicationManagerConfiguration rmConf,
+ private void createReplicationManager(ReplicationManagerConfiguration rmConf,
LegacyReplicationManagerConfiguration lrmConf)
throws InterruptedException, IOException {
- OzoneConfiguration config = SCMTestUtils.getConf();
+ OzoneConfiguration config = SCMTestUtils.getConf(tempDir);
config.setTimeDuration(
HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
0, TimeUnit.SECONDS);
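
TestLegacyReplicationManager and the handler tests that follow use the other @TempDir form: the directory is injected as a @BeforeEach parameter, with an additional field-level @TempDir kept where helpers outside setup also need one. A minimal sketch of parameter injection, unrelated to the Ozone classes:

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.File;

    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class TempDirParameterExample {

      private File metadataDir;

      @BeforeEach
      void setup(@TempDir File testDir) {
        // The injected directory already exists; setup only points test state at it.
        metadataDir = new File(testDir, "scm-meta");
        assertTrue(metadataDir.mkdirs());
      }

      @Test
      void setupPreparedTheDirectory() {
        assertTrue(metadataDir.isDirectory());
      }
    }
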
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java
index 571c79b4f52..7746b1db621 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestMisReplicationHandler.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.ratis.protocol.exceptions.NotLeaderException;
+import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
@@ -56,15 +57,14 @@
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyMap;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.anyLong;
-import static org.mockito.Mockito.any;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyMap;
-import static org.mockito.ArgumentMatchers.eq;
/**
* Tests the MisReplicationHandling functionalities to test implementations.
@@ -79,10 +79,10 @@ public abstract class TestMisReplicationHandler {
new AtomicBoolean(false);
private ReplicationManagerMetrics metrics;
- protected void setup(ReplicationConfig repConfig)
+ protected void setup(ReplicationConfig repConfig, File testDir)
throws NodeNotFoundException, CommandTargetOverloadedException,
NotLeaderException {
- conf = SCMTestUtils.getConf();
+ conf = SCMTestUtils.getConf(testDir);
replicationManager = mock(ReplicationManager.class);
when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java
index 9430ad6f757..d69f0cd7554 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisMisReplicationHandler.java
@@ -32,9 +32,11 @@
import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
+import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
@@ -59,11 +61,11 @@
public class TestRatisMisReplicationHandler extends TestMisReplicationHandler {
@BeforeEach
- public void setup() throws NodeNotFoundException,
+ void setup(@TempDir File testDir) throws NodeNotFoundException,
CommandTargetOverloadedException, NotLeaderException {
RatisReplicationConfig repConfig = RatisReplicationConfig
.getInstance(ReplicationFactor.THREE);
- setup(repConfig);
+ setup(repConfig, testDir);
}
@ParameterizedTest
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java
index f4476e6df54..cfb3952d133 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java
@@ -59,10 +59,10 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.argThat;
@@ -441,13 +441,9 @@ public void testDeleteThrottlingMisMatchedReplica() throws IOException {
RatisOverReplicationHandler handler =
new RatisOverReplicationHandler(policy, replicationManager);
- try {
- handler.processAndSendCommands(replicas, Collections.emptyList(),
- getOverReplicatedHealthResult(), 2);
- fail("Expected CommandTargetOverloadedException");
- } catch (CommandTargetOverloadedException e) {
- // Expected
- }
+ assertThrows(CommandTargetOverloadedException.class,
+ () -> handler.processAndSendCommands(replicas, Collections.emptyList(),
+ getOverReplicatedHealthResult(), 2));
assertEquals(1, commandsSent.size());
Pair<DatanodeDetails, SCMCommand<?>> cmd = commandsSent.iterator().next();
assertNotEquals(quasiClosedReplica.getDatanodeDetails(),
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
index ca86cb689fb..d77f093abe3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
@@ -39,12 +39,12 @@
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.ratis.protocol.exceptions.NotLeaderException;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
+import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
@@ -67,9 +67,9 @@
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.eq;
@@ -91,13 +91,13 @@ public class TestRatisUnderReplicationHandler {
private ReplicationManagerMetrics metrics;
@BeforeEach
- public void setup() throws NodeNotFoundException,
+ void setup(@TempDir File testDir) throws NodeNotFoundException,
CommandTargetOverloadedException, NotLeaderException {
container = ReplicationTestUtil.createContainer(
HddsProtos.LifeCycleState.CLOSED, RATIS_REPLICATION_CONFIG);
nodeManager = mock(NodeManager.class);
- conf = SCMTestUtils.getConf();
+ conf = SCMTestUtils.getConf(testDir);
policy = ReplicationTestUtil
.getSimpleTestPlacementPolicy(nodeManager, conf);
replicationManager = mock(ReplicationManager.class);
@@ -605,11 +605,11 @@ public void testUnderReplicationWithVulnerableReplicasOnUniqueOrigins() throws I
DECOMMISSIONING, State.UNHEALTHY, sequenceID);
replicas.add(unhealthyReplica);
UnderReplicatedHealthResult result = getUnderReplicatedHealthResult();
- Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true);
+ when(result.hasVulnerableUnhealthy()).thenReturn(true);
final Set<Pair<DatanodeDetails, SCMCommand<?>>> commands = testProcessing(replicas, Collections.emptyList(),
result, 2, 1);
- Assertions.assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey());
+ assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey());
}
/**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index fe1cdcc0695..47844f32fb0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -43,6 +44,9 @@
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.security.token.ContainerTokenGenerator;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand;
@@ -56,7 +60,6 @@
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
import java.io.IOException;
import java.time.Instant;
@@ -175,6 +178,16 @@ public void setup() throws IOException {
// Ensure that RM will run when asked.
when(scmContext.isLeaderReady()).thenReturn(true);
when(scmContext.isInSafeMode()).thenReturn(false);
+
+ PipelineManager pipelineManager = mock(PipelineManager.class);
+ when(pipelineManager.getPipeline(any()))
+ .thenReturn(HddsTestUtils.getRandomPipeline());
+
+ StorageContainerManager scm = mock(StorageContainerManager.class);
+ when(scm.getPipelineManager()).thenReturn(pipelineManager);
+ when(scm.getContainerTokenGenerator()).thenReturn(ContainerTokenGenerator.DISABLED);
+
+ when(scmContext.getScm()).thenReturn(scm);
}
private ReplicationManager createReplicationManager() throws IOException {
@@ -530,7 +543,7 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit
ContainerReplicaProto.State.UNHEALTHY);
replicas.add(unhealthy);
storeContainerAndReplicas(container, replicas);
- Mockito.when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
+ when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
.thenAnswer(invocation -> {
DatanodeDetails dn = invocation.getArgument(0);
if (dn.equals(unhealthy.getDatanodeDetails())) {
@@ -550,9 +563,9 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit
assertEquals(0, repQueue.overReplicatedQueueSize());
// next, this test sets up some mocks to test if RatisUnderReplicationHandler will handle this container correctly
- Mockito.when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(),
+ when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(),
anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails()));
- Mockito.when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any()))
+ when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any()))
.thenAnswer(invocation -> {
Map<SCMCommandProto.Type, Integer> map = new HashMap<>();
map.put(SCMCommandProto.Type.replicateContainerCommand, 0);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
index 723828a44bb..437fdc1c06f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
@@ -67,7 +67,7 @@
import java.util.stream.Stream;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java
index a950008ec9f..dec61610d1e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java
@@ -24,17 +24,20 @@
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
+import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
import org.apache.hadoop.hdds.scm.container.replication.ContainerCheckRequest;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
import java.util.Collections;
import java.util.Set;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.mockito.Mockito.mock;
@@ -58,6 +61,7 @@ public void setup() {
ratisReplicationConfig = RatisReplicationConfig.getInstance(
HddsProtos.ReplicationFactor.THREE);
replicationManager = mock(ReplicationManager.class);
+ Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(true);
openContainerHandler = new OpenContainerHandler(replicationManager);
}
@@ -119,8 +123,36 @@ public void testOpenUnhealthyContainerIsClosed() {
assertTrue(openContainerHandler.handle(readRequest));
verify(replicationManager, times(1))
.sendCloseContainerEvent(containerInfo.containerID());
+ assertEquals(1, request.getReport().getStat(HealthState.OPEN_UNHEALTHY));
}
+ @Test
+ public void testOpenContainerWithoutPipelineIsClosed() {
+ Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(false);
+ ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo(
+ ecReplicationConfig, 1, OPEN);
+ Set<ContainerReplica> containerReplicas = ReplicationTestUtil
+ .createReplicas(containerInfo.containerID(),
+ ContainerReplicaProto.State.OPEN, 1, 2, 3, 4);
+ ContainerCheckRequest request = new ContainerCheckRequest.Builder()
+ .setPendingOps(Collections.emptyList())
+ .setReport(new ReplicationManagerReport())
+ .setContainerInfo(containerInfo)
+ .setContainerReplicas(containerReplicas)
+ .build();
+ ContainerCheckRequest readRequest = new ContainerCheckRequest.Builder()
+ .setPendingOps(Collections.emptyList())
+ .setReport(new ReplicationManagerReport())
+ .setContainerInfo(containerInfo)
+ .setContainerReplicas(containerReplicas)
+ .setReadOnly(true)
+ .build();
+ assertTrue(openContainerHandler.handle(request));
+ assertTrue(openContainerHandler.handle(readRequest));
+ verify(replicationManager, times(1))
+ .sendCloseContainerEvent(containerInfo.containerID());
+ assertEquals(1, request.getReport().getStat(HealthState.OPEN_WITHOUT_PIPELINE));
+ }
@Test
public void testClosedRatisContainerReturnsFalse() {
ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo(
@@ -178,5 +210,33 @@ public void testOpenUnhealthyRatisContainerIsClosed() {
assertTrue(openContainerHandler.handle(request));
assertTrue(openContainerHandler.handle(readRequest));
verify(replicationManager, times(1)).sendCloseContainerEvent(any());
+ assertEquals(1, request.getReport().getStat(HealthState.OPEN_UNHEALTHY));
+ }
+
+ @Test
+ public void testOpenRatisContainerWithoutPipelineIsClosed() {
+ Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(false);
+ ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo(
+ ratisReplicationConfig, 1, OPEN);
+ Set<ContainerReplica> containerReplicas = ReplicationTestUtil
+ .createReplicas(containerInfo.containerID(),
+ ContainerReplicaProto.State.OPEN, 0, 0, 0);
+ ContainerCheckRequest request = new ContainerCheckRequest.Builder()
+ .setPendingOps(Collections.emptyList())
+ .setReport(new ReplicationManagerReport())
+ .setContainerInfo(containerInfo)
+ .setContainerReplicas(containerReplicas)
+ .build();
+ ContainerCheckRequest readRequest = new ContainerCheckRequest.Builder()
+ .setPendingOps(Collections.emptyList())
+ .setReport(new ReplicationManagerReport())
+ .setContainerInfo(containerInfo)
+ .setContainerReplicas(containerReplicas)
+ .setReadOnly(true)
+ .build();
+ assertTrue(openContainerHandler.handle(request));
+ assertTrue(openContainerHandler.handle(readRequest));
+ verify(replicationManager, times(1)).sendCloseContainerEvent(any());
+ assertEquals(1, request.getReport().getStat(HealthState.OPEN_WITHOUT_PIPELINE));
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
index 8fa4c974e1b..28eccd5211c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import java.util.Collections;
import java.util.HashSet;
@@ -190,7 +189,7 @@ public void testReturnsTrueForQuasiClosedContainerWithVulnerableReplicaWhenAllRe
ContainerReplica unhealthy =
createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId);
replicas.add(unhealthy);
- Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class)))
+ when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
.thenAnswer(invocation -> {
DatanodeDetails dn = invocation.getArgument(0);
if (dn.equals(unhealthy.getDatanodeDetails())) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
index a3346da970f..dfb3ff5179e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestInterSCMGrpcProtocolService.java
@@ -55,8 +55,8 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.security.x509.CertificateTestUtils.createSelfSignedCert;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
index 29fa47135fc..a5a2054a8ae 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestReplicationAnnotation.java
@@ -35,7 +35,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Tests on {@link org.apache.hadoop.hdds.scm.metadata.Replicate}.
@@ -125,13 +125,10 @@ public void testReplicateAnnotationBasic() throws Throwable {
SCMHAInvocationHandler.class.getClassLoader(),
new Class<?>[]{ContainerStateManager.class},
scmhaInvocationHandler);
-
- try {
- proxy.addContainer(HddsProtos.ContainerInfoProto.getDefaultInstance());
- fail("Cannot reach here: should have seen a IOException");
- } catch (IOException e) {
- assertNotNull(e.getMessage());
- assertThat(e.getMessage()).contains("submitRequest is called");
- }
+ IOException e =
+ assertThrows(IOException.class,
+ () -> proxy.addContainer(HddsProtos.ContainerInfoProto.getDefaultInstance()));
+ assertNotNull(e.getMessage());
+ assertThat(e.getMessage()).contains("submitRequest is called");
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
index 642fbd635a3..757a0ab0dce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
@@ -19,20 +19,18 @@
package org.apache.hadoop.hdds.scm.ha;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for SCMContext.
*/
public class TestSCMContext {
@Test
- public void testRaftOperations() {
+ void testRaftOperations() throws Exception {
// start as follower
SCMContext scmContext = new SCMContext.Builder()
.setLeader(false).setTerm(0).buildMaybeInvalid();
@@ -44,11 +42,8 @@ public void testRaftOperations() {
scmContext.setLeaderReady();
assertTrue(scmContext.isLeader());
assertTrue(scmContext.isLeaderReady());
- try {
- assertEquals(scmContext.getTermOfLeader(), 10);
- } catch (NotLeaderException e) {
- fail("Should not throw nle.");
- }
+ assertEquals(scmContext.getTermOfLeader(), 10);
+
// step down
scmContext.updateLeaderAndTerm(false, 0);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
index 54a422b909b..75a943ee8da 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.common.Storage;
import org.apache.hadoop.ozone.ha.ConfUtils;
-import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.server.RaftServerConfigKeys;
import org.apache.ratis.util.TimeDuration;
@@ -75,9 +74,11 @@
*/
class TestSCMHAConfiguration {
private OzoneConfiguration conf;
+ @TempDir
+ private File tempDir;
@BeforeEach
- void setup(@TempDir File tempDir) {
+ void setup() {
conf = new OzoneConfiguration();
conf.set(OZONE_METADATA_DIRS, tempDir.getAbsolutePath());
DefaultConfigManager.clearDefaultConfigs();
@@ -214,8 +215,7 @@ public void testSCMHAConfig() throws Exception {
assertEquals(0, scmRatisConfig.getLogAppenderWaitTimeMin(),
"getLogAppenderWaitTimeMin");
- final File testDir = GenericTestUtils.getRandomizedTestDir();
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getPath());
final RaftProperties p = RatisUtil.newRaftProperties(conf);
final TimeDuration t = RaftServerConfigKeys.Log.Appender.waitTimeMin(p);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java
index eb0f18ae019..f33eedf9695 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java
@@ -45,9 +45,13 @@
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.server.DivisionInfo;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.io.TempDir;
import java.io.IOException;
@@ -55,6 +59,7 @@
import java.util.concurrent.TimeoutException;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assumptions.assumeThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.mock;
@@ -64,27 +69,34 @@
/**
* Test cases to verify {@link org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl}.
*/
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
class TestSCMHAManagerImpl {
- @TempDir
+ private static final String FOLLOWER_SCM_ID = "follower";
+
private Path storageBaseDir;
private String clusterID;
private SCMHAManager primarySCMHAManager;
+ private SCMRatisServer follower;
- @BeforeEach
- void setup() throws IOException, InterruptedException,
+ @BeforeAll
+ void setup(@TempDir Path tempDir) throws IOException, InterruptedException,
TimeoutException {
+ storageBaseDir = tempDir;
clusterID = UUID.randomUUID().toString();
OzoneConfiguration conf = getConfig("scm1", 9894);
final StorageContainerManager scm = getMockStorageContainerManager(conf);
SCMRatisServerImpl.initialize(clusterID, scm.getScmId(),
scm.getScmNodeDetails(), conf);
- scm.getScmHAManager().start();
primarySCMHAManager = scm.getScmHAManager();
+ primarySCMHAManager.start();
final DivisionInfo ratisDivision = primarySCMHAManager.getRatisServer()
.getDivision().getInfo();
// Wait for Ratis Server to be ready
waitForSCMToBeReady(ratisDivision);
+ follower = getMockStorageContainerManager(getConfig(FOLLOWER_SCM_ID, 9898))
+ .getScmHAManager().getRatisServer();
}
private OzoneConfiguration getConfig(String scmId, int ratisPort) {
@@ -97,42 +109,55 @@ private OzoneConfiguration getConfig(String scmId, int ratisPort) {
return conf;
}
- public void waitForSCMToBeReady(DivisionInfo ratisDivision)
+ private void waitForSCMToBeReady(DivisionInfo ratisDivision)
throws TimeoutException,
InterruptedException {
GenericTestUtils.waitFor(ratisDivision::isLeaderReady,
1000, 10000);
}
- @AfterEach
- public void cleanup() throws IOException {
+ @AfterAll
+ void cleanup() throws IOException {
+ follower.stop();
primarySCMHAManager.stop();
}
@Test
- public void testAddSCM() throws IOException, InterruptedException {
- assertEquals(1, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
+ @Order(1)
+ void testAddSCM() throws IOException {
+ assertEquals(1, getPeerCount());
+
+ follower.start();
+ final AddSCMRequest request = new AddSCMRequest(
+ clusterID, FOLLOWER_SCM_ID, getFollowerAddress());
+ primarySCMHAManager.addSCM(request);
+ assertEquals(2, getPeerCount());
+ }
- final StorageContainerManager scm2 = getMockStorageContainerManager(
- getConfig("scm2", 9898));
- try {
- scm2.getScmHAManager().getRatisServer().start();
- final AddSCMRequest request = new AddSCMRequest(
- clusterID, scm2.getScmId(),
- "localhost:" + scm2.getScmHAManager().getRatisServer()
- .getDivision().getRaftServer().getServerRpc()
- .getInetSocketAddress().getPort());
- primarySCMHAManager.addSCM(request);
- assertEquals(2, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
- } finally {
- scm2.getScmHAManager().getRatisServer().stop();
- }
+ @Test
+ @Order(2) // requires testAddSCM
+ void testRemoveSCM() throws IOException {
+ assumeThat(getPeerCount()).isEqualTo(2);
+
+ final RemoveSCMRequest removeSCMRequest = new RemoveSCMRequest(
+ clusterID, FOLLOWER_SCM_ID, getFollowerAddress());
+ primarySCMHAManager.removeSCM(removeSCMRequest);
+ assertEquals(1, getPeerCount());
+ }
+
+ private int getPeerCount() {
+ return primarySCMHAManager.getRatisServer()
+ .getDivision().getGroup().getPeers().size();
+ }
+
+ private String getFollowerAddress() {
+ return "localhost:" +
+ follower.getDivision()
+ .getRaftServer().getServerRpc().getInetSocketAddress().getPort();
}
@Test
- public void testHARingRemovalErrors() throws IOException,
+ void testHARingRemovalErrors() throws IOException,
AuthenticationException {
OzoneConfiguration config = new OzoneConfiguration();
config.set(ScmConfigKeys.OZONE_SCM_PRIMORDIAL_NODE_ID_KEY, "scm1");
@@ -160,35 +185,6 @@ public void testHARingRemovalErrors() throws IOException,
scm2.getScmHAManager().getRatisServer().stop();
}
}
- @Test
- public void testRemoveSCM() throws IOException, InterruptedException {
- assertEquals(1, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
-
- final StorageContainerManager scm2 = getMockStorageContainerManager(
- getConfig("scm2", 9898));
- try {
- scm2.getScmHAManager().getRatisServer().start();
- final AddSCMRequest addSCMRequest = new AddSCMRequest(
- clusterID, scm2.getScmId(),
- "localhost:" + scm2.getScmHAManager().getRatisServer()
- .getDivision().getRaftServer().getServerRpc()
- .getInetSocketAddress().getPort());
- primarySCMHAManager.addSCM(addSCMRequest);
- assertEquals(2, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
-
- final RemoveSCMRequest removeSCMRequest = new RemoveSCMRequest(
- clusterID, scm2.getScmId(), "localhost:" +
- scm2.getScmHAManager().getRatisServer().getDivision()
- .getRaftServer().getServerRpc().getInetSocketAddress().getPort());
- primarySCMHAManager.removeSCM(removeSCMRequest);
- assertEquals(1, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
- } finally {
- scm2.getScmHAManager().getRatisServer().stop();
- }
- }
private StorageContainerManager getMockStorageContainerManager(
OzoneConfiguration conf) throws IOException {
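
The hunk above switches the class to PER_CLASS lifecycle with ordered test methods: the expensive Ratis setup runs once in a non-static @BeforeAll, testRemoveSCM is ordered after testAddSCM, and an AssertJ assumption skips (rather than fails) the dependent test if its precondition was not established. A minimal sketch of that combination, with made-up state instead of SCM peers:

    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.MethodOrderer;
    import org.junit.jupiter.api.Order;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.TestInstance;
    import org.junit.jupiter.api.TestMethodOrder;
    import static org.assertj.core.api.Assumptions.assumeThat;
    import static org.junit.jupiter.api.Assertions.assertEquals;

    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    @TestMethodOrder(MethodOrderer.OrderAnnotation.class)
    class OrderedSharedStateExample {
      private int peers;

      @BeforeAll
      void setup() {       // non-static @BeforeAll is allowed with PER_CLASS
        peers = 1;
      }

      @Test
      @Order(1)
      void add() {
        peers++;
        assertEquals(2, peers);
      }

      @Test
      @Order(2)            // depends on add(); skip rather than fail if it did not run
      void remove() {
        assumeThat(peers).isEqualTo(2);
        peers--;
        assertEquals(1, peers);
      }
    }
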
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java
index 743c7aea9da..2e3c8e84368 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSequenceIDGenerator.java
@@ -25,12 +25,15 @@
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+import java.io.File;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SEQUENCE_ID_BATCH_SIZE;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.anyString;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
@@ -38,9 +41,13 @@
* Tests for {@link SequenceIdGenerator}.
*/
public class TestSequenceIDGenerator {
+
+ @TempDir
+ private File testDir;
+
@Test
public void testSequenceIDGenUponNonRatis() throws Exception {
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf);
scmMetadataStore.start(conf);
@@ -82,7 +89,7 @@ public void testSequenceIDGenUponNonRatis() throws Exception {
@Test
public void testSequenceIDGenUponRatis() throws Exception {
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
// change batchSize to 100
conf.setInt(OZONE_SCM_SEQUENCE_ID_BATCH_SIZE, 100);
@@ -129,7 +136,7 @@ public void testSequenceIDGenUponRatis() throws Exception {
public void testSequenceIDGenUponRatisWhenCurrentScmIsNotALeader()
throws Exception {
int batchSize = 100;
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
conf.setInt(OZONE_SCM_SEQUENCE_ID_BATCH_SIZE, batchSize);
SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf);
scmMetadataStore.start(conf);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 8dd6914e644..b241ac0f2d2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -25,9 +25,7 @@
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
-import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -35,7 +33,6 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
@@ -67,19 +64,17 @@
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
import org.apache.hadoop.test.PathUtils;
import org.apache.commons.io.IOUtils;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import static java.util.Collections.emptyList;
import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -91,6 +86,7 @@
* Test for different container placement policy.
*/
public class TestContainerPlacement {
+ @TempDir
private File testDir;
private DBStore dbStore;
private ContainerManager containerManager;
@@ -103,8 +99,6 @@ public class TestContainerPlacement {
@BeforeEach
public void setUp() throws Exception {
conf = getConf();
- testDir = GenericTestUtils.getTestDir(
- TestContainerPlacement.class.getSimpleName() + UUID.randomUUID());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
@@ -123,8 +117,6 @@ public void cleanup() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
/**
@@ -182,7 +174,7 @@ ContainerManager createContainerManager()
*/
@Test
public void testContainerPlacementCapacity() throws IOException,
- InterruptedException, TimeoutException {
+ InterruptedException {
final int nodeCount = 4;
final long capacity = 10L * OzoneConsts.GB;
final long used = 2L * OzoneConsts.GB;
@@ -201,11 +193,6 @@ public void testContainerPlacementCapacity() throws IOException,
List<DatanodeDetails> datanodes = HddsTestUtils
.getListOfRegisteredDatanodeDetails(scmNodeManager, nodeCount);
XceiverClientManager xceiverClientManager = null;
- LayoutVersionManager versionManager =
- scmNodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo =
- toLayoutVersionProto(versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
try {
for (DatanodeDetails datanodeDetails : datanodes) {
UUID dnId = datanodeDetails.getUuid();
@@ -221,7 +208,7 @@ public void testContainerPlacementCapacity() throws IOException,
Arrays.asList(report), emptyList());
datanodeInfo.updateStorageReports(
nodeReportProto.getStorageReportList());
- scmNodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+ scmNodeManager.processHeartbeat(datanodeDetails);
}
//TODO: wait for heartbeat to be processed
@@ -265,7 +252,6 @@ public void testContainerPlacementCapacity() throws IOException,
if (xceiverClientManager != null) {
xceiverClientManager.close();
}
- FileUtil.fullyDelete(testDir);
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index 523d4226cb4..06565e1b7e5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -41,7 +41,6 @@
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import java.io.IOException;
import java.util.Collections;
@@ -49,6 +48,7 @@
import java.util.Set;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
@@ -406,7 +406,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN
replicas.add(unhealthy);
nodeManager.setContainers(dn1, ImmutableSet.of(containerID));
- Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID)))
+ when(repManager.getContainerReplicaCount(eq(containerID)))
.thenReturn(new RatisContainerReplicaCount(container, replicas,
Collections.emptyList(), 2, false));
DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, true);
@@ -430,7 +430,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
.build();
replicas.add(copyOfUnhealthyOnNewNode);
- Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID)))
+ when(repManager.getContainerReplicaCount(eq(containerID)))
.thenReturn(new RatisContainerReplicaCount(container, replicas,
Collections.emptyList(), 2, false));
DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, false);
@@ -692,8 +692,8 @@ public void testStartTimeMetricWhenNodesDecommissioned()
assertEquals(1, monitor.getTrackedNodeCount());
long monitoredTime = monitor.getSingleTrackedNode(dn1.getIpAddress())
.getStartTime();
- assertTrue(monitoredTime >= beforeTime);
- assertTrue(monitoredTime <= afterTime);
+ assertThat(monitoredTime).isGreaterThanOrEqualTo(beforeTime);
+ assertThat(monitoredTime).isLessThanOrEqualTo(afterTime);
}
@Test
@@ -837,6 +837,50 @@ public void testCancelledNodesMovedToInService()
nodeManager.getNodeStatus(dn1).getOperationalState());
}
+ @Test
+ public void testContainersReplicatedOnDecomDnAPI()
+ throws NodeNotFoundException, ContainerNotFoundException {
+ conf.setBoolean("hdds.scm.replication.enable.legacy", false);
+
+ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+
+ Set<ContainerID> containers = new HashSet<>();
+ containers.add(ContainerID.valueOf(1));
+ containers.add(ContainerID.valueOf(2));
+ nodeManager.setContainers(dn1, containers);
+ DatanodeAdminMonitorTestUtil
+ .mockGetContainerReplicaCount(repManager,
+ true,
+ HddsProtos.LifeCycleState.CLOSED,
+ DECOMMISSIONING,
+ IN_SERVICE,
+ IN_SERVICE);
+
+ monitor.startMonitoring(dn1);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ assertEquals(2, monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size());
+ assertEquals(0, monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size());
+
+ DatanodeAdminMonitorTestUtil
+ .mockGetContainerReplicaCount(repManager,
+ true,
+ HddsProtos.LifeCycleState.OPEN,
+ IN_SERVICE);
+
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ assertEquals(0, monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size());
+ assertEquals(2, monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size());
+ }
+
/**
* Generate a set of ContainerID, starting from an ID of zero up to the given
* count minus 1.
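
The hunk above replaces assertTrue on boolean comparisons with AssertJ range assertions, which report the offending value on failure. A minimal sketch with made-up timestamps (names and values are illustrative, not from this patch):

    import org.junit.jupiter.api.Test;
    import static org.assertj.core.api.Assertions.assertThat;

    class RangeAssertionExample {
      @Test
      void startTimeIsInsideObservedWindow() {
        long before = 100L, observed = 142L, after = 200L; // made-up values
        // On failure AssertJ reports the actual value (e.g. "Expecting 42L to be
        // greater than or equal to 100L"), whereas assertTrue(observed >= before)
        // only reports that the condition was false.
        assertThat(observed).isGreaterThanOrEqualTo(before);
        assertThat(observed).isLessThanOrEqualTo(after);
      }
    }
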
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 4724d94ae26..aa09022b14d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -33,11 +33,9 @@
import java.util.Arrays;
import java.util.Collections;
import java.util.Set;
-import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -73,11 +71,11 @@
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.LambdaTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
/**
* Test DeadNodeHandler.
@@ -92,7 +90,8 @@ public class TestDeadNodeHandler {
private HealthyReadOnlyNodeHandler healthyReadOnlyNodeHandler;
private EventPublisher publisher;
private EventQueue eventQueue;
- private String storageDir;
+ @TempDir
+ private File storageDir;
private SCMContext scmContext;
private DeletedBlockLog deletedBlockLog;
@@ -104,9 +103,7 @@ public void setup() throws IOException, AuthenticationException {
conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
10, StorageUnit.MB);
- storageDir = GenericTestUtils.getTempPath(
- TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir.getPath());
eventQueue = new EventQueue();
scm = HddsTestUtils.getScm(conf);
nodeManager = (SCMNodeManager) scm.getScmNodeManager();
@@ -136,20 +133,19 @@ public void setup() throws IOException, AuthenticationException {
public void teardown() {
scm.stop();
scm.join();
- FileUtil.fullyDelete(new File(storageDir));
}
@Test
@SuppressWarnings("checkstyle:MethodLength")
- public void testOnMessage() throws Exception {
+ public void testOnMessage(@TempDir File tempDir) throws Exception {
//GIVEN
DatanodeDetails datanode1 = MockDatanodeDetails.randomDatanodeDetails();
DatanodeDetails datanode2 = MockDatanodeDetails.randomDatanodeDetails();
DatanodeDetails datanode3 = MockDatanodeDetails.randomDatanodeDetails();
- String storagePath = GenericTestUtils.getRandomizedTempPath()
+ String storagePath = tempDir.getPath()
.concat("/data-" + datanode1.getUuidString());
- String metaStoragePath = GenericTestUtils.getRandomizedTempPath()
+ String metaStoragePath = tempDir.getPath()
.concat("/metadata-" + datanode1.getUuidString());
StorageReportProto storageOne = HddsTestUtils.createStorageReport(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index 332d762a4cd..09f0dd59b9f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@ -41,7 +41,6 @@
import java.util.ArrayList;
import static java.util.Collections.singletonList;
-import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -165,7 +164,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned()
// Attempt to decommission on dn(9) which has another instance at
// dn(11) with identical ports.
- nodeManager.processHeartbeat(dns.get(9), defaultLayoutVersionProto());
+ nodeManager.processHeartbeat(dns.get(9));
DatanodeDetails duplicatePorts = dns.get(9);
decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress()));
assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
@@ -237,7 +236,7 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts()
// Now decommission one of the DNs with the duplicate port
DatanodeDetails expectedDN = dns.get(9);
- nodeManager.processHeartbeat(expectedDN, defaultLayoutVersionProto());
+ nodeManager.processHeartbeat(expectedDN);
decom.decommissionNodes(singletonList(
expectedDN.getIpAddress() + ":" + ratisPort));
@@ -287,7 +286,7 @@ public void testNodesCanBePutIntoMaintenanceAndRecommissioned()
// Attempt to enable maintenance on dn(9) which has another instance at
// dn(11) with identical ports.
- nodeManager.processHeartbeat(dns.get(9), defaultLayoutVersionProto());
+ nodeManager.processHeartbeat(dns.get(9));
DatanodeDetails duplicatePorts = dns.get(9);
decom.startMaintenanceNodes(singletonList(duplicatePorts.getIpAddress()),
100);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java
index 2005d518efb..d9cd79b7522 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java
@@ -35,7 +35,7 @@
import java.util.Set;
import static org.mockito.Mockito.mock;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
@@ -346,7 +346,7 @@ public void testDecommMonitorStartTimeForHost() {
monitor.run();
long startTime = monitor.getSingleTrackedNode(dn1.getIpAddress())
.getStartTime();
- assertTrue(before <= startTime);
- assertTrue(after >= startTime);
+ assertThat(before).isLessThanOrEqualTo(startTime);
+ assertThat(after).isGreaterThanOrEqualTo(startTime);
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index ecd5cbed5f8..558fc420f48 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -22,10 +22,10 @@
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
-import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -43,9 +43,9 @@
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -59,10 +59,10 @@ public class TestNodeReportHandler implements EventPublisher {
private NodeReportHandler nodeReportHandler;
private HDDSLayoutVersionManager versionManager;
private SCMNodeManager nodeManager;
- private String storagePath = GenericTestUtils.getRandomizedTempPath()
- .concat("/data-" + UUID.randomUUID().toString());
- private String metaStoragePath = GenericTestUtils.getRandomizedTempPath()
- .concat("/metadata-" + UUID.randomUUID().toString());
+ @TempDir
+ private File storagePath;
+ @TempDir
+ private File metaStoragePath;
@BeforeEach
public void resetEventCollector() throws IOException {
@@ -84,9 +84,9 @@ public void resetEventCollector() throws IOException {
public void testNodeReport() throws IOException {
DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
StorageReportProto storageOne = HddsTestUtils
- .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
+ .createStorageReport(dn.getUuid(), storagePath.getPath(), 100, 10, 90, null);
MetadataStorageReportProto metaStorageOne = HddsTestUtils
- .createMetadataStorageReport(metaStoragePath, 100, 10, 90, null);
+ .createMetadataStorageReport(metaStoragePath.getPath(), 100, 10, 90, null);
SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
assertNull(nodeMetric);
@@ -100,7 +100,7 @@ public void testNodeReport() throws IOException {
assertEquals(10, (long) nodeMetric.get().getScmUsed().get());
StorageReportProto storageTwo = HddsTestUtils
- .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
+ .createStorageReport(dn.getUuid(), storagePath.getPath(), 100, 10, 90, null);
nodeReportHandler.onMessage(
getNodeReport(dn, Arrays.asList(storageOne, storageTwo),
Arrays.asList(metaStorageOne)), this);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 85a70b64673..cc9133cf684 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -26,7 +26,6 @@
import java.util.List;
import java.util.Set;
import java.util.UUID;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -73,7 +72,6 @@
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
import org.apache.hadoop.ozone.protocol.commands.SetNodeOperationalStateCommand;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.Time;
@@ -86,6 +84,7 @@
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
import static java.util.Collections.emptyList;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -123,6 +122,8 @@
import static org.mockito.Mockito.eq;
import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
import org.mockito.ArgumentCaptor;
import org.slf4j.Logger;
@@ -219,17 +220,12 @@ public void testScmHeartbeat()
throws IOException, InterruptedException, AuthenticationException {
try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
int registeredNodes = 5;
// Send some heartbeats from different nodes.
for (int x = 0; x < registeredNodes; x++) {
DatanodeDetails datanodeDetails = HddsTestUtils
.createRandomDatanodeAndRegister(nodeManager);
- nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+ nodeManager.processHeartbeat(datanodeDetails);
}
//TODO: wait for heartbeat to be processed
@@ -376,7 +372,7 @@ private void assertPipelineClosedAfterLayoutHeartbeat(
allNodes);
// node sends incorrect layout.
- nodeManager.processHeartbeat(node, layout);
+ nodeManager.processLayoutVersionReport(node, layout);
// Its pipelines should be closed then removed, meaning there is not
// enough nodes for factor 3 pipelines.
@@ -444,8 +440,10 @@ public void testScmLayoutOnRegister()
assertPipelineCreationFailsWithNotEnoughNodes(1);
// Heartbeat bad MLV nodes back to healthy.
- nodeManager.processHeartbeat(badMlvNode1, CORRECT_LAYOUT_PROTO);
- nodeManager.processHeartbeat(badMlvNode2, CORRECT_LAYOUT_PROTO);
+ nodeManager.processLayoutVersionReport(badMlvNode1, CORRECT_LAYOUT_PROTO);
+ nodeManager.processLayoutVersionReport(badMlvNode2, CORRECT_LAYOUT_PROTO);
+ nodeManager.processHeartbeat(badMlvNode1);
+ nodeManager.processHeartbeat(badMlvNode2);
// After moving out of healthy readonly, pipeline creation should be
// triggered.
@@ -460,17 +458,15 @@ public void testScmLayoutOnRegister()
private void assertPipelineCreationFailsWithNotEnoughNodes(
int actualNodeCount) throws Exception {
- try {
+ SCMException ex = assertThrows(SCMException.class, () -> {
ReplicationConfig ratisThree =
ReplicationConfig.fromProtoTypeAndFactor(
HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE);
scm.getPipelineManager().createPipeline(ratisThree);
- fail("3 nodes should not have been found for a pipeline.");
- } catch (SCMException ex) {
- assertThat(ex.getMessage()).contains("Required 3. Found " +
- actualNodeCount);
- }
+ }, "3 nodes should not have been found for a pipeline.");
+ assertThat(ex.getMessage()).contains("Required 3. Found " +
+ actualNodeCount);
}
private void assertPipelines(HddsProtos.ReplicationFactor factor,
@@ -558,14 +554,8 @@ public void testScmShutdown()
SCMNodeManager nodeManager = createNodeManager(conf);
DatanodeDetails datanodeDetails = HddsTestUtils
.createRandomDatanodeAndRegister(nodeManager);
- LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
- nodeManager.close();
-
// These should never be processed.
- nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+ nodeManager.processHeartbeat(datanodeDetails);
// Let us just wait for 2 seconds to prove that HBs are not processed.
Thread.sleep(2 * 1000);
@@ -588,16 +578,10 @@ public void testScmHealthyNodeCount()
final int count = 10;
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
-
for (int x = 0; x < count; x++) {
DatanodeDetails datanodeDetails = HddsTestUtils
.createRandomDatanodeAndRegister(nodeManager);
- nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+ nodeManager.processHeartbeat(datanodeDetails);
}
//TODO: wait for heartbeat to be processed
Thread.sleep(4 * 1000);
@@ -657,12 +641,6 @@ public void testSetNodeOpStateAndCommandFired()
DatanodeDetails dn = HddsTestUtils.createRandomDatanodeAndRegister(
nodeManager);
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- final LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
-
long expiry = System.currentTimeMillis() / 1000 + 1000;
nodeManager.setNodeOperationalState(dn,
HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, expiry);
@@ -670,7 +648,7 @@ public void testSetNodeOpStateAndCommandFired()
// If found mismatch, leader SCM fires a SetNodeOperationalStateCommand
// to update the opState persisted in Datanode.
scm.getScmContext().updateLeaderAndTerm(true, 1);
- List commands = nodeManager.processHeartbeat(dn, layoutInfo);
+ List commands = nodeManager.processHeartbeat(dn);
assertEquals(SetNodeOperationalStateCommand.class,
commands.get(0).getClass());
@@ -679,7 +657,7 @@ public void testSetNodeOpStateAndCommandFired()
// If found mismatch, follower SCM update its own opState according
// to the heartbeat, and no SCMCommand will be fired.
scm.getScmContext().updateLeaderAndTerm(false, 2);
- commands = nodeManager.processHeartbeat(dn, layoutInfo);
+ commands = nodeManager.processHeartbeat(dn);
assertEquals(0, commands.size());
@@ -713,11 +691,6 @@ public void testScmDetectStaleAndDeadNode()
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount);
@@ -725,18 +698,18 @@ public void testScmDetectStaleAndDeadNode()
nodeManager);
// Heartbeat once
- nodeManager.processHeartbeat(staleNode, layoutInfo);
+ nodeManager.processHeartbeat(staleNode);
// Heartbeat all other nodes.
for (DatanodeDetails dn : nodeList) {
- nodeManager.processHeartbeat(dn, layoutInfo);
+ nodeManager.processHeartbeat(dn);
}
// Wait for 2 seconds .. and heartbeat good nodes again.
Thread.sleep(2 * 1000);
for (DatanodeDetails dn : nodeList) {
- nodeManager.processHeartbeat(dn, layoutInfo);
+ nodeManager.processHeartbeat(dn);
}
// Wait for 2 seconds, wait a total of 4 seconds to make sure that the
@@ -759,7 +732,7 @@ public void testScmDetectStaleAndDeadNode()
Thread.sleep(1000);
// heartbeat good nodes again.
for (DatanodeDetails dn : nodeList) {
- nodeManager.processHeartbeat(dn, layoutInfo);
+ nodeManager.processHeartbeat(dn);
}
// 6 seconds is the dead window for this test , so we wait a total of
@@ -799,8 +772,7 @@ public void testScmDetectStaleAndDeadNode()
* @throws AuthenticationException
*/
@Test
- public void testScmHandleJvmPause()
- throws IOException, InterruptedException, AuthenticationException {
+ void testScmHandleJvmPause() throws Exception {
final int healthCheckInterval = 200; // milliseconds
final int heartbeatInterval = 1; // seconds
final int staleNodeInterval = 3; // seconds
@@ -818,18 +790,13 @@ public void testScmHandleJvmPause()
deadNodeInterval, SECONDS);
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
DatanodeDetails node1 =
HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
DatanodeDetails node2 =
HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
- nodeManager.processHeartbeat(node1, layoutInfo);
- nodeManager.processHeartbeat(node2, layoutInfo);
+ nodeManager.processHeartbeat(node1);
+ nodeManager.processHeartbeat(node2);
// Sleep so that heartbeat processing thread gets to run.
Thread.sleep(1000);
@@ -861,21 +828,18 @@ public void testScmHandleJvmPause()
schedFuture = nodeManager.unpauseHealthCheck();
// Step 3 : wait for 1 iteration of health check
- try {
- schedFuture.get();
- assertThat(nodeManager.getSkippedHealthChecks())
- .withFailMessage("We did not skip any heartbeat checks")
- .isGreaterThan(0);
- } catch (ExecutionException e) {
- fail("Unexpected exception waiting for Scheduled Health Check");
- }
+
+ schedFuture.get();
+ assertThat(nodeManager.getSkippedHealthChecks())
+ .withFailMessage("We did not skip any heartbeat checks")
+ .isGreaterThan(0);
// Step 4 : all nodes should still be HEALTHY
assertEquals(2, nodeManager.getAllNodes().size());
assertEquals(2, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
// Step 5 : heartbeat for node1
- nodeManager.processHeartbeat(node1, layoutInfo);
+ nodeManager.processHeartbeat(node1);
// Step 6 : wait for health check process to run
Thread.sleep(1000);
@@ -998,8 +962,6 @@ public void testProcessCommandQueueReport()
SCMNodeManager nodeManager = new SCMNodeManager(conf,
scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf),
scmContext, lvm);
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- lvm.getMetadataLayoutVersion(), lvm.getSoftwareLayoutVersion());
DatanodeDetails node1 =
HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
@@ -1019,7 +981,7 @@ scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf),
assertEquals(5, nodeManager.getTotalDatanodeCommandCount(
node1, SCMCommandProto.Type.deleteBlocksCommand));
- nodeManager.processHeartbeat(node1, layoutInfo,
+ nodeManager.processHeartbeat(node1,
CommandQueueReportProto.newBuilder()
.addCommand(SCMCommandProto.Type.replicateContainerCommand)
.addCount(123)
@@ -1049,7 +1011,7 @@ scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf),
// Send another report missing an earlier entry, and ensure it is not
// still reported as a stale value.
- nodeManager.processHeartbeat(node1, layoutInfo,
+ nodeManager.processHeartbeat(node1,
CommandQueueReportProto.newBuilder()
.addCommand(SCMCommandProto.Type.closeContainerCommand)
.addCount(11)
@@ -1126,7 +1088,7 @@ public void testCommandCount()
public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException, AuthenticationException {
try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
NullPointerException npe = assertThrows(NullPointerException.class,
- () -> nodeManager.processHeartbeat(null, null));
+ () -> nodeManager.processHeartbeat(null));
assertThat(npe).hasMessage("Heartbeat is missing DatanodeDetails.");
}
}
@@ -1195,20 +1157,15 @@ public void testScmClusterIsInExpectedState1()
* Cluster state: Healthy: All nodes are heartbeat-ing like normal.
*/
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
DatanodeDetails healthyNode =
HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
DatanodeDetails staleNode =
HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
DatanodeDetails deadNode =
HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
- nodeManager.processHeartbeat(healthyNode, layoutInfo);
- nodeManager.processHeartbeat(staleNode, layoutInfo);
- nodeManager.processHeartbeat(deadNode, layoutInfo);
+ nodeManager.processHeartbeat(healthyNode);
+ nodeManager.processHeartbeat(staleNode);
+ nodeManager.processHeartbeat(deadNode);
// Sleep so that heartbeat processing thread gets to run.
Thread.sleep(500);
@@ -1234,12 +1191,12 @@ public void testScmClusterIsInExpectedState1()
* the 3 second windows.
*/
- nodeManager.processHeartbeat(healthyNode, layoutInfo);
- nodeManager.processHeartbeat(staleNode, layoutInfo);
- nodeManager.processHeartbeat(deadNode, layoutInfo);
+ nodeManager.processHeartbeat(healthyNode);
+ nodeManager.processHeartbeat(staleNode);
+ nodeManager.processHeartbeat(deadNode);
Thread.sleep(1500);
- nodeManager.processHeartbeat(healthyNode, layoutInfo);
+ nodeManager.processHeartbeat(healthyNode);
Thread.sleep(2 * 1000);
assertEquals(1, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
@@ -1260,10 +1217,10 @@ public void testScmClusterIsInExpectedState1()
* staleNode to move to stale state and deadNode to move to dead state.
*/
- nodeManager.processHeartbeat(healthyNode, layoutInfo);
- nodeManager.processHeartbeat(staleNode, layoutInfo);
+ nodeManager.processHeartbeat(healthyNode);
+ nodeManager.processHeartbeat(staleNode);
Thread.sleep(1500);
- nodeManager.processHeartbeat(healthyNode, layoutInfo);
+ nodeManager.processHeartbeat(healthyNode);
Thread.sleep(2 * 1000);
// 3.5 seconds have elapsed for stale node, so it moves into Stale.
@@ -1295,9 +1252,9 @@ public void testScmClusterIsInExpectedState1()
* Cluster State : let us heartbeat all the nodes and verify that we get
* back all the nodes in healthy state.
*/
- nodeManager.processHeartbeat(healthyNode, layoutInfo);
- nodeManager.processHeartbeat(staleNode, layoutInfo);
- nodeManager.processHeartbeat(deadNode, layoutInfo);
+ nodeManager.processHeartbeat(healthyNode);
+ nodeManager.processHeartbeat(staleNode);
+ nodeManager.processHeartbeat(deadNode);
Thread.sleep(500);
//Assert all nodes are healthy.
assertEquals(3, nodeManager.getAllNodes().size());
@@ -1316,13 +1273,9 @@ public void testScmClusterIsInExpectedState1()
private void heartbeatNodeSet(SCMNodeManager manager,
List<DatanodeDetails> list,
int sleepDuration) throws InterruptedException {
- LayoutVersionManager versionManager = manager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
while (!Thread.currentThread().isInterrupted()) {
for (DatanodeDetails dn : list) {
- manager.processHeartbeat(dn, layoutInfo);
+ manager.processHeartbeat(dn);
}
Thread.sleep(sleepDuration);
}
@@ -1405,16 +1358,10 @@ public void testScmClusterIsInExpectedState2()
}
};
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
-
// No Thread just one time HBs the node manager, so that these will be
// marked as dead nodes eventually.
for (DatanodeDetails dn : deadNodeList) {
- nodeManager.processHeartbeat(dn, layoutInfo);
+ nodeManager.processHeartbeat(dn);
}
@@ -1541,11 +1488,6 @@ public void testScmStatsFromNodeReport()
final long remaining = capacity - used;
List<DatanodeDetails> dnList = new ArrayList<>(nodeCount);
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
EventQueue eventQueue = (EventQueue) scm.getEventQueue();
for (int x = 0; x < nodeCount; x++) {
@@ -1558,7 +1500,7 @@ public void testScmStatsFromNodeReport()
.createStorageReport(dnId, storagePath, capacity, used, free, null);
nodeManager.register(dn, HddsTestUtils.createNodeReport(
Arrays.asList(report), emptyList()), null);
- nodeManager.processHeartbeat(dn, layoutInfo);
+ nodeManager.processHeartbeat(dn);
}
//TODO: wait for EventQueue to be processed
eventQueue.processAll(8000L);
@@ -1572,6 +1514,49 @@ public void testScmStatsFromNodeReport()
}
}
+ private List<StorageReportProto> generateStorageReportProto(
+ int volumeCount, UUID dnId, long capacity, long used, long remaining) {
+ List<StorageReportProto> reports = new ArrayList<>(volumeCount);
+ boolean failed = true;
+ for (int x = 0; x < volumeCount; x++) {
+ String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+ reports.add(HddsTestUtils
+ .createStorageReport(dnId, storagePath, capacity,
+ used, remaining, null, failed));
+ failed = !failed;
+ }
+ return reports;
+ }
+
+ private static Stream<Arguments> calculateStoragePercentageScenarios() {
+ return Stream.of(
+ Arguments.of(600, 65, 500, 1, "600.0B", "10.83", "5.83"),
+ Arguments.of(10000, 1000, 8800, 12, "117.2KB", "10.00", "2.00"),
+ Arguments.of(100000000, 1000, 899999, 12, "1.1GB", "0.00", "99.10"),
+ Arguments.of(10000, 1000, 0, 0, "0.0B", "N/A", "N/A"),
+ Arguments.of(0, 0, 0, 0, "0.0B", "N/A", "N/A"),
+ Arguments.of(1010, 547, 400, 5, "4.9KB", "54.16", "6.24")
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource("calculateStoragePercentageScenarios")
+ public void testCalculateStoragePercentage(long perCapacity,
+ long used, long remaining, int volumeCount, String totalCapacity,
+ String scmUsedPerc, String nonScmUsedPerc) {
+ DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+ UUID dnId = dn.getUuid();
+ List<StorageReportProto> reports = volumeCount > 0 ?
+ generateStorageReportProto(volumeCount, dnId, perCapacity,
+ used, remaining) : null;
+ String capacityResult = SCMNodeManager.calculateStorageCapacity(reports);
+ assertEquals(totalCapacity, capacityResult);
+ String[] storagePercentage = SCMNodeManager.calculateStoragePercentage(
+ reports);
+ assertEquals(scmUsedPerc, storagePercentage[0]);
+ assertEquals(nonScmUsedPerc, storagePercentage[1]);
+ }
+
/**
* Test multiple nodes sending initial heartbeat with their node report
* with multiple volumes.
@@ -1607,12 +1592,7 @@ public void tesVolumeInfoFromNodeReport()
}
nodeManager.register(dn, HddsTestUtils.createNodeReport(reports,
emptyList()), null);
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
- nodeManager.processHeartbeat(dn, layoutInfo);
+ nodeManager.processHeartbeat(dn);
//TODO: wait for EventQueue to be processed
eventQueue.processAll(8000L);
@@ -1665,12 +1645,7 @@ public void testScmNodeReportUpdate()
nodeReportHandler.onMessage(
new NodeReportFromDatanode(datanodeDetails, nodeReportProto),
publisher);
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
- nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+ nodeManager.processHeartbeat(datanodeDetails);
Thread.sleep(100);
}
@@ -1745,13 +1720,7 @@ public void testScmNodeReportUpdate()
foundRemaining = nodeManager.getStats().getRemaining().get();
assertEquals(0, foundRemaining);
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
-
- nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+ nodeManager.processHeartbeat(datanodeDetails);
// Wait up to 5 seconds so that the dead node becomes healthy
// Verify usage info should be updated.
@@ -1800,14 +1769,9 @@ public void testHandlingSCMCommandEvent()
new CloseContainerCommand(1L,
PipelineID.randomId())));
- LayoutVersionManager versionManager =
- nodemanager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
eq.processAll(1000L);
List command =
- nodemanager.processHeartbeat(datanodeDetails, layoutInfo);
+ nodemanager.processHeartbeat(datanodeDetails);
// With dh registered, SCM will send create pipeline command to dn
assertThat(command.size()).isGreaterThanOrEqualTo(1);
assertTrue(command.get(0).getClass().equals(
@@ -1937,16 +1901,11 @@ public void testGetNodeInfo()
Arrays.asList(report), emptyList()),
HddsTestUtils.getRandomPipelineReports());
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = toLayoutVersionProto(
- versionManager.getMetadataLayoutVersion(),
- versionManager.getSoftwareLayoutVersion());
nodeManager.register(datanodeDetails,
HddsTestUtils.createNodeReport(Arrays.asList(report),
emptyList()),
- HddsTestUtils.getRandomPipelineReports(), layoutInfo);
- nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
+ HddsTestUtils.getRandomPipelineReports());
+ nodeManager.processHeartbeat(datanodeDetails);
if (i == 5) {
nodeManager.setNodeOperationalState(datanodeDetails,
HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE);
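
Among the changes above, the try/fail/catch idiom around an expected failure is replaced with JUnit 5's assertThrows, which both verifies the exception and returns it for message checks. A minimal, self-contained sketch of the pattern; IOException stands in for the project-specific exception type, and all names are illustrative, not from this patch:

    import java.io.IOException;
    import org.junit.jupiter.api.Test;
    import static org.assertj.core.api.Assertions.assertThat;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    class AssertThrowsExample {
      private void createPipeline(int availableNodes) throws IOException {
        if (availableNodes < 3) {
          throw new IOException("Required 3. Found " + availableNodes);
        }
      }

      @Test
      void failsWhenTooFewNodes() {
        // assertThrows fails the test (with the given message) if no exception is
        // thrown, and otherwise returns it so the message can be inspected.
        IOException ex = assertThrows(IOException.class,
            () -> createPipeline(1),
            "3 nodes should not have been found for a pipeline.");
        assertThat(ex.getMessage()).contains("Required 3. Found 1");
      }
    }
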
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java
similarity index 87%
rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java
index 79ca6013165..20c6aa2de37 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm.node;
+package org.apache.hadoop.hdds.scm.node;
import java.io.File;
import java.io.IOException;
@@ -26,21 +26,16 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -100,16 +95,8 @@ public static void teardown() throws IOException {
@Test
public void testHBProcessing() throws InterruptedException {
long hbProcessed = getCounter("NumHBProcessed");
-
createNodeReport();
-
- LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
- .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
- .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
- .build();
- nodeManager.processHeartbeat(registeredDatanode, layoutInfo);
-
+ nodeManager.processHeartbeat(registeredDatanode);
assertEquals(hbProcessed + 1, getCounter("NumHBProcessed"),
"NumHBProcessed");
}
@@ -119,17 +106,8 @@ public void testHBProcessing() throws InterruptedException {
*/
@Test
public void testHBProcessingFailure() {
-
long hbProcessedFailed = getCounter("NumHBProcessingFailed");
-
- LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
- .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
- .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
- .build();
- nodeManager.processHeartbeat(MockDatanodeDetails
- .randomDatanodeDetails(), layoutInfo);
-
+ nodeManager.processHeartbeat(MockDatanodeDetails.randomDatanodeDetails());
assertEquals(hbProcessedFailed + 1, getCounter("NumHBProcessingFailed"),
"NumHBProcessingFailed");
}
@@ -254,13 +232,7 @@ public void testNodeCountAndInfoMetricsReported() throws Exception {
getMetrics(SCMNodeMetrics.class.getSimpleName()));
assertGauge("TotalUsed", 10L,
getMetrics(SCMNodeMetrics.class.getSimpleName()));
-
- LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
- LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
- .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
- .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
- .build();
- nodeManager.processHeartbeat(registeredDatanode, layoutInfo);
+ nodeManager.processHeartbeat(registeredDatanode);
sleep(4000);
metricsSource = getMetrics(SCMNodeMetrics.SOURCE_NAME);
assertGauge("InServiceHealthyReadonlyNodes", 0, metricsSource);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 6dd5f674d4d..147aa719841 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -28,10 +28,11 @@
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -53,24 +54,25 @@
*/
public class TestSCMNodeStorageStatMap {
private static final int DATANODE_COUNT = 100;
- private final long capacity = 10L * OzoneConsts.GB;
- private final long used = 2L * OzoneConsts.GB;
- private final long remaining = capacity - used;
+ private static final long CAPACITY = 10L * OzoneConsts.GB;
+ private static final long USED = 2L * OzoneConsts.GB;
+ private static final long REMAINING = CAPACITY - USED;
private static OzoneConfiguration conf = new OzoneConfiguration();
private final Map<UUID, Set<StorageLocationReport>> testData =
new ConcurrentHashMap<>();
+ @TempDir
+ private File tempFile;
private void generateData() {
for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
UUID dnId = UUID.randomUUID();
Set<StorageLocationReport> reportSet = new HashSet<>();
- String path = GenericTestUtils.getTempPath(
- TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + dnIndex);
+ String path = tempFile.getPath() + "-" + dnIndex;
StorageLocationReport.Builder builder =
StorageLocationReport.newBuilder();
builder.setStorageType(StorageType.DISK).setId(dnId.toString())
- .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
- .setCapacity(capacity).setFailed(false);
+ .setStorageLocation(path).setScmUsed(USED).setRemaining(REMAINING)
+ .setCapacity(CAPACITY).setFailed(false);
reportSet.add(builder.build());
testData.put(UUID.randomUUID(), reportSet);
}
@@ -114,13 +116,12 @@ public void testInsertNewDatanode() throws SCMException {
public void testUpdateUnknownDatanode() {
SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
UUID unknownNode = UUID.randomUUID();
- String path = GenericTestUtils.getTempPath(
- TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode);
+ String path = tempFile.getPath() + "-" + unknownNode;
Set<StorageLocationReport> reportSet = new HashSet<>();
StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
builder.setStorageType(StorageType.DISK).setId(unknownNode.toString())
- .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
- .setCapacity(capacity).setFailed(false);
+ .setStorageLocation(path).setScmUsed(USED).setRemaining(REMAINING)
+ .setCapacity(CAPACITY).setFailed(false);
reportSet.add(builder.build());
Throwable t = assertThrows(SCMException.class,
() -> map.updateDatanodeMap(unknownNode, reportSet));
@@ -136,8 +137,7 @@ public void testProcessNodeReportCheckOneNode() throws IOException {
map.insertNewDatanode(key, reportSet);
assertTrue(map.isKnownDatanode(key));
UUID storageId = UUID.randomUUID();
- String path =
- GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
+ String path = tempFile.getPath().concat("/" + storageId);
StorageLocationReport report = reportSet.iterator().next();
long reportCapacity = report.getCapacity();
long reportScmUsed = report.getScmUsed();
@@ -184,22 +184,20 @@ public void testProcessMultipleNodeReports() throws SCMException {
.entrySet()) {
map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
}
- assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity());
- assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace());
- assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed());
+ assertEquals(DATANODE_COUNT * CAPACITY, map.getTotalCapacity());
+ assertEquals(DATANODE_COUNT * REMAINING, map.getTotalFreeSpace());
+ assertEquals(DATANODE_COUNT * USED, map.getTotalSpaceUsed());
// update 1/4th of the datanode to be full
for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
.entrySet()) {
Set<StorageLocationReport> reportSet = new HashSet<>();
- String path = GenericTestUtils.getTempPath(
- TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry
- .getKey().toString());
+ String path = tempFile.getPath() + "-" + keyEntry.getKey().toString();
StorageLocationReport.Builder builder =
StorageLocationReport.newBuilder();
builder.setStorageType(StorageType.DISK)
.setId(keyEntry.getKey().toString()).setStorageLocation(path)
- .setScmUsed(capacity).setRemaining(0).setCapacity(capacity)
+ .setScmUsed(CAPACITY).setRemaining(0).setCapacity(CAPACITY)
.setFailed(false);
reportSet.add(builder.build());
@@ -216,9 +214,9 @@ public void testProcessMultipleNodeReports() throws SCMException {
assertEquals(0.75 * DATANODE_COUNT,
map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL).size(), 0);
- assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
- assertEquals(0.75 * DATANODE_COUNT * remaining, map.getTotalFreeSpace(), 0);
- assertEquals(0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * capacity),
+ assertEquals(DATANODE_COUNT * CAPACITY, map.getTotalCapacity(), 0);
+ assertEquals(0.75 * DATANODE_COUNT * REMAINING, map.getTotalFreeSpace(), 0);
+ assertEquals(0.75 * DATANODE_COUNT * USED + (0.25 * DATANODE_COUNT * CAPACITY),
map.getTotalSpaceUsed(), 0);
counter = 1;
// Remove 1/4 of the DataNodes from the Map
@@ -236,9 +234,9 @@ public void testProcessMultipleNodeReports() throws SCMException {
assertEquals(0.75 * DATANODE_COUNT,
map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL).size(), 0);
- assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
- assertEquals(0.75 * DATANODE_COUNT * remaining, map.getTotalFreeSpace(), 0);
- assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0);
+ assertEquals(0.75 * DATANODE_COUNT * CAPACITY, map.getTotalCapacity(), 0);
+ assertEquals(0.75 * DATANODE_COUNT * REMAINING, map.getTotalFreeSpace(), 0);
+ assertEquals(0.75 * DATANODE_COUNT * USED, map.getTotalSpaceUsed(), 0);
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
index 8aff3dd28aa..0ef28f658d4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
@@ -22,8 +22,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -40,7 +38,6 @@
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -122,19 +119,13 @@ public void testStatisticsUpdate() throws Exception {
//TODO: Support logic to mark a node as dead in NodeManager.
- LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
- StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo =
- StorageContainerDatanodeProtocolProtos.LayoutVersionProto.newBuilder()
- .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
- .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
- .build();
- nodeManager.processHeartbeat(datanode2, layoutInfo);
+ nodeManager.processHeartbeat(datanode2);
Thread.sleep(1000);
- nodeManager.processHeartbeat(datanode2, layoutInfo);
+ nodeManager.processHeartbeat(datanode2);
Thread.sleep(1000);
- nodeManager.processHeartbeat(datanode2, layoutInfo);
+ nodeManager.processHeartbeat(datanode2);
Thread.sleep(1000);
- nodeManager.processHeartbeat(datanode2, layoutInfo);
+ nodeManager.processHeartbeat(datanode2);
//THEN statistics in SCM should changed.
stat = nodeManager.getStats();
assertEquals(200L, stat.getCapacity().get());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java
index 2eba81d505a..f2ed769496b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestECPipelineProvider.java
@@ -52,7 +52,7 @@
import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.ALLOCATED;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.anyInt;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index 61ba3d3bb8d..385e1c65316 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.hdds.scm.pipeline;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -35,9 +33,9 @@
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import org.slf4j.Logger;
@@ -46,7 +44,6 @@
import java.io.File;
import java.io.IOException;
import java.util.List;
-import java.util.UUID;
import static org.junit.jupiter.api.Assertions.fail;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
@@ -61,17 +58,14 @@ public class TestPipelineDatanodesIntersection {
private OzoneConfiguration conf;
private boolean end;
+ @TempDir
private File testDir;
private DBStore dbStore;
@BeforeEach
public void initialize() throws IOException {
- conf = SCMTestUtils.getConf();
+ conf = SCMTestUtils.getConf(testDir);
end = false;
- testDir = GenericTestUtils.getTestDir(
- TestPipelineDatanodesIntersection.class.getSimpleName()
- + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
}
@@ -81,8 +75,6 @@ public void cleanup() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
@ParameterizedTest
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 270ae0ef493..e9407d6a941 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdds.scm.pipeline;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -100,11 +99,11 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.doAnswer;
@@ -128,11 +127,11 @@ public class TestPipelineManagerImpl {
private TestClock testClock;
@BeforeEach
- void init(@TempDir File testDir) throws Exception {
+ void init(@TempDir File testDir, @TempDir File dbDir) throws Exception {
testClock = new TestClock(Instant.now(), ZoneOffset.UTC);
- conf = SCMTestUtils.getConf();
- scm = HddsTestUtils.getScm(conf);
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
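+ // the SCM instance gets its own conf rooted at testDir; this test's conf and DBStore use the separate dbDir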
+ conf = SCMTestUtils.getConf(dbDir);
+ scm = HddsTestUtils.getScm(SCMTestUtils.getConf(testDir));
+
// Mock Node Manager is not able to correctly set up things for the EC
// placement policy (Rack Scatter), so just use the random one.
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_EC_IMPL_KEY,
@@ -216,7 +215,7 @@ public void testCreatePipeline() throws Exception {
PipelineManagerImpl pipelineManager2 =
createPipelineManager(true, buffer2);
// Should be able to load previous pipelines.
- assertFalse(pipelineManager2.getPipelines().isEmpty());
+ assertThat(pipelineManager2.getPipelines()).isNotEmpty();
assertEquals(3, pipelineManager.getPipelines().size());
Pipeline pipeline3 = pipelineManager2.createPipeline(
RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
@@ -261,10 +260,10 @@ public void testUpdatePipelineStates() throws Exception {
assertEquals(Pipeline.PipelineState.DORMANT, pipelineManager.getPipeline(pipelineID).getPipelineState());
buffer.flush();
assertEquals(Pipeline.PipelineState.DORMANT, pipelineStore.get(pipeline.getId()).getPipelineState());
- assertFalse(pipelineManager
+ assertThat(pipelineManager
.getPipelines(RatisReplicationConfig
.getInstance(ReplicationFactor.THREE),
- Pipeline.PipelineState.OPEN).contains(pipeline));
+ Pipeline.PipelineState.OPEN)).doesNotContain(pipeline);
assertEquals(1, pipelineManager.getPipelineCount(
RatisReplicationConfig.getInstance(ReplicationFactor.THREE),
Pipeline.PipelineState.DORMANT));
@@ -332,28 +331,16 @@ public void testRemovePipeline() throws Exception {
.getPipelines(RatisReplicationConfig
.getInstance(ReplicationFactor.THREE),
Pipeline.PipelineState.OPEN).contains(pipeline));
-
- try {
- pipelineManager.removePipeline(pipeline);
- fail();
- } catch (IOException ioe) {
- // Should not be able to remove the OPEN pipeline.
- assertEquals(1, pipelineManager.getPipelines().size());
- } catch (Exception e) {
- fail("Should not reach here.");
- }
+ assertThrows(IOException.class, () -> pipelineManager.removePipeline(pipeline));
+ // Should not be able to remove the OPEN pipeline.
+ assertEquals(1, pipelineManager.getPipelines().size());
// Destroy pipeline
pipelineManager.closePipeline(pipeline.getId());
pipelineManager.deletePipeline(pipeline.getId());
- try {
- pipelineManager.getPipeline(pipeline.getId());
- fail("Pipeline should not have been retrieved");
- } catch (PipelineNotFoundException e) {
- // There may be pipelines created by BackgroundPipelineCreator
- // exist in pipelineManager, just ignore them.
- }
+ assertThrows(PipelineNotFoundException.class, () -> pipelineManager.getPipeline(pipeline.getId()),
+ "Pipeline should not have been retrieved");
}
}
@@ -443,17 +430,11 @@ public void testPipelineCreationFailedMetric() throws Exception {
assertEquals(0, numPipelineCreateFailed);
//This should fail...
- try {
- pipelineManager
- .createPipeline(RatisReplicationConfig
- .getInstance(ReplicationFactor.THREE));
- fail();
- } catch (SCMException ioe) {
- // pipeline creation failed this time.
- assertEquals(
- ResultCodes.FAILED_TO_FIND_SUITABLE_NODE,
- ioe.getResult());
- }
+ SCMException e =
+ assertThrows(SCMException.class,
+ () -> pipelineManager.createPipeline(RatisReplicationConfig.getInstance(ReplicationFactor.THREE)));
+ // pipeline creation failed this time.
+ assertEquals(ResultCodes.FAILED_TO_FIND_SUITABLE_NODE, e.getResult());
metrics = getMetrics(
SCMPipelineMetrics.class.getSimpleName());
@@ -573,16 +554,16 @@ public void testScrubPipelines() throws Exception {
pipelineManager.scrubPipelines();
// The allocatedPipeline should now be scrubbed as the interval has passed
- assertFalse(pipelineManager
+ assertThat(pipelineManager
.getPipelines(RatisReplicationConfig
.getInstance(ReplicationFactor.THREE),
- Pipeline.PipelineState.ALLOCATED).contains(allocatedPipeline));
+ Pipeline.PipelineState.ALLOCATED)).doesNotContain(allocatedPipeline);
// The closedPipeline should now be scrubbed as the interval has passed
- assertFalse(pipelineManager
+ assertThat(pipelineManager
.getPipelines(RatisReplicationConfig
.getInstance(ReplicationFactor.THREE),
- Pipeline.PipelineState.CLOSED).contains(closedPipeline));
+ Pipeline.PipelineState.CLOSED)).doesNotContain(closedPipeline);
pipelineManager.close();
}
@@ -636,15 +617,11 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception {
new SCMSafeModeManager.SafeModeStatus(true, false));
PipelineManagerImpl pipelineManager = createPipelineManager(true);
- try {
- pipelineManager
- .createPipeline(RatisReplicationConfig
- .getInstance(ReplicationFactor.THREE));
- fail("Pipelines should not have been created");
- } catch (IOException e) {
- // No pipeline is created.
- assertTrue(pipelineManager.getPipelines().isEmpty());
- }
+ assertThrows(IOException.class,
+ () -> pipelineManager.createPipeline(RatisReplicationConfig.getInstance(ReplicationFactor.THREE)),
+ "Pipelines should not have been created");
+ // No pipeline is created.
+ assertTrue(pipelineManager.getPipelines().isEmpty());
// Ensure a pipeline of factor ONE can be created - no exceptions should be
// raised.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 2f0b0a5cc76..0f9ec84f033 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -27,8 +27,6 @@
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -58,10 +56,10 @@
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.IOException;
@@ -83,7 +81,6 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotSame;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for PipelinePlacementPolicy.
@@ -96,6 +93,7 @@ public class TestPipelinePlacementPolicy {
private NetworkTopologyImpl cluster;
private static final int PIPELINE_PLACEMENT_MAX_NODES_COUNT = 10;
private static final int PIPELINE_LOAD_LIMIT = 5;
+ @TempDir
private File testDir;
private DBStore dbStore;
private SCMHAManager scmhaManager;
@@ -109,14 +107,11 @@ public void init() throws Exception {
// start with nodes with rack awareness.
nodeManager = new MockNodeManager(cluster, getNodesWithRackAwareness(),
false, PIPELINE_PLACEMENT_MAX_NODES_COUNT);
- conf = SCMTestUtils.getConf();
+ conf = SCMTestUtils.getConf(testDir);
conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, PIPELINE_LOAD_LIMIT);
conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
10, StorageUnit.MB);
nodeManager.setNumPipelinePerDatanode(PIPELINE_LOAD_LIMIT);
- testDir = GenericTestUtils.getTestDir(
- TestPipelinePlacementPolicy.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
scmhaManager = SCMHAManagerStub.getInstance(true);
@@ -135,8 +130,6 @@ public void cleanup() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
private NetworkTopologyImpl initTopology() {
@@ -251,25 +244,19 @@ public void testChooseNodeNotEnoughSpace() throws IOException {
String expectedMessageSubstring = "Unable to find enough nodes that meet " +
"the space requirement";
- try {
- // A huge container size
- localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()),
- new ArrayList<>(datanodes.size()), nodesRequired,
- 0, 10 * OzoneConsts.TB);
- fail("SCMException should have been thrown.");
- } catch (SCMException ex) {
- assertThat(ex.getMessage()).contains(expectedMessageSubstring);
- }
- try {
- // a huge free space min configured
- localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()),
- new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB,
- 0);
- fail("SCMException should have been thrown.");
- } catch (SCMException ex) {
- assertThat(ex.getMessage()).contains(expectedMessageSubstring);
- }
+ // A huge container size
+ SCMException ex =
+ assertThrows(SCMException.class,
+ () -> localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()),
+ new ArrayList<>(datanodes.size()), nodesRequired, 0, 10 * OzoneConsts.TB));
+ assertThat(ex.getMessage()).contains(expectedMessageSubstring);
+
+ // a huge free space min configured
+ ex = assertThrows(SCMException.class,
+ () -> localPlacementPolicy.chooseDatanodes(new ArrayList<>(datanodes.size()),
+ new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB, 0));
+ assertThat(ex.getMessage()).contains(expectedMessageSubstring);
}
@Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
index 3874a88941d..9feb9e1f0a9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.hdds.scm.pipeline;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -39,10 +37,10 @@
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -50,13 +48,11 @@
import java.util.HashSet;
import java.util.List;
import java.util.Set;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.assertj.core.api.Assertions.assertThat;
/**
@@ -65,15 +61,13 @@
public class TestPipelineStateManagerImpl {
private PipelineStateManager stateManager;
+ @TempDir
private File testDir;
private DBStore dbStore;
@BeforeEach
public void init() throws Exception {
- final OzoneConfiguration conf = SCMTestUtils.getConf();
- testDir = GenericTestUtils.getTestDir(
- TestPipelineStateManagerImpl.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+ final OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
@@ -93,8 +87,6 @@ public void cleanup() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
private Pipeline createDummyPipeline(int numNodes) {
@@ -323,14 +315,13 @@ public void testAddAndGetContainer() throws IOException, TimeoutException {
finalizePipeline(pipelineProto);
removePipeline(pipelineProto);
- try {
- stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueOf(++containerID));
- fail("Container should not have been added");
- } catch (IOException e) {
- // Can not add a container to removed pipeline
- assertThat(e.getMessage()).contains("not found");
- }
+ Pipeline finalPipeline = pipeline;
+ ContainerID cid = ContainerID.valueOf(++containerID);
+ IOException e =
+ assertThrows(IOException.class,
+ () -> stateManager.addContainerToPipeline(finalPipeline.getId(), cid));
+ // Can not add a container to removed pipeline
+ assertThat(e.getMessage()).contains("not found");
}
@Test
@@ -344,13 +335,9 @@ public void testRemovePipeline() throws IOException, TimeoutException {
stateManager
.addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
- try {
- removePipeline(pipelineProto);
- fail("Pipeline should not have been removed");
- } catch (IOException e) {
- // can not remove a pipeline which already has containers
- assertThat(e.getMessage()).contains("not yet closed");
- }
+ IOException e = assertThrows(IOException.class, () -> removePipeline(pipelineProto));
+ // can not remove a pipeline which already has containers
+ assertThat(e.getMessage()).contains("not yet closed");
// close the pipeline
finalizePipeline(pipelineProto);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 977cf137fd8..5350c0da86e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.scm.pipeline;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -41,10 +40,10 @@
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.ClientVersion;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -67,10 +66,9 @@
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
/**
- * Test for RatisPipelineProvider.
+ * Test for {@link RatisPipelineProvider}.
*/
public class TestRatisPipelineProvider {
@@ -80,6 +78,7 @@ public class TestRatisPipelineProvider {
private MockNodeManager nodeManager;
private RatisPipelineProvider provider;
private PipelineStateManager stateManager;
+ @TempDir
private File testDir;
private DBStore dbStore;
@@ -89,9 +88,11 @@ public void init(int maxPipelinePerNode) throws Exception {
public void init(int maxPipelinePerNode, OzoneConfiguration conf)
throws Exception {
- testDir = GenericTestUtils.getTestDir(
- TestRatisPipelineProvider.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+ init(maxPipelinePerNode, conf, testDir);
+ }
+
+ public void init(int maxPipelinePerNode, OzoneConfiguration conf, File dir) throws Exception {
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
nodeManager = new MockNodeManager(true, 10);
@@ -114,8 +115,6 @@ void cleanup() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
private static void assertPipelineProperties(
@@ -332,7 +331,7 @@ public void testFactorTHREEPipelineRackScatterEngagement()
}
@Test
- public void testCreatePipelinesWhenNotEnoughSpace() throws Exception {
+ public void testCreatePipelinesWhenNotEnoughSpace(@TempDir File tempDir) throws Exception {
String expectedErrorSubstring = "Unable to find enough" +
" nodes that meet the space requirement";
@@ -345,29 +344,23 @@ public void testCreatePipelinesWhenNotEnoughSpace() throws Exception {
if (factor == ReplicationFactor.ZERO) {
continue;
}
- try {
- provider.create(RatisReplicationConfig.getInstance(factor));
- fail("Expected SCMException for large container size with " +
- "replication factor " + factor.toString());
- } catch (SCMException ex) {
- assertThat(ex.getMessage()).contains(expectedErrorSubstring);
- }
+ SCMException ex =
+ assertThrows(SCMException.class, () -> provider.create(RatisReplicationConfig.getInstance(factor)),
+ "Expected SCMException for large container size with replication factor " + factor.toString());
+ assertThat(ex.getMessage()).contains(expectedErrorSubstring);
}
OzoneConfiguration largeMetadataConf = new OzoneConfiguration();
largeMetadataConf.set(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, "300TB");
- init(1, largeMetadataConf);
+ init(1, largeMetadataConf, tempDir);
for (ReplicationFactor factor: ReplicationFactor.values()) {
if (factor == ReplicationFactor.ZERO) {
continue;
}
- try {
- provider.create(RatisReplicationConfig.getInstance(factor));
- fail("Expected SCMException for large metadata size with " +
- "replication factor " + factor.toString());
- } catch (SCMException ex) {
- assertThat(ex.getMessage()).contains(expectedErrorSubstring);
- }
+ SCMException ex =
+ assertThrows(SCMException.class, () -> provider.create(RatisReplicationConfig.getInstance(factor)),
+ "Expected SCMException for large metadata size with replication factor " + factor.toString());
+ assertThat(ex.getMessage()).contains(expectedErrorSubstring);
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
index bbb714debb8..b69ebedb04d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.hdds.scm.pipeline;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -34,16 +32,15 @@
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -55,16 +52,14 @@ public class TestSimplePipelineProvider {
private NodeManager nodeManager;
private PipelineProvider provider;
private PipelineStateManager stateManager;
+ @TempDir
private File testDir;
private DBStore dbStore;
@BeforeEach
public void init() throws Exception {
nodeManager = new MockNodeManager(true, 10);
- final OzoneConfiguration conf = SCMTestUtils.getConf();
- testDir = GenericTestUtils.getTestDir(
- TestSimplePipelineProvider.class.getSimpleName() + UUID.randomUUID());
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+ final OzoneConfiguration conf = SCMTestUtils.getConf(testDir);
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
SCMHAManager scmhaManager = SCMHAManagerStub.getInstance(true);
@@ -82,8 +77,6 @@ public void cleanup() throws Exception {
if (dbStore != null) {
dbStore.close();
}
-
- FileUtil.fullyDelete(testDir);
}
@Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java
index 54d2ffed828..4f86450d03e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java
@@ -34,7 +34,11 @@
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
+import org.apache.hadoop.hdds.scm.net.NodeSchema;
+import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
import org.apache.hadoop.hdds.scm.pipeline.WritableECContainerProvider.WritableECContainerProviderConfig;
+import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.CapacityPipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.HealthyPipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy;
import org.apache.hadoop.hdds.utils.db.DBStore;
@@ -54,8 +58,13 @@
import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import static org.apache.hadoop.hdds.conf.StorageUnit.BYTES;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -84,7 +93,7 @@ public class TestWritableECContainerProvider {
private OzoneConfiguration conf;
private DBStore dbStore;
private SCMHAManager scmhaManager;
- private MockNodeManager nodeManager;
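+ // static so the parameterized policies() factory below can reuse it when building the capacity-based policy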
+ private static MockNodeManager nodeManager;
private WritableContainerProvider provider;
private ECReplicationConfig repConfig;
@@ -93,8 +102,20 @@ public class TestWritableECContainerProvider {
public static Collection<PipelineChoosePolicy> policies() {
Collection<PipelineChoosePolicy> policies = new ArrayList<>();
+ // init nodeManager
+ NodeSchemaManager.getInstance().init(new NodeSchema[]
+ {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, true);
+ NetworkTopologyImpl cluster =
+ new NetworkTopologyImpl(NodeSchemaManager.getInstance());
+ int count = 10;
+ List<DatanodeDetails> datanodes = IntStream.range(0, count)
+ .mapToObj(i -> MockDatanodeDetails.randomDatanodeDetails())
+ .collect(Collectors.toList());
+ nodeManager = new MockNodeManager(cluster, datanodes, false, count);
+
policies.add(new RandomPipelineChoosePolicy());
policies.add(new HealthyPipelineChoosePolicy());
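+ // the capacity-based policy ranks pipelines by datanode usage, so it is initialized with the node manager created above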
+ policies.add(new CapacityPipelineChoosePolicy().init(nodeManager));
return policies;
}
@@ -110,7 +131,6 @@ void setup(@TempDir File testDir) throws IOException {
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
scmhaManager = SCMHAManagerStub.getInstance(true);
- nodeManager = new MockNodeManager(true, 10);
pipelineManager =
new MockPipelineManager(dbStore, scmhaManager, nodeManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java
new file mode 100644
index 00000000000..421d2396bfa
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.junit.jupiter.api.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test for the capacity pipeline choose policy.
+ */
+public class TestCapacityPipelineChoosePolicy {
+
+ @Test
+ public void testChoosePipeline() throws Exception {
+
+ // given 4 datanodes
+ List<DatanodeDetails> datanodes = new ArrayList<>();
+ for (int i = 0; i < 4; i++) {
+ datanodes.add(MockDatanodeDetails.randomDatanodeDetails());
+ }
+ // dn0 dn1 dn2 dn3
+ // used 0 10 20 30
+ NodeManager mockNodeManager = mock(NodeManager.class);
+ when(mockNodeManager.getNodeStat(datanodes.get(0)))
+ .thenReturn(new SCMNodeMetric(100L, 0, 100L, 0, 0));
+ when(mockNodeManager.getNodeStat(datanodes.get(1)))
+ .thenReturn(new SCMNodeMetric(100L, 10L, 90L, 0, 0));
+ when(mockNodeManager.getNodeStat(datanodes.get(2)))
+ .thenReturn(new SCMNodeMetric(100L, 20L, 80L, 0, 0));
+ when(mockNodeManager.getNodeStat(datanodes.get(3)))
+ .thenReturn(new SCMNodeMetric(100L, 30L, 70L, 0, 0));
+
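+ // initialize the policy under test with the mocked NodeManager so it can read the per-node usage stubbed above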
+ PipelineChoosePolicy policy = new CapacityPipelineChoosePolicy().init(mockNodeManager);
+
+ // generate 4 pipelines, and every pipeline has 3 datanodes
+ //
+ // pipeline0 dn1 dn2 dn3
+ // pipeline1 dn0 dn2 dn3
+ // pipeline2 dn0 dn1 dn3
+ // pipeline3 dn0 dn1 dn2
+ //
+ // In the above scenario, pipeline0 vs pipeline1 runs through three rounds
+ // of comparisons, (dn3 <-> dn3) -> (dn2 <-> dn2) -> (dn1 <-> dn0);
+ // in the final round dn0 (less used) beats dn1, so pipeline1 is selected.
+ //
+ List<Pipeline> pipelines = new ArrayList<>();
+ for (int i = 0; i < 4; i++) {
+ List<DatanodeDetails> dns = new ArrayList<>();
+ for (int j = 0; j < datanodes.size(); j++) {
+ if (i != j) {
+ dns.add(datanodes.get(j));
+ }
+ }
+ Pipeline pipeline = MockPipeline.createPipeline(dns);
+ MockRatisPipelineProvider.markPipelineHealthy(pipeline);
+ pipelines.add(pipeline);
+ }
+
+ Map<Pipeline, Integer> selectedCount = new HashMap<>();
+ for (Pipeline pipeline : pipelines) {
+ selectedCount.put(pipeline, 0);
+ }
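+ // choose repeatedly from the same candidates; pipelines backed by less-used datanodes should be picked more often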
+ for (int i = 0; i < 1000; i++) {
+ // choosePipeline
+ Pipeline pipeline = policy.choosePipeline(pipelines, null);
+ assertNotNull(pipeline);
+ selectedCount.put(pipeline, selectedCount.get(pipeline) + 1);
+ }
+
+ // The selected counts, from most to least, should be:
+ // pipeline3 > pipeline2 > pipeline1 > pipeline0
+ assertThat(selectedCount.get(pipelines.get(3))).isGreaterThan(selectedCount.get(pipelines.get(2)));
+ assertThat(selectedCount.get(pipelines.get(2))).isGreaterThan(selectedCount.get(pipelines.get(1)));
+ assertThat(selectedCount.get(pipelines.get(1))).isGreaterThan(selectedCount.get(pipelines.get(0)));
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java
index 7d0a72ed2fb..82fed5953aa 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java
@@ -21,7 +21,9 @@
import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
import org.apache.hadoop.hdds.scm.ScmConfig;
+import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -42,17 +44,20 @@ public class TestPipelineChoosePolicyFactory {
private ScmConfig scmConfig;
+ private NodeManager nodeManager;
+
@BeforeEach
public void setup() {
//initialize network topology instance
conf = new OzoneConfiguration();
scmConfig = conf.getObject(ScmConfig.class);
+ nodeManager = new MockNodeManager(true, 5);
}
@Test
public void testDefaultPolicy() throws IOException {
PipelineChoosePolicy policy = PipelineChoosePolicyFactory
- .getPolicy(scmConfig, false);
+ .getPolicy(nodeManager, scmConfig, false);
assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
policy.getClass());
}
@@ -60,7 +65,7 @@ public void testDefaultPolicy() throws IOException {
@Test
public void testDefaultPolicyEC() throws IOException {
PipelineChoosePolicy policy = PipelineChoosePolicyFactory
- .getPolicy(scmConfig, true);
+ .getPolicy(nodeManager, scmConfig, true);
assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
policy.getClass());
}
@@ -69,7 +74,7 @@ public void testDefaultPolicyEC() throws IOException {
public void testNonDefaultPolicyEC() throws IOException {
scmConfig.setECPipelineChoosePolicyName(DummyGoodImpl.class.getName());
PipelineChoosePolicy policy = PipelineChoosePolicyFactory
- .getPolicy(scmConfig, true);
+ .getPolicy(nodeManager, scmConfig, true);
assertSame(DummyGoodImpl.class, policy.getClass());
}
@@ -121,10 +126,10 @@ public void testConstructorNotFound() throws SCMException {
scmConfig.setPipelineChoosePolicyName(DummyImpl.class.getName());
scmConfig.setECPipelineChoosePolicyName(DummyImpl.class.getName());
PipelineChoosePolicy policy =
- PipelineChoosePolicyFactory.getPolicy(scmConfig, false);
+ PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, false);
assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
policy.getClass());
- policy = PipelineChoosePolicyFactory.getPolicy(scmConfig, true);
+ policy = PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, true);
assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
policy.getClass());
}
@@ -137,10 +142,10 @@ public void testClassNotImplemented() throws SCMException {
scmConfig.setECPipelineChoosePolicyName(
"org.apache.hadoop.hdds.scm.pipeline.choose.policy.HelloWorld");
PipelineChoosePolicy policy =
- PipelineChoosePolicyFactory.getPolicy(scmConfig, false);
+ PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, false);
assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
policy.getClass());
- policy = PipelineChoosePolicyFactory.getPolicy(scmConfig, true);
+ policy = PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, true);
assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT,
policy.getClass());
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
index 31cd2db1e55..98f16394902 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
@@ -23,9 +23,7 @@
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.List;
-import java.util.UUID;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -47,6 +45,7 @@
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.LoggerFactory;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -56,6 +55,8 @@
* This class tests HealthyPipelineSafeMode rule.
*/
public class TestHealthyPipelineSafeModeRule {
+ @TempDir
+ private File tempFile;
@Test
public void testHealthyPipelineSafeModeRuleWithNoPipelines()
@@ -66,12 +67,9 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines()
List<ContainerInfo> containers =
new ArrayList<>(HddsTestUtils.getContainerInfo(1));
- String storageDir = GenericTestUtils.getTempPath(
- TestHealthyPipelineSafeModeRule.class.getName() +
- UUID.randomUUID());
OzoneConfiguration config = new OzoneConfiguration();
MockNodeManager nodeManager = new MockNodeManager(true, 0);
- config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+ config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath());
// enable pipeline check
config.setBoolean(
HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
@@ -106,14 +104,11 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines()
assertTrue(healthyPipelineSafeModeRule.validate());
} finally {
scmMetadataStore.getStore().close();
- FileUtil.fullyDelete(new File(storageDir));
}
}
@Test
public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {
- String storageDir = GenericTestUtils.getTempPath(
- TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
EventQueue eventQueue = new EventQueue();
SCMServiceManager serviceManager = new SCMServiceManager();
@@ -126,7 +121,7 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {
// stale and last one is dead, and this repeats. So for a 12 node, 9
// healthy, 2 stale and one dead.
MockNodeManager nodeManager = new MockNodeManager(true, 12);
- config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+ config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath());
// enable pipeline check
config.setBoolean(
HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
@@ -201,7 +196,6 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {
1000, 5000);
} finally {
scmMetadataStore.getStore().close();
- FileUtil.fullyDelete(new File(storageDir));
}
}
@@ -209,10 +203,6 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {
@Test
public void testHealthyPipelineSafeModeRuleWithMixedPipelines()
throws Exception {
-
- String storageDir = GenericTestUtils.getTempPath(
- TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
-
EventQueue eventQueue = new EventQueue();
SCMServiceManager serviceManager = new SCMServiceManager();
SCMContext scmContext = SCMContext.emptyContext();
@@ -225,7 +215,7 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines()
// stale and last one is dead, and this repeats. So for a 12 node, 9
// healthy, 2 stale and one dead.
MockNodeManager nodeManager = new MockNodeManager(true, 12);
- config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+ config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath());
// enable pipeline check
config.setBoolean(
HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
@@ -308,7 +298,6 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines()
} finally {
scmMetadataStore.getStore().close();
- FileUtil.fullyDelete(new File(storageDir));
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index 79adf009f00..319caabe40a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -19,17 +19,17 @@
import java.io.File;
import java.io.IOException;
-import java.nio.file.Path;
import java.time.Clock;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-import java.util.UUID;
+import java.util.Map;
+import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.fs.FileUtil;
+import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -85,9 +85,11 @@ public class TestSCMSafeModeManager {
private List<ContainerInfo> containers = Collections.emptyList();
private SCMMetadataStore scmMetadataStore;
+ @TempDir
+ private File tempDir;
@BeforeEach
- public void setUp(@TempDir Path tempDir) throws IOException {
+ public void setUp() throws IOException {
queue = new EventQueue();
scmContext = SCMContext.emptyContext();
serviceManager = new SCMServiceManager();
@@ -95,7 +97,7 @@ public void setUp(@TempDir Path tempDir) throws IOException {
config.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION,
false);
config.set(HddsConfigKeys.OZONE_METADATA_DIRS,
- tempDir.toAbsolutePath().toString());
+ tempDir.getAbsolutePath());
scmMetadataStore = new SCMMetadataStoreImpl(config);
}
@@ -135,6 +137,7 @@ private void testSafeMode(int numContainers) throws Exception {
serviceManager, scmContext);
assertTrue(scmSafeModeManager.getInSafeMode());
+ validateRuleStatus("DatanodeSafeModeRule", "registered datanodes 0");
queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
HddsTestUtils.createNodeRegistrationContainerReport(containers));
@@ -176,7 +179,8 @@ public void testSafeModeExitRule() throws Exception {
.getNumContainerWithOneReplicaReportedThreshold().value());
assertTrue(scmSafeModeManager.getInSafeMode());
-
+ validateRuleStatus("ContainerSafeModeRule",
+ "% of containers with at least one reported");
testContainerThreshold(containers.subList(0, 25), 0.25);
assertEquals(25, scmSafeModeManager.getSafeModeMetrics()
.getCurrentContainersWithOneReplicaReportedCount().value());
@@ -316,6 +320,13 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck(
scmContext);
assertTrue(scmSafeModeManager.getInSafeMode());
+ if (healthyPipelinePercent > 0) {
+ validateRuleStatus("HealthyPipelineSafeModeRule",
+ "healthy Ratis/THREE pipelines");
+ }
+ validateRuleStatus("OneReplicaPipelineSafeModeRule",
+ "reported Ratis/THREE pipelines with at least one datanode");
+
testContainerThreshold(containers, 1.0);
List<Pipeline> pipelines = pipelineManager.getPipelines();
@@ -374,6 +385,22 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck(
100, 1000 * 5);
}
+ /**
+ * @param safeModeRule the rule expected to be not yet satisfied
+ * @param stringToMatch string expected in that rule's status message.
+ */
+ private void validateRuleStatus(String safeModeRule, String stringToMatch) {
+ Set<Map.Entry<String, Pair<Boolean, String>>> ruleStatuses =
+ scmSafeModeManager.getRuleStatus().entrySet();
+ for (Map.Entry<String, Pair<Boolean, String>> entry : ruleStatuses) {
+ if (entry.getKey().equals(safeModeRule)) {
+ Pair<Boolean, String> value = entry.getValue();
+ assertEquals(false, value.getLeft());
+ assertThat(value.getRight()).contains(stringToMatch);
+ }
+ }
+ }
+
private void checkHealthy(int expectedCount) throws Exception {
GenericTestUtils.waitFor(() -> scmSafeModeManager
.getHealthyPipelineSafeModeRule()
@@ -528,11 +555,8 @@ private void testContainerThreshold(List<ContainerInfo> dnContainers,
public void testSafeModePipelineExitRule() throws Exception {
containers = new ArrayList<>();
containers.addAll(HddsTestUtils.getContainerInfo(25 * 4));
- String storageDir = GenericTestUtils.getTempPath(
- TestSCMSafeModeManager.class.getName() + UUID.randomUUID());
try {
MockNodeManager nodeManager = new MockNodeManager(true, 3);
- config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
// enable pipeline check
config.setBoolean(
HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
@@ -578,13 +602,11 @@ public void testSafeModePipelineExitRule() throws Exception {
config.setBoolean(
HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
false);
- FileUtil.fullyDelete(new File(storageDir));
}
}
@Test
- public void testPipelinesNotCreatedUntilPreCheckPasses()
- throws Exception {
+ public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception {
int numOfDns = 5;
// enable pipeline check
config.setBoolean(
@@ -594,12 +616,6 @@ public void testPipelinesNotCreatedUntilPreCheckPasses()
true);
MockNodeManager nodeManager = new MockNodeManager(true, numOfDns);
- String storageDir = GenericTestUtils.getTempPath(
- TestSCMSafeModeManager.class.getName() + UUID.randomUUID());
- config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
- // enable pipeline check
- config.setBoolean(
- HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
PipelineManagerImpl pipelineManager =
PipelineManagerImpl.newPipelineManager(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
index 7089f68ec71..b82ce15a384 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.scm.security;
import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
@@ -40,6 +39,7 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -56,7 +56,7 @@
import java.util.concurrent.TimeoutException;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_ACK_TIMEOUT;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_CHECK_INTERNAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_ENABLED;
@@ -66,7 +66,6 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_ROOTCA_CERTIFICATE_POLLING_INTERVAL;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -94,6 +93,7 @@ public class TestRootCARotationManager {
private SCMSecurityProtocolServer scmSecurityProtocolServer;
private RootCARotationHandlerImpl handler;
private StatefulServiceStateManager statefulServiceStateManager;
+ @TempDir
private File testDir;
private String cID = UUID.randomUUID().toString();
private String scmID = UUID.randomUUID().toString();
@@ -103,8 +103,6 @@ public class TestRootCARotationManager {
public void init() throws IOException, TimeoutException,
CertificateException {
ozoneConfig = new OzoneConfiguration();
- testDir = GenericTestUtils.getTestDir(
- TestRootCARotationManager.class.getSimpleName() + UUID.randomUUID());
ozoneConfig
.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
ozoneConfig
@@ -146,60 +144,37 @@ public void tearDown() throws Exception {
if (rootCARotationManager != null) {
rootCARotationManager.stop();
}
-
- FileUtil.fullyDelete(testDir);
}
@Test
- public void testProperties() {
+ void testProperties() throws Exception {
// invalid check interval
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28");
- try {
- rootCARotationManager = new RootCARotationManager(scm);
- fail("Should fail");
- } catch (Exception e) {
- assertInstanceOf(DateTimeParseException.class, e);
- }
+ assertThrows(DateTimeParseException.class, () -> rootCARotationManager = new RootCARotationManager(scm));
// check interval should be less than grace period
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28D");
- try {
- rootCARotationManager = new RootCARotationManager(scm);
- fail("Should fail");
- } catch (Exception e) {
- assertInstanceOf(IllegalArgumentException.class, e);
- assertThat(e.getMessage()).contains("should be smaller than");
- }
+ IllegalArgumentException ex =
+ assertThrows(IllegalArgumentException.class, () -> rootCARotationManager = new RootCARotationManager(scm));
+ assertThat(ex.getMessage()).contains("should be smaller than");
// invalid time of day format
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P1D");
ozoneConfig.set(HDDS_X509_CA_ROTATION_TIME_OF_DAY, "01:00");
- try {
- rootCARotationManager = new RootCARotationManager(scm);
- fail("Should fail");
- } catch (Exception e) {
- assertInstanceOf(IllegalArgumentException.class, e);
- assertThat(e.getMessage()).contains("should follow the hh:mm:ss format");
- }
+ ex = assertThrows(IllegalArgumentException.class, () -> rootCARotationManager = new RootCARotationManager(scm));
+ assertThat(ex.getMessage()).contains("should follow the hh:mm:ss format");
// valid properties
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P1D");
ozoneConfig.set(HDDS_X509_CA_ROTATION_TIME_OF_DAY, "01:00:00");
- try {
- rootCARotationManager = new RootCARotationManager(scm);
- } catch (Exception e) {
- fail("Should succeed");
- }
+ rootCARotationManager = new RootCARotationManager(scm);
// invalid property value is ignored when auto rotation is disabled.
ozoneConfig.setBoolean(HDDS_X509_CA_ROTATION_ENABLED, false);
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28D");
- try {
- rootCARotationManager = new RootCARotationManager(scm);
- } catch (Exception e) {
- fail("Should succeed");
- }
+
+ rootCARotationManager = new RootCARotationManager(scm);
}
@Test
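
For reference, the try/fail blocks above are converted to the JUnit 5 assertThrows idiom. A minimal standalone sketch of the same pattern follows; the test class and the parsed value are illustrative and not part of the patch:

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.time.Duration;
    import java.time.format.DateTimeParseException;
    import org.junit.jupiter.api.Test;

    class AssertThrowsPatternTest {
      @Test
      void rejectsMalformedDuration() {
        // assertThrows returns the thrown exception, so the message can be checked
        // directly instead of catching it and calling fail() on the success path.
        DateTimeParseException ex =
            assertThrows(DateTimeParseException.class, () -> Duration.parse("P28"));
        assertThat(ex.getMessage()).contains("Duration");
      }
    }
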
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
index 79be275788a..7c06b79a2ff 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
@@ -31,6 +31,9 @@
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+import java.io.File;
import java.io.IOException;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS;
@@ -50,8 +53,8 @@ public class TestSCMClientProtocolServer {
private StorageContainerLocationProtocolServerSideTranslatorPB service;
@BeforeEach
- void setUp() throws Exception {
- config = SCMTestUtils.getConf();
+ void setUp(@TempDir File testDir) throws Exception {
+ config = SCMTestUtils.getConf(testDir);
SCMConfigurator configurator = new SCMConfigurator();
configurator.setSCMHAManager(SCMHAManagerStub.getInstance(true));
configurator.setScmContext(SCMContext.emptyContext());
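
The setUp change above relies on JUnit 5 @TempDir parameter injection instead of hand-managed test directories. A hedged, self-contained sketch of that mechanism (class and file names are illustrative):

    import java.io.File;
    import java.nio.file.Files;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class TempDirExampleTest {
      private File metadataDir;

      // JUnit creates the directory before the test and deletes it afterwards,
      // replacing manual getTestDir()/FileUtil.fullyDelete() housekeeping.
      @BeforeEach
      void setUp(@TempDir File testDir) {
        metadataDir = new File(testDir, "metadata");
      }

      @Test
      void writesIntoManagedDirectory() throws Exception {
        assertTrue(metadataDir.mkdirs());
        Files.write(new File(metadataDir, "marker").toPath(), new byte[] {1});
      }
    }
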
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 46cd784c47e..58f65df8fd8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -100,6 +100,9 @@ public class TestEndPoint {
private static DatanodeLayoutStorage layoutStorage;
private static DatanodeDetails dnDetails;
+ @TempDir
+ private File tempDir;
+
@AfterAll
public static void tearDown() throws Exception {
if (scmServer != null) {
@@ -110,7 +113,7 @@ public static void tearDown() throws Exception {
@BeforeAll
static void setUp() throws Exception {
serverAddress = SCMTestUtils.getReuseableAddress();
- ozoneConf = SCMTestUtils.getConf();
+ ozoneConf = SCMTestUtils.getConf(testDir);
scmServerImpl = new ScmTestMock();
dnDetails = randomDatanodeDetails();
layoutStorage = new DatanodeLayoutStorage(ozoneConf,
@@ -128,7 +131,7 @@ static void setUp() throws Exception {
@Test
public void testGetVersion() throws Exception {
try (EndpointStateMachine rpcEndPoint =
- createEndpoint(SCMTestUtils.getConf(),
+ createEndpoint(SCMTestUtils.getConf(tempDir),
serverAddress, 1000)) {
SCMVersionResponseProto responseProto = rpcEndPoint.getEndPoint()
.getVersion(null);
@@ -316,7 +319,7 @@ public void testDnLayoutVersionFile() throws Exception {
*/
@Test
public void testGetVersionToInvalidEndpoint() throws Exception {
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(tempDir);
InetSocketAddress nonExistentServerAddress = SCMTestUtils
.getReuseableAddress();
try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
@@ -344,7 +347,7 @@ public void testGetVersionToInvalidEndpoint() throws Exception {
public void testGetVersionAssertRpcTimeOut() throws Exception {
final long rpcTimeout = 1000;
final long tolerance = 100;
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(tempDir);
try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
serverAddress, (int) rpcTimeout)) {
@@ -369,7 +372,7 @@ public void testGetVersionAssertRpcTimeOut() throws Exception {
public void testRegister() throws Exception {
DatanodeDetails nodeToRegister = randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint = createEndpoint(
- SCMTestUtils.getConf(), serverAddress, 1000)) {
+ SCMTestUtils.getConf(tempDir), serverAddress, 1000)) {
SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
.register(nodeToRegister.getExtendedProtoBufMessage(), HddsTestUtils
.createNodeReport(
@@ -403,7 +406,7 @@ private MetadataStorageReportProto getMetadataStorageReports(UUID id) {
private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
int rpcTimeout, boolean clearDatanodeDetails
) throws Exception {
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(tempDir);
EndpointStateMachine rpcEndPoint =
createEndpoint(conf,
scmAddress, rpcTimeout);
@@ -481,7 +484,7 @@ public void testRegisterRpcTimeout() throws Exception {
public void testHeartbeat() throws Exception {
DatanodeDetails dataNode = randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint =
- createEndpoint(SCMTestUtils.getConf(),
+ createEndpoint(SCMTestUtils.getConf(tempDir),
serverAddress, 1000)) {
SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
.setDatanodeDetails(dataNode.getProtoBufMessage())
@@ -501,7 +504,7 @@ public void testHeartbeat() throws Exception {
public void testHeartbeatWithCommandStatusReport() throws Exception {
DatanodeDetails dataNode = randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint =
- createEndpoint(SCMTestUtils.getConf(),
+ createEndpoint(SCMTestUtils.getConf(tempDir),
serverAddress, 1000)) {
// Add some scmCommands for heartbeat response
addScmCommands();
@@ -572,7 +575,7 @@ private StateContext heartbeatTaskHelper(
InetSocketAddress scmAddress,
int rpcTimeout
) throws Exception {
- OzoneConfiguration conf = SCMTestUtils.getConf();
+ OzoneConfiguration conf = SCMTestUtils.getConf(tempDir);
// Mini Ozone cluster will not come up if the port is not true, since
// Ratis will exit if the server port cannot be bound. We can remove this
// hard coding once we fix the Ratis default behaviour.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 0a865043356..92a6fd455d8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -371,13 +371,11 @@ public RegisteredCommand register(DatanodeDetails dd,
* Send heartbeat to indicate the datanode is alive and doing well.
*
* @param dd - Datanode Details.
- * @param layoutInfo - Layout Version Proto
* @param commandQueueReportProto - Command Queue Report Proto
* @return SCMheartbeat response list
*/
@Override
public List<SCMCommand> processHeartbeat(DatanodeDetails dd,
- LayoutVersionProto layoutInfo,
CommandQueueReportProto commandQueueReportProto) {
return null;
}
diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml
index 4f78bd1f14d..ea3c94b65a6 100644
--- a/hadoop-hdds/test-utils/pom.xml
+++ b/hadoop-hdds/test-utils/pom.xml
@@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- </dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
@@ -66,6 +62,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>ch.qos.reload4j</groupId>
<artifactId>reload4j</artifactId>
+ <dependency>
+ <groupId>jakarta.annotation</groupId>
+ <artifactId>jakarta.annotation-api</artifactId>
+ </dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java
deleted file mode 100644
index cddbbd18080..00000000000
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.test;
-
-import org.junit.rules.TestRule;
-import org.junit.runner.Description;
-import org.junit.runners.model.Statement;
-
-import java.util.Objects;
-
-/**
- * Disables the delegate rule if the given system property matches a specific
- * value.
- */
-public class DisableOnProperty implements TestRule {
-
- private final TestRule delegate;
- private final boolean enabled;
-
- public DisableOnProperty(TestRule delegate, String key, String value) {
- this.delegate = delegate;
- enabled = !Objects.equals(value, System.getProperty(key, ""));
- }
-
- @Override
- public Statement apply(Statement base, Description description) {
- return enabled ? delegate.apply(base, description) : base;
- }
-}
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
index 406a58768a8..c9fa668445d 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
@@ -24,6 +24,9 @@
import java.io.PrintStream;
import java.io.StringWriter;
import java.io.UnsupportedEncodingException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;
@@ -73,6 +76,18 @@ public abstract class GenericTestUtils {
"target" + File.separator + "test" + File.separator + "data";
}
+ /**
+ * Return current time in millis as an {@code Instant}. This may be
+ * before {@link Instant#now()}, since the latter includes nanoseconds, too.
+ * This is needed for some tests that verify volume/bucket creation time,
+ * which also uses {@link Instant#ofEpochMilli(long)}.
+ *
+ * @return current time as {@code Instant};
+ */
+ public static Instant getTestStartTime() {
+ return Instant.ofEpochMilli(System.currentTimeMillis());
+ }
+
/**
* Get the (created) base directory for tests.
*
@@ -203,17 +218,13 @@ public static void setLogLevel(org.slf4j.Logger logger,
setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
}
- public static void setRootLogLevel(org.slf4j.event.Level level) {
- setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
- }
-
public static <T> T mockFieldReflection(Object object, String fieldName)
throws NoSuchFieldException, IllegalAccessException {
Field field = object.getClass().getDeclaredField(fieldName);
boolean isAccessible = field.isAccessible();
field.setAccessible(true);
- Field modifiersField = Field.class.getDeclaredField("modifiers");
+ Field modifiersField = ReflectionUtils.getModifiersField();
boolean modifierFieldAccessible = modifiersField.isAccessible();
modifiersField.setAccessible(true);
int modifierVal = modifiersField.getInt(field);
@@ -233,7 +244,7 @@ public static <T> T getFieldReflection(Object object, String fieldName)
boolean isAccessible = field.isAccessible();
field.setAccessible(true);
- Field modifiersField = Field.class.getDeclaredField("modifiers");
+ Field modifiersField = ReflectionUtils.getModifiersField();
boolean modifierFieldAccessible = modifiersField.isAccessible();
modifiersField.setAccessible(true);
int modifierVal = modifiersField.getInt(field);
@@ -455,4 +466,45 @@ public static String anyHostWithFreePort() {
}
}
+ /**
+ * This class is a utility class for java reflection operations.
+ */
+ public static final class ReflectionUtils {
+
+ /**
+ * This method provides the modifiers field using a reflection approach that is
+ * compatible with both pre-Java 9 and post-Java 9 versions.
+ * @return modifiers field
+ * @throws IllegalAccessException
+ * @throws NoSuchFieldException
+ */
+ public static Field getModifiersField() throws IllegalAccessException, NoSuchFieldException {
+ Field modifiersField = null;
+ try {
+ modifiersField = Field.class.getDeclaredField("modifiers");
+ } catch (NoSuchFieldException e) {
+ try {
+ Method getDeclaredFields0 = Class.class.getDeclaredMethod(
+ "getDeclaredFields0", boolean.class);
+ boolean accessibleBeforeSet = getDeclaredFields0.isAccessible();
+ getDeclaredFields0.setAccessible(true);
+ Field[] fields = (Field[]) getDeclaredFields0.invoke(Field.class, false);
+ getDeclaredFields0.setAccessible(accessibleBeforeSet);
+ for (Field field : fields) {
+ if ("modifiers".equals(field.getName())) {
+ modifiersField = field;
+ break;
+ }
+ }
+ if (modifiersField == null) {
+ throw e;
+ }
+ } catch (InvocationTargetException | NoSuchMethodException ex) {
+ e.addSuppressed(ex);
+ throw e;
+ }
+ }
+ return modifiersField;
+ }
+ }
}
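
The ReflectionUtils.getModifiersField() helper above exists because Field.class.getDeclaredField("modifiers") is filtered on newer JDKs and throws NoSuchFieldException. A hedged sketch of how such a helper is typically used to overwrite a final field in a test; FinalFieldPatcher is illustrative, not part of the patch, and on recent JDKs this may additionally require --add-opens java.base/java.lang.reflect=ALL-UNNAMED:

    import java.lang.reflect.Field;
    import java.lang.reflect.Modifier;
    import org.apache.ozone.test.GenericTestUtils;

    final class FinalFieldPatcher {
      private FinalFieldPatcher() { }

      // Clears the FINAL bit on the declared field, then writes the new value.
      static void setFinalField(Object target, String fieldName, Object value) throws Exception {
        Field field = target.getClass().getDeclaredField(fieldName);
        field.setAccessible(true);
        Field modifiers = GenericTestUtils.ReflectionUtils.getModifiersField();
        modifiers.setAccessible(true);
        modifiers.setInt(field, field.getModifiers() & ~Modifier.FINAL);
        field.set(target, value);
      }
    }
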
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
index 28d3b936eca..83cc8465169 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
@@ -20,8 +20,8 @@
import static org.apache.hadoop.metrics2.lib.Interns.info;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.AdditionalMatchers.geq;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
@@ -47,6 +47,8 @@
/**
* Helpers for metrics source tests.
+ *
+ * Copied from Hadoop and migrated to AssertJ.
*/
public final class MetricsAsserts {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/Predicates.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java
similarity index 60%
rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/Predicates.java
rename to hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java
index 58e79ef05e7..bb675bddafd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/Predicates.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java
@@ -6,42 +6,39 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.function;
+package org.apache.ozone.test;
-import java.util.function.BiPredicate;
-import java.util.function.Predicate;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.TestInfo;
+
+import java.lang.reflect.Method;
/**
- * Common predicates.
+ * Base class for Ozone JUnit tests.
+ * Provides the test method name, which can be used to create unique items.
*/
-public final class Predicates {
+public abstract class OzoneTestBase {
- public static <T> Predicate<T> yes() {
- return x -> true;
- }
+ private TestInfo info;
- public static <T> Predicate<T> no() {
- return x -> false;
+ @BeforeEach
+ void storeTestInfo(TestInfo testInfo) {
+ this.info = testInfo;
}
- public static <T, U> BiPredicate<T, U> yesBi() {
- return (t, u) -> true;
+ protected String getTestName() {
+ return info.getTestMethod()
+ .map(Method::getName)
+ .orElse("unknown");
}
- public static <T, U> BiPredicate<T, U> noBi() {
- return (t, u) -> false;
- }
-
- private Predicates() {
- // no instances
- }
}
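
A short sketch of how a test can build unique names on top of OzoneTestBase; the subclass below is illustrative, not from the patch:

    import org.apache.ozone.test.OzoneTestBase;
    import org.junit.jupiter.api.Test;
    import static org.junit.jupiter.api.Assertions.assertEquals;

    class UniqueNamesExampleTest extends OzoneTestBase {

      @Test
      void derivesBucketNameFromTestMethod() {
        // getTestName() resolves to the running test method, so names derived from
        // it do not collide between tests that share a cluster or volume.
        String bucket = ("bucket-" + getTestName()).toLowerCase();
        assertEquals("bucket-derivesbucketnamefromtestmethod", bucket);
      }
    }
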
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java
index 390d69a083c..e27776c9e98 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/TimedOutTestsListener.java
@@ -35,7 +35,7 @@
import org.junit.platform.launcher.TestExecutionListener;
import org.junit.platform.launcher.TestIdentifier;
-import javax.annotation.Nullable;
+import jakarta.annotation.Nullable;
/**
* JUnit test execution listener which prints full thread dump to System.err
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 665b56d3ab0..5122f1d4a45 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 7aa91cec73c..d07e696e7ef 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -59,6 +59,7 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.UUID;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT;
@@ -215,6 +216,11 @@ public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
}
}
+ @Override
+ public Map<String, List<ContainerID>> getContainersOnDecomNode(DatanodeDetails dn) throws IOException {
+ return storageContainerLocationClient.getContainersOnDecomNode(dn);
+ }
+
@Override
public List<HddsProtos.Node> queryNode(
HddsProtos.NodeOperationalState opState,
@@ -225,6 +231,11 @@ public List<HddsProtos.Node> queryNode(
queryScope, poolName, ClientVersion.CURRENT_VERSION);
}
+ @Override
+ public HddsProtos.Node queryNode(UUID uuid) throws IOException {
+ return storageContainerLocationClient.queryNode(uuid);
+ }
+
@Override
public List<DatanodeAdminError> decommissionNodes(List<String> hosts)
throws IOException {
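
The two new pass-throughs give CLI code a single-node lookup and a per-datanode view of the containers still blocking decommission. A hedged usage sketch; DecomProgressPrinter is illustrative and assumes an already constructed ScmClient:

    import java.io.IOException;
    import java.util.List;
    import java.util.Map;
    import java.util.UUID;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.ContainerID;

    final class DecomProgressPrinter {
      private DecomProgressPrinter() { }

      static void print(ScmClient scmClient, UUID datanodeUuid) throws IOException {
        HddsProtos.Node node = scmClient.queryNode(datanodeUuid);
        DatanodeDetails dn = DatanodeDetails.getFromProtoBuf(node.getNodeID());
        // Keys are state labels (e.g. "UnderReplicated", "UnClosed"), values are container IDs.
        Map<String, List<ContainerID>> containers = scmClient.getContainersOnDecomNode(dn);
        containers.forEach((state, ids) -> System.out.println(state + ": " + ids));
      }
    }
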
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java
index 554316c2e92..7ef34236bf2 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java
@@ -52,6 +52,10 @@ public class ReportSubcommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
ReplicationManagerReport report = scmClient.getReplicationManagerReport();
+ if (report.getReportTimeStamp() == 0) {
+ System.err.println("The Container Report is not available until Replication Manager completes" +
+ " its first run after startup or failover. All values will be zero until that time.\n");
+ }
if (json) {
output(JsonUtils.toJsonStringWithDefaultPrettyPrinter(report));
@@ -68,9 +72,11 @@ public void execute(ScmClient scmClient) throws IOException {
}
private void outputHeader(long epochMs) {
+ if (epochMs == 0) {
+ epochMs = Instant.now().toEpochMilli();
+ }
Instant reportTime = Instant.ofEpochSecond(epochMs / 1000);
outputHeading("Container Summary Report generated at " + reportTime);
-
}
private void outputContainerStats(ReplicationManagerReport report) {
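
The header logic above boils down to: a zero timestamp means Replication Manager has not finished a run, so fall back to the current time and truncate to seconds. A standalone sketch of that arithmetic (the helper name is illustrative):

    import java.time.Instant;

    final class ReportTimeFallback {
      private ReportTimeFallback() { }

      static Instant headerTime(long reportTimeStampMs) {
        // Zero means Replication Manager has not completed a run yet, so stamp the
        // header with "now" instead of 1970-01-01; output keeps second precision.
        long epochMs = reportTimeStampMs == 0 ? Instant.now().toEpochMilli() : reportTimeStampMs;
        return Instant.ofEpochSecond(epochMs / 1000);
      }
    }
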
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
index bbf1d840760..b53632f8eec 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
@@ -23,10 +23,12 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import picocli.CommandLine;
import java.io.IOException;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -83,6 +85,8 @@ public void execute(ScmClient scmClient) throws IOException {
DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
node.getNodeID());
printDetails(datanode);
+ Map<String, List<ContainerID>> containers = scmClient.getContainersOnDecomNode(datanode);
+ System.out.println(containers);
}
}
private void printDetails(DatanodeDetails datanode) {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
index 23ff9176df9..e7d3a444383 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Scanner;
/**
* Decommission one or more datanodes.
@@ -41,12 +42,26 @@ public class DecommissionSubCommand extends ScmSubcommand {
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
- @CommandLine.Parameters(description = "List of fully qualified host names")
- private List<String> hosts = new ArrayList<>();
+ @CommandLine.Parameters(description = "One or more host names separated by spaces. " +
+ "To read from stdin, specify '-' and supply the host names " +
+ "separated by newlines.",
+ paramLabel = "")
+ private List<String> parameters = new ArrayList<>();
@Override
public void execute(ScmClient scmClient) throws IOException {
- if (hosts.size() > 0) {
+ if (parameters.size() > 0) {
+ List<String> hosts;
+ // Whether to read from stdin
+ if (parameters.get(0).equals("-")) {
+ hosts = new ArrayList<>();
+ Scanner scanner = new Scanner(System.in, "UTF-8");
+ while (scanner.hasNextLine()) {
+ hosts.add(scanner.nextLine().trim());
+ }
+ } else {
+ hosts = parameters;
+ }
List<DatanodeAdminError> errors = scmClient.decommissionNodes(hosts);
System.out.println("Started decommissioning datanode(s):\n" +
String.join("\n", hosts));
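
The same '-'/stdin handling is duplicated in the Maintenance and Recommission subcommands further below. A self-contained sketch of the parsing rule; HostArgs is illustrative, not a class in the patch. The corresponding tests feed input by swapping System.in, so remember that any swapped stream should be restored afterwards:

    import java.io.InputStream;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Scanner;

    final class HostArgs {
      private HostArgs() { }

      // "-" as the first positional parameter switches to reading one host name per
      // line from stdin; otherwise the positional parameters are the host list.
      static List<String> resolveHosts(List<String> parameters, InputStream stdin) {
        if (!parameters.isEmpty() && "-".equals(parameters.get(0))) {
          List<String> hosts = new ArrayList<>();
          Scanner scanner = new Scanner(stdin, "UTF-8");
          while (scanner.hasNextLine()) {
            hosts.add(scanner.nextLine().trim());
          }
          return hosts;
        }
        return parameters;
      }
    }
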
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index db12ee2aacb..325e362d4f4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -29,6 +29,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -82,6 +83,15 @@ public class ListInfoSubcommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
pipelines = scmClient.listPipelines();
+ if (!Strings.isNullOrEmpty(uuid)) {
+ HddsProtos.Node node = scmClient.queryNode(UUID.fromString(uuid));
+ DatanodeWithAttributes dwa = new DatanodeWithAttributes(DatanodeDetails
+ .getFromProtoBuf(node.getNodeID()),
+ node.getNodeOperationalStates(0),
+ node.getNodeStates(0));
+ printDatanodeInfo(dwa);
+ return;
+ }
Stream<DatanodeWithAttributes> allNodes = getAllNodes(scmClient).stream();
if (!Strings.isNullOrEmpty(ipaddress)) {
allNodes = allNodes.filter(p -> p.getDatanodeDetails().getIpAddress()
@@ -91,10 +101,6 @@ public void execute(ScmClient scmClient) throws IOException {
allNodes = allNodes.filter(p -> p.getDatanodeDetails().getHostName()
.compareToIgnoreCase(hostname) == 0);
}
- if (!Strings.isNullOrEmpty(uuid)) {
- allNodes = allNodes.filter(p ->
- p.getDatanodeDetails().getUuidString().equals(uuid));
- }
if (!Strings.isNullOrEmpty(nodeOperationalState)) {
allNodes = allNodes.filter(p -> p.getOpState().toString()
.compareToIgnoreCase(nodeOperationalState) == 0);
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
index a64c400f66f..82d263b416f 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Scanner;
/**
* Place one or more datanodes into Maintenance Mode.
@@ -41,8 +42,11 @@ public class MaintenanceSubCommand extends ScmSubcommand {
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
- @CommandLine.Parameters(description = "List of fully qualified host names")
- private List<String> hosts = new ArrayList<>();
+ @CommandLine.Parameters(description = "One or more host names separated by spaces. " +
+ "To read from stdin, specify '-' and supply the host names " +
+ "separated by newlines.",
+ paramLabel = "")
+ private List<String> parameters = new ArrayList<>();
@CommandLine.Option(names = {"--end"},
description = "Automatically end maintenance after the given hours. " +
@@ -51,7 +55,18 @@ public class MaintenanceSubCommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
- if (hosts.size() > 0) {
+ if (parameters.size() > 0) {
+ List<String> hosts;
+ // Whether to read from stdin
+ if (parameters.get(0).equals("-")) {
+ hosts = new ArrayList<>();
+ Scanner scanner = new Scanner(System.in, "UTF-8");
+ while (scanner.hasNextLine()) {
+ hosts.add(scanner.nextLine().trim());
+ }
+ } else {
+ hosts = parameters;
+ }
List<DatanodeAdminError> errors =
scmClient.startMaintenanceNodes(hosts, endInHours);
System.out.println("Entering maintenance mode on datanode(s):\n" +
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
index 61f7826cf64..e21d61ed3d7 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Scanner;
/**
* Recommission one or more datanodes.
@@ -42,12 +43,26 @@ public class RecommissionSubCommand extends ScmSubcommand {
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
- @CommandLine.Parameters(description = "List of fully qualified host names")
- private List<String> hosts = new ArrayList<>();
+ @CommandLine.Parameters(description = "One or more host names separated by spaces. " +
+ "To read from stdin, specify '-' and supply the host names " +
+ "separated by newlines.",
+ paramLabel = "")
+ private List<String> parameters = new ArrayList<>();
@Override
public void execute(ScmClient scmClient) throws IOException {
- if (hosts.size() > 0) {
+ if (parameters.size() > 0) {
+ List<String> hosts;
+ // Whether to read from stdin
+ if (parameters.get(0).equals("-")) {
+ hosts = new ArrayList<>();
+ Scanner scanner = new Scanner(System.in, "UTF-8");
+ while (scanner.hasNextLine()) {
+ hosts.add(scanner.nextLine().trim());
+ }
+ } else {
+ hosts = parameters;
+ }
List<DatanodeAdminError> errors = scmClient.recommissionNodes(hosts);
System.out.println("Started recommissioning datanode(s):\n" +
String.join("\n", hosts));
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
index c0950e0143f..d8c1addb78e 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java
@@ -56,10 +56,10 @@
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.any;
/**
* Tests for InfoSubCommand class.
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java
index 58eeaee3d28..87d88617e78 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java
@@ -74,18 +74,20 @@ public void testCorrectValuesAppearInEmptyReport() throws IOException {
cmd.execute(scmClient);
+ Pattern p = Pattern.compile("^The Container Report is not available until Replication Manager completes.*");
+ Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) {
- Pattern p = Pattern.compile(
- "^" + state.toString() + ": 0$", Pattern.MULTILINE);
- Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ p = Pattern.compile("^" + state.toString() + ": 0$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
}
for (ReplicationManagerReport.HealthState state :
ReplicationManagerReport.HealthState.values()) {
- Pattern p = Pattern.compile(
- "^" + state.toString() + ": 0$", Pattern.MULTILINE);
- Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ p = Pattern.compile("^" + state.toString() + ": 0$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
}
}
@@ -101,6 +103,10 @@ public void testValidJsonOutput() throws IOException {
c.parseArgs("--json");
cmd.execute(scmClient);
+ Pattern p = Pattern.compile("^The Container Report is not available until Replication Manager completes.*");
+ Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
ObjectMapper mapper = new ObjectMapper();
JsonNode json = mapper.readTree(outContent.toString("UTF-8"));
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
index 33c01e4abd9..3be931c1321 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.scm.cli.container.upgrade;
import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -45,10 +44,9 @@
import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -66,8 +64,8 @@
import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -78,6 +76,7 @@ public class TestUpgradeManager {
private static final String SCM_ID = UUID.randomUUID().toString();
private static final OzoneConfiguration CONF = new OzoneConfiguration();
+ @TempDir
private File testRoot;
private MutableVolumeSet volumeSet;
private UUID datanodeId;
@@ -93,12 +92,6 @@ public void setup() throws Exception {
dc.setContainerSchemaV3Enabled(true);
CONF.setFromObject(dc);
- testRoot =
- GenericTestUtils.getTestDir(TestUpgradeManager.class.getSimpleName());
- if (testRoot.exists()) {
- FileUtils.cleanDirectory(testRoot);
- }
-
final File volume1Path = new File(testRoot, "volume1");
final File volume2Path = new File(testRoot, "volume2");
@@ -142,11 +135,6 @@ public void setup() throws Exception {
chunkManager = new FilePerBlockStrategy(true, blockManager, null);
}
- @AfterEach
- public void after() throws IOException {
- FileUtils.deleteDirectory(testRoot);
- }
-
@Test
public void testUpgrade() throws IOException {
int num = 2;
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
index 902ee5e7a8d..41c31caf1f0 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
@@ -17,8 +17,10 @@
*/
package org.apache.hadoop.hdds.scm.cli.datanode;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -29,7 +31,9 @@
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -38,7 +42,7 @@
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -55,6 +59,7 @@ public class TestDecommissionStatusSubCommand {
private final PrintStream originalErr = System.err;
private DecommissionStatusSubCommand cmd;
private List<HddsProtos.Node> nodes = getNodeDetails(2);
+ private Map<String, List<ContainerID>> containerOnDecom = getContainersOnDecomNodes();
@BeforeEach
public void setup() throws UnsupportedEncodingException {
@@ -74,6 +79,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes); // 2 nodes decommissioning
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
cmd.execute(scmClient);
Pattern p = Pattern.compile("Decommission\\sStatus:\\s" +
@@ -85,9 +91,15 @@ public void testSuccessWhenDecommissionStatus() throws IOException {
p = Pattern.compile("Datanode:\\s.*host0\\)");
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
p = Pattern.compile("Datanode:\\s.*host1\\)");
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
}
@Test
@@ -96,6 +108,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException {
// No nodes in decommissioning. No error is printed
when(scmClient.queryNode(any(), any(), any(), any()))
.thenReturn(new ArrayList<>());
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>());
cmd.execute(scmClient);
Pattern p = Pattern.compile("Decommission\\sStatus:\\s" +
@@ -117,6 +130,7 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes); // 2 nodes decommissioning
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
CommandLine c = new CommandLine(cmd);
c.parseArgs("--id", nodes.get(0).getNodeID().getUuid());
@@ -125,11 +139,17 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException {
Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE);
Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
// as uuid of only host0 is passed, host1 should NOT be displayed
p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE);
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertFalse(m.find());
+ p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertFalse(m.find());
}
@Test
@@ -137,6 +157,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID())))
+ .thenReturn(containerOnDecom);
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID())))
+ .thenReturn(new HashMap<>());
CommandLine c = new CommandLine(cmd);
c.parseArgs("--id", nodes.get(1).getNodeID().getUuid());
@@ -161,6 +185,7 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes); // 2 nodes decommissioning
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
CommandLine c = new CommandLine(cmd);
c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress());
@@ -169,11 +194,17 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException {
Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE);
Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
// as IpAddress of only host1 is passed, host0 should NOT be displayed
p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE);
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertFalse(m.find());
+ p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertFalse(m.find());
}
@Test
@@ -181,6 +212,10 @@ public void testIpOptionDecommissionStatusFail() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID())))
+ .thenReturn(containerOnDecom);
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID())))
+ .thenReturn(new HashMap<>());
CommandLine c = new CommandLine(cmd);
c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress());
@@ -225,4 +260,19 @@ private List<HddsProtos.Node> getNodeDetails(int n) {
return nodesList;
}
+ private Map<String, List<ContainerID>> getContainersOnDecomNodes() {
+ Map<String, List<ContainerID>> containerMap = new HashMap<>();
+ List<ContainerID> underReplicated = new ArrayList<>();
+ underReplicated.add(new ContainerID(1L));
+ underReplicated.add(new ContainerID(2L));
+ underReplicated.add(new ContainerID(3L));
+ containerMap.put("UnderReplicated", underReplicated);
+ List<ContainerID> unclosed = new ArrayList<>();
+ unclosed.add(new ContainerID(10L));
+ unclosed.add(new ContainerID(11L));
+ unclosed.add(new ContainerID(12L));
+ containerMap.put("UnClosed", unclosed);
+ return containerMap;
+ }
+
}
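
The stdin-driven subcommand tests in the next three test classes replace System.in with a ByteArrayInputStream; the hunks shown do not restore it. A hedged save-and-restore sketch that keeps later tests isolated, assuming nothing else in the class resets System.in:

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.Test;

    class StdinSwapExampleTest {
      private final InputStream originalIn = System.in;

      @AfterEach
      void restoreStdin() {
        // Put the real stdin back so subsequent tests are unaffected.
        System.setIn(originalIn);
      }

      @Test
      void feedsHostNamesThroughStdin() {
        System.setIn(new ByteArrayInputStream(
            "host1\nhost2\n".getBytes(StandardCharsets.UTF_8)));
        // ... run the command under test here, as in the tests below ...
      }
    }
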
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
index 7e5b857d179..e7e01ffaa1a 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
@@ -23,6 +23,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -34,8 +35,8 @@
import picocli.CommandLine;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -47,6 +48,7 @@
public class TestDecommissionSubCommand {
private DecommissionSubCommand cmd;
+ private ScmClient scmClient;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
@@ -56,6 +58,7 @@ public class TestDecommissionSubCommand {
@BeforeEach
public void setup() throws UnsupportedEncodingException {
cmd = new DecommissionSubCommand();
+ scmClient = mock(ScmClient.class);
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
}
@@ -66,9 +69,37 @@ public void tearDown() {
System.setErr(originalErr);
}
+ @Test
+ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
+ when(scmClient.decommissionNodes(anyList()))
+ .thenAnswer(invocation -> new ArrayList());
+
+ String input = "host1\nhost2\nhost3\n";
+ System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING)));
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("-");
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Started\\sdecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host1$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host2$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host3$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
@Test
public void testNoErrorsWhenDecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.decommissionNodes(anyList()))
.thenAnswer(invocation -> new ArrayList());
@@ -92,7 +123,6 @@ public void testNoErrorsWhenDecommissioning() throws IOException {
@Test
public void testErrorsReportedWhenDecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.decommissionNodes(anyList()))
.thenAnswer(invocation -> {
ArrayList<DatanodeAdminError> e = new ArrayList<>();
@@ -102,12 +132,7 @@ public void testErrorsReportedWhenDecommissioning() throws IOException {
CommandLine c = new CommandLine(cmd);
c.parseArgs("host1", "host2");
- try {
- cmd.execute(scmClient);
- fail("Should not succeed without an exception");
- } catch (IOException e) {
- // Expected
- }
+ assertThrows(IOException.class, () -> cmd.execute(scmClient));
Pattern p = Pattern.compile(
"^Started\\sdecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE);
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
index b6ae0a8ff4f..1247b783b5c 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
@@ -32,6 +32,7 @@
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import picocli.CommandLine;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
@@ -101,6 +102,32 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput()
assertTrue(m.find());
}
+ @Test
+ public void testDataNodeByUuidOutput()
+ throws Exception {
+ List<HddsProtos.Node> nodes = getNodeDetails();
+
+ ScmClient scmClient = mock(ScmClient.class);
+ when(scmClient.queryNode(any()))
+ .thenAnswer(invocation -> nodes.get(0));
+ when(scmClient.listPipelines())
+ .thenReturn(new ArrayList<>());
+
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("--id", nodes.get(0).getNodeID().getUuid());
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Operational State:\\s+IN_SERVICE$", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile(nodes.get(0).getNodeID().getUuid().toString(),
+ Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
private List<HddsProtos.Node> getNodeDetails() {
List<HddsProtos.Node> nodes = new ArrayList<>();
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
index d3f7f026ddb..d2a4c54b8bf 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
@@ -23,6 +23,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -34,8 +35,8 @@
import picocli.CommandLine;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.mock;
@@ -48,6 +49,7 @@
public class TestMaintenanceSubCommand {
private MaintenanceSubCommand cmd;
+ private ScmClient scmClient;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
@@ -57,6 +59,7 @@ public class TestMaintenanceSubCommand {
@BeforeEach
public void setup() throws UnsupportedEncodingException {
cmd = new MaintenanceSubCommand();
+ scmClient = mock(ScmClient.class);
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
}
@@ -67,9 +70,37 @@ public void tearDown() {
System.setErr(originalErr);
}
+ @Test
+ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
+ when(scmClient.decommissionNodes(anyList()))
+ .thenAnswer(invocation -> new ArrayList());
+
+ String input = "host1\nhost2\nhost3\n";
+ System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING)));
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("-");
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Entering\\smaintenance\\smode\\son\\sdatanode\\(s\\)", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host1$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host2$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host3$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
@Test
public void testNoErrorsWhenEnteringMaintenance() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.startMaintenanceNodes(anyList(), anyInt()))
.thenAnswer(invocation -> new ArrayList());
@@ -94,7 +125,6 @@ public void testNoErrorsWhenEnteringMaintenance() throws IOException {
@Test
public void testErrorsReportedWhenEnteringMaintenance() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.startMaintenanceNodes(anyList(), anyInt()))
.thenAnswer(invocation -> {
ArrayList<DatanodeAdminError> e = new ArrayList<>();
@@ -104,12 +134,7 @@ public void testErrorsReportedWhenEnteringMaintenance() throws IOException {
CommandLine c = new CommandLine(cmd);
c.parseArgs("host1", "host2");
- try {
- cmd.execute(scmClient);
- fail("Should not succeed without an exception");
- } catch (IOException e) {
- // Expected
- }
+ assertThrows(IOException.class, () -> cmd.execute(scmClient));
Pattern p = Pattern.compile(
"^Entering\\smaintenance\\smode\\son\\sdatanode\\(s\\)",
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
index 41ce0d90cb7..e274cd4fd54 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
@@ -23,6 +23,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -34,8 +35,8 @@
import picocli.CommandLine;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -47,6 +48,7 @@
public class TestRecommissionSubCommand {
private RecommissionSubCommand cmd;
+ private ScmClient scmClient;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
@@ -56,6 +58,7 @@ public class TestRecommissionSubCommand {
@BeforeEach
public void setup() throws UnsupportedEncodingException {
cmd = new RecommissionSubCommand();
+ scmClient = mock(ScmClient.class);
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
}
@@ -66,9 +69,37 @@ public void tearDown() {
System.setErr(originalErr);
}
+ @Test
+ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
+ when(scmClient.decommissionNodes(anyList()))
+ .thenAnswer(invocation -> new ArrayList());
+
+ String input = "host1\nhost2\nhost3\n";
+ System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING)));
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("-");
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Started\\srecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host1$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host2$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host3$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
@Test
public void testNoErrorsWhenRecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.recommissionNodes(anyList()))
.thenAnswer(invocation -> new ArrayList());
@@ -92,7 +123,6 @@ public void testNoErrorsWhenRecommissioning() throws IOException {
@Test
public void testErrorsReportedWhenRecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.recommissionNodes(anyList()))
.thenAnswer(invocation -> {
ArrayList<DatanodeAdminError> e = new ArrayList<>();
@@ -102,12 +132,7 @@ public void testErrorsReportedWhenRecommissioning() throws IOException {
CommandLine c = new CommandLine(cmd);
c.parseArgs("host1", "host2");
- try {
- cmd.execute(scmClient);
- fail("Should not succeed without an exception");
- } catch (IOException e) {
- // Expected
- }
+ assertThrows(IOException.class, () -> cmd.execute(scmClient));
Pattern p = Pattern.compile(
"^Started\\srecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE);
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
index db777e4396e..09f6621735e 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
@@ -37,9 +37,9 @@
import java.util.List;
import static com.fasterxml.jackson.databind.node.JsonNodeType.ARRAY;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.mock;
@@ -111,20 +111,20 @@ public void testOutputDataFieldsAligning() throws IOException {
// then
String output = outContent.toString(CharEncoding.UTF_8);
- assertTrue(output.contains("UUID :"));
- assertTrue(output.contains("IP Address :"));
- assertTrue(output.contains("Hostname :"));
- assertTrue(output.contains("Capacity :"));
- assertTrue(output.contains("Total Used :"));
- assertTrue(output.contains("Total Used % :"));
- assertTrue(output.contains("Ozone Used :"));
- assertTrue(output.contains("Ozone Used % :"));
- assertTrue(output.contains("Remaining :"));
- assertTrue(output.contains("Remaining % :"));
- assertTrue(output.contains("Container(s) :"));
- assertTrue(output.contains("Container Pre-allocated :"));
- assertTrue(output.contains("Remaining Allocatable :"));
- assertTrue(output.contains("Free Space To Spare :"));
+ assertThat(output).contains("UUID :");
+ assertThat(output).contains("IP Address :");
+ assertThat(output).contains("Hostname :");
+ assertThat(output).contains("Capacity :");
+ assertThat(output).contains("Total Used :");
+ assertThat(output).contains("Total Used % :");
+ assertThat(output).contains("Ozone Used :");
+ assertThat(output).contains("Ozone Used % :");
+ assertThat(output).contains("Remaining :");
+ assertThat(output).contains("Remaining % :");
+ assertThat(output).contains("Container(s) :");
+ assertThat(output).contains("Container Pre-allocated :");
+ assertThat(output).contains("Remaining Allocatable :");
+ assertThat(output).contains("Free Space To Spare :");
}
private List getUsageProto() {
diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml
index a5598311c4c..a5a43643618 100644
--- a/hadoop-ozone/client/pom.xml
+++ b/hadoop-ozone/client/pom.xml
@@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
Apache Ozone Client
jar
- false
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 441d9143b59..ca885b3b6b0 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -413,6 +413,12 @@ public void setListCacheSize(int listCacheSize) {
this.listCacheSize = listCacheSize;
}
+ @Deprecated
+ public void setEncryptionKey(String bekName) throws IOException {
+ proxy.setEncryptionKey(volumeName, name, bekName);
+ encryptionKeyName = bekName;
+ }
+
/**
* Creates a new key in the bucket, with default replication type RATIS and
* with replication factor THREE.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 5316f7a99e9..46e7e20b51b 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -23,7 +23,7 @@
import java.util.List;
import java.util.Map;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -997,6 +997,24 @@ void setBucketQuota(String volumeName, String bucketName,
void setReplicationConfig(String volumeName, String bucketName,
ReplicationConfig replicationConfig) throws IOException;
+ /**
+ * Set Bucket Encryption Key (BEK).
+ *
+ * @param volumeName volume of the bucket
+ * @param bucketName bucket whose encryption key is being set
+ * @param bekName name of the new bucket encryption key
+ * @throws IOException if the bucket property cannot be updated
+ * @deprecated Resetting a bucket's encryption key is not intended for
+ * normal use and this method may be removed in the future. It exists only
+ * as a recovery path for the situations described in HDDS-7449 and
+ * HDDS-7526; exercise caution and prefer other approaches to managing
+ * bucket encryption.
+ */
+ @Deprecated
+ void setEncryptionKey(String volumeName, String bucketName,
+ String bekName) throws IOException;
+
/**
* Returns OzoneKey that contains the application generated/visible
* metadata for an Ozone Object.
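
For context, a minimal sketch of how the new (and immediately deprecated) setter is expected to be reached from application code. This is not part of the patch: the volume, bucket, and key names are placeholders, and the encryption key is assumed to already exist in the configured KMS.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;

    public final class ResetBucketEncryptionKeyExample {
      public static void main(String[] args) throws Exception {
        try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
          OzoneBucket bucket = client.getObjectStore()
              .getVolume("vol1")        // placeholder volume name
              .getBucket("bucket1");    // placeholder bucket name
          // Forwards to ClientProtocol#setEncryptionKey, which sends an OmBucketArgs
          // carrying the new BucketEncryptionKeyInfo to the OM (see RpcClient below).
          bucket.setEncryptionKey("new-bek-key");
        }
      }
    }

OzoneBucket#setEncryptionKey simply delegates to the protocol method and caches the new key name locally, so the deprecation applies to the whole chain.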
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 850ae0d1937..7e1e6fe4560 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -25,7 +25,7 @@
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import javax.crypto.Cipher;
import javax.crypto.CipherInputStream;
import org.apache.commons.lang3.StringUtils;
@@ -145,7 +145,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.ratis.protocol.ClientId;
-import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -364,7 +363,7 @@ static boolean validateOmVersion(OzoneManagerVersion minimumVersion,
return found;
}
- @NotNull
+ @Nonnull
@VisibleForTesting
protected XceiverClientFactory createXceiverClientFactory(
ServiceInfoEx serviceInfo) throws IOException {
@@ -1213,6 +1212,22 @@ public void setBucketQuota(String volumeName, String bucketName,
}
+ @Deprecated
+ @Override
+ public void setEncryptionKey(String volumeName, String bucketName,
+ String bekName) throws IOException {
+ verifyVolumeName(volumeName);
+ verifyBucketName(bucketName);
+ OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
+ BucketEncryptionKeyInfo bek = new BucketEncryptionKeyInfo.Builder()
+ .setKeyName(bekName).build();
+ builder.setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setBucketEncryptionKey(bek);
+ OmBucketArgs finalArgs = builder.build();
+ ozoneManagerClient.setBucketProperty(finalArgs);
+ }
+
@Override
public void setReplicationConfig(
String volumeName, String bucketName, ReplicationConfig replicationConfig)
@@ -1640,7 +1655,7 @@ public OzoneKeyDetails getKeyDetails(
return getOzoneKeyDetails(keyInfo);
}
- @NotNull
+ @Nonnull
private OzoneKeyDetails getOzoneKeyDetails(OmKeyInfo keyInfo) {
List ozoneKeyLocations = new ArrayList<>();
long lastKeyOffset = 0L;
@@ -1684,7 +1699,7 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName,
return getOzoneKeyDetails(keyInfo);
}
- @NotNull
+ @Nonnull
private OmKeyInfo getS3KeyInfo(
String bucketName, String keyName, boolean isHeadOp) throws IOException {
verifyBucketName(bucketName);
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java
index e4a8a80a631..31f5e20bc88 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockOmTransport.java
@@ -56,6 +56,8 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
@@ -67,6 +69,8 @@
* OM transport for testing with in-memory state.
*/
public class MockOmTransport implements OmTransport {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MockOmTransport.class);
private final MockBlockAllocator blockAllocator;
//volumename -> volumeinfo
@@ -185,11 +189,44 @@ private GetKeyInfoResponse getKeyInfo(GetKeyInfoRequest request) {
.build();
}
+ private boolean isHSync(CommitKeyRequest commitKeyRequest) {
+ return commitKeyRequest.hasHsync() && commitKeyRequest.getHsync();
+ }
+
+ private boolean isRecovery(CommitKeyRequest commitKeyRequest) {
+ return commitKeyRequest.hasRecovery() && commitKeyRequest.getRecovery();
+ }
+
+ private String toOperationString(CommitKeyRequest commitKeyRequest) {
+ boolean hsync = isHSync(commitKeyRequest);
+ boolean recovery = isRecovery(commitKeyRequest);
+ if (hsync) {
+ return "hsync";
+ }
+ if (recovery) {
+ return "recover";
+ }
+ return "commit";
+ }
+
private CommitKeyResponse commitKey(CommitKeyRequest commitKeyRequest) {
final KeyArgs keyArgs = commitKeyRequest.getKeyArgs();
final KeyInfo openKey =
openKeys.get(keyArgs.getVolumeName()).get(keyArgs.getBucketName())
- .remove(keyArgs.getKeyName());
+ .get(keyArgs.getKeyName());
+ LOG.debug("{} open key vol: {} bucket: {} key: {}",
+ toOperationString(commitKeyRequest),
+ keyArgs.getVolumeName(),
+ keyArgs.getBucketName(),
+ keyArgs.getKeyName());
+ boolean hsync = isHSync(commitKeyRequest);
+ if (!hsync) {
+ KeyInfo deleteKey = openKeys.get(keyArgs.getVolumeName())
+ .get(keyArgs.getBucketName())
+ .remove(keyArgs.getKeyName());
+ assert deleteKey != null;
+ }
final KeyInfo.Builder committedKeyInfoWithLocations =
KeyInfo.newBuilder().setVolumeName(keyArgs.getVolumeName())
.setBucketName(keyArgs.getBucketName())
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
new file mode 100644
index 00000000000..1014b943a2a
--- /dev/null
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestBlockOutputStreamIncrementalPutBlock.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.client;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.UUID;
+
+import jakarta.annotation.Nonnull;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.InMemoryConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.XceiverClientFactory;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+
+
+/**
+ * Verify BlockOutputStream with incremental PutBlock feature.
+ * (ozone.client.incremental.chunk.list = true)
+ */
+public class TestBlockOutputStreamIncrementalPutBlock {
+ private OzoneClient client;
+ private final String keyName = UUID.randomUUID().toString();
+ private final String volumeName = UUID.randomUUID().toString();
+ private final String bucketName = UUID.randomUUID().toString();
+ private OzoneBucket bucket;
+ private final ConfigurationSource config = new InMemoryConfiguration();
+
+ public static Iterable<Boolean> parameters() {
+ return Arrays.asList(true, false);
+ }
+
+ private void init(boolean incrementalChunkList) throws IOException {
+ OzoneClientConfig clientConfig = config.getObject(OzoneClientConfig.class);
+
+ clientConfig.setIncrementalChunkList(incrementalChunkList);
+ clientConfig.setChecksumType(ContainerProtos.ChecksumType.CRC32C);
+
+ ((InMemoryConfiguration)config).setFromObject(clientConfig);
+
+ ((InMemoryConfiguration) config).setBoolean(
+ OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
+ ((InMemoryConfiguration) config).setBoolean(
+ OZONE_CHUNK_LIST_INCREMENTAL, incrementalChunkList);
+
+ RpcClient rpcClient = new RpcClient(config, null) {
+
+ @Override
+ protected OmTransport createOmTransport(
+ String omServiceId)
+ throws IOException {
+ return new MockOmTransport();
+ }
+
+ @Nonnull
+ @Override
+ protected XceiverClientFactory createXceiverClientFactory(
+ ServiceInfoEx serviceInfo) throws IOException {
+ return new MockXceiverClientFactory();
+ }
+ };
+
+ client = new OzoneClient(config, rpcClient);
+ ObjectStore store = client.getObjectStore();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ bucket = volume.getBucket(bucketName);
+ }
+
+ @AfterEach
+ public void close() throws IOException {
+ client.close();
+ }
+
+ @ParameterizedTest
+ @MethodSource("parameters")
+ public void writeSmallChunk(boolean incrementalChunkList)
+ throws IOException {
+ init(incrementalChunkList);
+
+ int size = 1024;
+ String s = RandomStringUtils.randomAlphabetic(1024);
+ ByteBuffer byteBuffer = ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
+
+ try (OzoneOutputStream out = bucket.createKey(keyName, size,
+ ReplicationConfig.getDefault(config), new HashMap<>())) {
+ for (int i = 0; i < 4097; i++) {
+ out.write(byteBuffer);
+ out.hsync();
+ }
+ }
+
+ try (OzoneInputStream is = bucket.readKey(keyName)) {
+ ByteBuffer readBuffer = ByteBuffer.allocate(size);
+ for (int i = 0; i < 4097; i++) {
+ is.read(readBuffer);
+ assertArrayEquals(readBuffer.array(), byteBuffer.array());
+ }
+ }
+ }
+
+ @ParameterizedTest
+ @MethodSource("parameters")
+ public void writeLargeChunk(boolean incrementalChunkList)
+ throws IOException {
+ init(incrementalChunkList);
+
+ int size = 1024 * 1024 + 1;
+ ByteBuffer byteBuffer = ByteBuffer.allocate(size);
+
+ try (OzoneOutputStream out = bucket.createKey(keyName, size,
+ ReplicationConfig.getDefault(config), new HashMap<>())) {
+ for (int i = 0; i < 4; i++) {
+ out.write(byteBuffer);
+ out.hsync();
+ }
+ }
+
+ try (OzoneInputStream is = bucket.readKey(keyName)) {
+ ByteBuffer readBuffer = ByteBuffer.allocate(size);
+ for (int i = 0; i < 4; i++) {
+ is.read(readBuffer);
+ assertArrayEquals(readBuffer.array(), byteBuffer.array());
+ }
+ }
+ }
+}
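
As a rough sketch of how the feature under test would be enabled outside this unit test: the fragment below is not part of the patch, it reuses the OzoneClientConfig setter and ScmConfigKeys constant already used by the test, and the typed setting corresponds to the ozone.client.incremental.chunk.list property quoted in the class comment above.

    import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.OzoneClientConfig;

    // Builds a client configuration with incremental PutBlock enabled,
    // mirroring the settings applied in init() above.
    static OzoneConfiguration incrementalPutBlockConf() {
      OzoneConfiguration conf = new OzoneConfiguration();
      OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
      clientConfig.setIncrementalChunkList(true);
      conf.setFromObject(clientConfig);
      conf.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
      return conf;
    }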
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
index 42b9d807671..09a6c0a5c0e 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
@@ -35,7 +35,7 @@
import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
import org.apache.hadoop.ozone.om.protocolPB.OmTransport;
import org.apache.ozone.test.LambdaTestUtils.VoidCallable;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -47,7 +47,7 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.apache.ozone.test.GenericTestUtils.getTestStartTime;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -67,12 +67,8 @@ public static void expectOmException(
OMException.ResultCodes code,
VoidCallable eval)
throws Exception {
- try {
- eval.call();
- fail("OMException is expected");
- } catch (OMException ex) {
- assertEquals(code, ex.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () -> eval.call());
+ assertEquals(code, ex.getResult());
}
@BeforeEach
@@ -90,7 +86,7 @@ protected OmTransport createOmTransport(String omServiceId) {
return new MockOmTransport(blkAllocator);
}
- @NotNull
+ @Nonnull
@Override
protected XceiverClientFactory createXceiverClientFactory(
ServiceInfoEx serviceInfo) {
@@ -138,7 +134,7 @@ public void testCreateVolumeWithMetadata()
@Test
public void testCreateBucket()
throws IOException {
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
@@ -152,7 +148,7 @@ public void testCreateBucket()
@Test
public void testPutKeyRatisOneNode() throws IOException {
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String value = "sample value";
OzoneBucket bucket = getOzoneBucket();
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
index a2287ecc524..25a3ad2d9c8 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
@@ -66,7 +66,6 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
@@ -706,20 +705,15 @@ public void testStripeWriteRetriesOn4FailuresWith3RetriesAllowed()
nodesIndexesToMarkFailure[2] = 10;
//To mark node failed in fourth block group.
nodesIndexesToMarkFailure[3] = 15;
- try {
- // Mocked MultiNodePipelineBlockAllocator#allocateBlock implementation can
- // pick good block group, but client retries should be limited
- // OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES_ON_FAILURE(here it was
- // configured as 3). So, it should fail as we have marked 3 nodes as bad.
- testStripeWriteRetriesOnFailures(con, 20, nodesIndexesToMarkFailure);
- fail(
- "Expecting it to fail as retries should exceed the max allowed times:"
- + " " + 3);
- } catch (IOException e) {
- assertEquals(
- "Completed max allowed retries 3 on stripe failures.",
- e.getMessage());
- }
+ // Mocked MultiNodePipelineBlockAllocator#allocateBlock implementation can
+ // pick good block group, but client retries should be limited
+ // OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES_ON_FAILURE(here it was
+ // configured as 3). So, it should fail as we have marked 3 nodes as bad.
+ IOException e = assertThrows(IOException.class,
+ () -> testStripeWriteRetriesOnFailures(con, 20, nodesIndexesToMarkFailure));
+ assertEquals(
+ "Completed max allowed retries 3 on stripe failures.",
+ e.getMessage());
}
public void testStripeWriteRetriesOnFailures(OzoneConfiguration con,
@@ -1035,7 +1029,7 @@ public void testPartialStripeWithPartialChunkRetry()
}
@Test
- public void testDiscardPreAllocatedBlocksPreventRetryExceeds()
+ void testDiscardPreAllocatedBlocksPreventRetryExceeds()
throws Exception {
close();
OzoneConfiguration con = createConfiguration();
@@ -1105,16 +1099,10 @@ public void testDiscardPreAllocatedBlocksPreventRetryExceeds()
factoryStub.setFailedStorages(failedDNs);
// Writes that will retry due to failed DNs
- try {
- for (int j = 0; j < numStripesAfterFailure; j++) {
- for (int i = 0; i < dataBlocks; i++) {
- out.write(inputChunks[i]);
- }
+ for (int j = 0; j < numStripesAfterFailure; j++) {
+ for (int i = 0; i < dataBlocks; i++) {
+ out.write(inputChunks[i]);
}
- } catch (IOException e) {
- // If we don't discard pre-allocated blocks,
- // retries should exceed the maxRetries and write will fail.
- fail("Max retries exceeded");
}
}
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
index 5cf4401bae2..6162f1ae5a4 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.client.checksum;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;
@@ -27,7 +28,6 @@
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
-import java.util.Random;
import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.COMPOSITE_CRC;
import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC;
@@ -40,9 +40,8 @@ public class TestReplicatedBlockChecksumComputer {
@Test
public void testComputeMd5Crc() throws IOException {
final int lenOfBytes = 32;
- byte[] randomChunkChecksum = new byte[lenOfBytes];
- Random r = new Random();
- r.nextBytes(randomChunkChecksum);
+ byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
+
MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum);
byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest();
AbstractBlockChecksumComputer computer =
@@ -56,9 +55,7 @@ public void testComputeMd5Crc() throws IOException {
@Test
public void testComputeCompositeCrc() throws IOException {
final int lenOfBytes = 32;
- byte[] randomChunkChecksum = new byte[lenOfBytes];
- Random r = new Random();
- r.nextBytes(randomChunkChecksum);
+ byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
CrcComposer crcComposer =
CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4);
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
index fa80f72b7f3..702a450ee75 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java
@@ -52,7 +52,7 @@
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -72,9 +72,9 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.mockito.Mockito.any;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.mock;
-import static org.mockito.ArgumentMatchers.any;
/**
* Unit tests for ReplicatedFileChecksumHelper class.
@@ -101,7 +101,7 @@ protected OmTransport createOmTransport(
return new MockOmTransport();
}
- @NotNull
+ @Nonnull
@Override
protected XceiverClientFactory createXceiverClientFactory(
ServiceInfoEx serviceInfo) {
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java
index abf3e9c1323..6af5c4b4e0d 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java
@@ -38,8 +38,8 @@
import java.util.Map;
import static org.apache.hadoop.ozone.OzoneConsts.MB;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.junit.jupiter.api.Assertions.assertEquals;
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
index ea70f19fdfe..8d9efd96325 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
@@ -25,7 +25,7 @@
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Test class for {@link OzoneKMSUtil}.
@@ -41,12 +41,8 @@ public void setUp() {
@Test
public void getKeyProvider() {
- try {
- OzoneKMSUtil.getKeyProvider(config, null);
- fail("Expected IOException.");
- } catch (IOException ioe) {
- assertEquals(ioe.getMessage(), "KMS serverProviderUri is " +
- "not configured.");
- }
+ IOException ioe =
+ assertThrows(IOException.class, () -> OzoneKMSUtil.getKeyProvider(config, null));
+ assertEquals("KMS serverProviderUri is not configured.", ioe.getMessage());
}
}
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 813edcb7d71..4af3fb18523 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
index 6f2ad0bfa88..8ffa3c45c09 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.fs.Syncable;
import org.apache.ratis.util.function.CheckedFunction;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Objects;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java
index 4f1f66faccb..122b04b715d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3InMemoryCache.java
@@ -44,13 +44,7 @@ public void put(String id, S3SecretValue secretValue) {
@Override
public void invalidate(String id) {
- S3SecretValue secret = cache.getIfPresent(id);
- if (secret == null) {
- return;
- }
- secret.setDeleted(true);
- secret.setAwsSecret(null);
- cache.put(id, secret);
+ cache.asMap().computeIfPresent(id, (k, secret) -> secret.deleted());
}
/**
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index f8c752aab27..e382377dff4 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
/**
* A class that encapsulates Bucket Arguments.
@@ -50,6 +51,10 @@ public final class OmBucketArgs extends WithMetadata implements Auditable {
*/
private StorageType storageType;
+ /**
+ * Bucket encryption key info if encryption is enabled.
+ */
+ private BucketEncryptionKeyInfo bekInfo;
private long quotaInBytes = OzoneConsts.QUOTA_RESET;
private long quotaInNamespace = OzoneConsts.QUOTA_RESET;
private boolean quotaInBytesSet = false;
@@ -150,6 +155,10 @@ public DefaultReplicationConfig getDefaultReplicationConfig() {
return defaultReplicationConfig;
}
+ public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() {
+ return bekInfo;
+ }
+
/**
* Sets the Bucket default replication config.
*/
@@ -168,6 +177,12 @@ private void setQuotaInNamespace(long quotaInNamespace) {
this.quotaInNamespace = quotaInNamespace;
}
+ @Deprecated
+ private void setBucketEncryptionKey(
+ BucketEncryptionKeyInfo bucketEncryptionKey) {
+ this.bekInfo = bucketEncryptionKey;
+ }
+
/**
* Returns Bucket Owner Name.
*
@@ -216,6 +231,7 @@ public static class Builder {
private long quotaInBytes;
private boolean quotaInNamespaceSet = false;
private long quotaInNamespace;
+ private BucketEncryptionKeyInfo bekInfo;
private DefaultReplicationConfig defaultReplicationConfig;
private String ownerName;
/**
@@ -241,6 +257,12 @@ public Builder setIsVersionEnabled(Boolean versionFlag) {
return this;
}
+ @Deprecated
+ public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) {
+ this.bekInfo = info;
+ return this;
+ }
+
public Builder addMetadata(Map metadataMap) {
this.metadata = metadataMap;
return this;
@@ -291,6 +313,9 @@ public OmBucketArgs build() {
if (quotaInNamespaceSet) {
omBucketArgs.setQuotaInNamespace(quotaInNamespace);
}
+ if (bekInfo != null && bekInfo.getKeyName() != null) {
+ omBucketArgs.setBucketEncryptionKey(bekInfo);
+ }
return omBucketArgs;
}
}
@@ -322,6 +347,11 @@ public BucketArgs getProtobuf() {
if (ownerName != null) {
builder.setOwnerName(ownerName);
}
+
+ if (bekInfo != null && bekInfo.getKeyName() != null) {
+ builder.setBekInfo(OMPBHelper.convert(bekInfo));
+ }
+
return builder.build();
}
@@ -355,6 +385,11 @@ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) {
if (bucketArgs.hasQuotaInNamespace()) {
omBucketArgs.setQuotaInNamespace(bucketArgs.getQuotaInNamespace());
}
+
+ if (bucketArgs.hasBekInfo()) {
+ omBucketArgs.setBucketEncryptionKey(
+ OMPBHelper.convert(bucketArgs.getBekInfo()));
+ }
return omBucketArgs;
}
}
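
A short sketch of the builder path added above, for reference only; the fragment is not part of the patch and all names are placeholders.

    // Attach a bucket encryption key to OmBucketArgs (placeholder names).
    BucketEncryptionKeyInfo bek = new BucketEncryptionKeyInfo.Builder()
        .setKeyName("new-bek-key")
        .build();
    OmBucketArgs args = OmBucketArgs.newBuilder()
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setBucketEncryptionKey(bek)   // only applied when bek.getKeyName() != null
        .build();

Note that the BEK info is attached only when a non-null key name is present, both in build() and in getProtobuf(), where it is converted via OMPBHelper.convert().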
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
index 8e43d057729..453dc3b957c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.ozone.audit.Auditable;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
import org.apache.hadoop.ozone.security.GDPRSymmetricKey;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import java.util.ArrayList;
import java.util.HashMap;
@@ -196,7 +196,7 @@ public OmKeyArgs.Builder toBuilder() {
.setForceUpdateContainerCacheFromSCM(forceUpdateContainerCacheFromSCM);
}
- @NotNull
+ @Nonnull
public KeyArgs toProtobuf() {
return KeyArgs.newBuilder()
.setVolumeName(getVolumeName())
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index 624e479ce3d..74effbd80a3 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import java.nio.file.Paths;
import java.util.UUID;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
index e97adc0a50f..cb1ed0976a0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
@@ -27,7 +27,7 @@
/**
* S3Secret to be saved in database.
*/
-public class S3SecretValue {
+public final class S3SecretValue {
private static final Codec<S3SecretValue> CODEC = new DelegatedCodec<>(
Proto2Codec.get(S3Secret.getDefaultInstance()),
S3SecretValue::fromProtobuf,
@@ -38,16 +38,29 @@ public static Codec getCodec() {
}
// TODO: This field should be renamed to accessId for generalization.
- private String kerberosID;
- private String awsSecret;
- private boolean isDeleted;
- private long transactionLogIndex;
+ private final String kerberosID;
+ private final String awsSecret;
+ private final boolean isDeleted;
+ private final long transactionLogIndex;
- public S3SecretValue(String kerberosID, String awsSecret) {
- this(kerberosID, awsSecret, false, 0L);
+ public static S3SecretValue of(String kerberosID, String awsSecret) {
+ return of(kerberosID, awsSecret, 0);
}
- public S3SecretValue(String kerberosID, String awsSecret, boolean isDeleted,
+ public static S3SecretValue of(String kerberosID, String awsSecret, long transactionLogIndex) {
+ return new S3SecretValue(
+ Objects.requireNonNull(kerberosID),
+ Objects.requireNonNull(awsSecret),
+ false,
+ transactionLogIndex
+ );
+ }
+
+ public S3SecretValue deleted() {
+ return new S3SecretValue(kerberosID, "", true, transactionLogIndex);
+ }
+
+ private S3SecretValue(String kerberosID, String awsSecret, boolean isDeleted,
long transactionLogIndex) {
this.kerberosID = kerberosID;
this.awsSecret = awsSecret;
@@ -59,26 +72,14 @@ public String getKerberosID() {
return kerberosID;
}
- public void setKerberosID(String kerberosID) {
- this.kerberosID = kerberosID;
- }
-
public String getAwsSecret() {
return awsSecret;
}
- public void setAwsSecret(String awsSecret) {
- this.awsSecret = awsSecret;
- }
-
public boolean isDeleted() {
return isDeleted;
}
- public void setDeleted(boolean status) {
- this.isDeleted = status;
- }
-
public String getAwsAccessKey() {
return kerberosID;
}
@@ -87,12 +88,8 @@ public long getTransactionLogIndex() {
return transactionLogIndex;
}
- public void setTransactionLogIndex(long transactionLogIndex) {
- this.transactionLogIndex = transactionLogIndex;
- }
-
public static S3SecretValue fromProtobuf(S3Secret s3Secret) {
- return new S3SecretValue(s3Secret.getKerberosID(), s3Secret.getAwsSecret());
+ return S3SecretValue.of(s3Secret.getKerberosID(), s3Secret.getAwsSecret());
}
public S3Secret getProtobuf() {
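
For reference, a minimal sketch of the refactored immutable API, using placeholder credentials; it mirrors the new factory methods above and the computeIfPresent-based invalidation in S3InMemoryCache.

    // Placeholder access id and secret, not part of the patch.
    S3SecretValue secret = S3SecretValue.of("accessId", "awsSecret");
    // deleted() returns a copy with isDeleted() == true and the secret cleared.
    S3SecretValue tombstone = secret.deleted();
    // S3InMemoryCache#invalidate now swaps the cached entry atomically:
    //   cache.asMap().computeIfPresent(id, (k, s) -> s.deleted());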
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccessPolicy.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccessPolicy.java
deleted file mode 100644
index ee64d5ae092..00000000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/AccessPolicy.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om.multitenant;
-
-import java.io.IOException;
-import java.security.Principal;
-import java.util.HashSet;
-import java.util.List;
-
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import com.google.gson.JsonObject;
-
-/**
- * AccessPolicy interface for Ozone Multi-Tenancy.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "Yarn", "Ranger", "Hive", "HBase"})
-@InterfaceStability.Evolving
-public interface AccessPolicy {
-
- /**
- * Ozone could support different authorization engines e.g.
- * native-authorization, Ranger Authorization,
- * any-other-external-authorization. This interface is an in memory
- * version of a generic access policy. Any Ozone policy can be uniquely
- * identified by its policy-ID. Ozone can choose to persist this policy-ID
- * in its internal database. A remote/native authorizer can retrieve/update
- * an access policy associated with its Policy-ID ID.
- *
- */
- enum AccessPolicyType { NATIVE_ACL, RANGER_POLICY, AWS_POLICY, OTHER };
-
- /**
- * Allow or deny.
- */
- enum AccessGrantType { ALLOW, DENY };
-
- /**
- * Defines an access policy entry.
- */
- class AccessPolicyElem {
- private OzoneObj object;
- private Principal principal;
- private ACLType aclType;
- private AccessGrantType grantType;
-
- public AccessPolicyElem(OzoneObj obj, Principal id,
- ACLType acl, AccessGrantType grant) {
- object = obj;
- principal = id;
- aclType = acl;
- grantType = grant;
- }
-
- public OzoneObj getObject() {
- return object;
- }
-
- public Principal getPrincipal() {
- return principal;
- }
-
- public ACLType getAclType() {
- return aclType;
- }
-
- public AccessGrantType getAccessGrantType() {
- return grantType;
- }
- }
-
- /**
- * @param id This would be policy-ID that an external/native authorizer
- * could return.
- */
- void setPolicyName(String id);
-
- String getPolicyID();
-
- /**
- * @return unique policy-name for this policy.
- */
- String getPolicyName();
-
- /**
- *
- * @return Policy in a Json string format. Individual implementation can
- * choose different AccessPolicyType e.g. Ranger-Compatible-Json-Policy,
- * AWS-Compatible-Json-policy etc. It could be an Opaque data to the caller
- * and they can directly send it to an authorizer (e.g. Ranger).
- * All Authorizer policy engines are supposed to provide an implementation
- * of AccessPolicy interface.
- */
- String serializePolicyToJsonString() throws IOException;
-
- /**
- * Given a serialized accessPolicy in a Json format, deserializes and
- * constructs a valid access Policy.
- * @return
- * @throws IOException
- */
- String deserializePolicyFromJsonString(JsonObject jsonObject)
- throws IOException;
-
- /**
- * @return AccessPolicyType (Native or otherwise).
- */
- AccessPolicyType getAccessPolicyType();
-
- void addAccessPolicyElem(OzoneObj object,
- Principal principal, ACLType acl,
- AccessGrantType grant) throws IOException;
-
- void removeAccessPolicyElem(OzoneObj object,
- Principal principal,
- ACLType acl, AccessGrantType grant)
- throws IOException;
-
- List<AccessPolicyElem> getAccessPolicyElem();
-
- /**
- * Sets the last update time to mtime.
- * @param mtime Time in epoch milliseconds
- */
- void setPolicyLastUpdateTime(long mtime);
-
- /**
- * Returns the last update time of Ranger policies.
- */
- long getPolicyLastUpdateTime();
-
- /**
- * @return list of roles associated with this policy
- */
- HashSet<String> getRoleList();
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerAccessPolicy.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerAccessPolicy.java
deleted file mode 100644
index cebb540ba6d..00000000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerAccessPolicy.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.om.multitenant;
-
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-
-import com.google.gson.JsonArray;
-import com.google.gson.JsonObject;
-
-import java.io.IOException;
-import java.security.Principal;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_TENANT_RANGER_POLICY_LABEL;
-import static org.apache.hadoop.ozone.om.multitenant.AccessPolicy.AccessPolicyType.RANGER_POLICY;
-
-/**
- * This is used for Ozone tenant access policy control.
- */
-public class RangerAccessPolicy implements AccessPolicy {
-
- // For now RangerAccessPolicy supports only one object per policy
- private OzoneObj accessObject;
- private final Map<String, List<AccessPolicyElem>> policyMap;
- private final HashSet<String> roleList;
- private String policyID;
- private String policyJsonString;
- private String policyName;
- private long lastPolicyUpdateTimeEpochMillis;
-
- public RangerAccessPolicy(String name) {
- policyMap = new ConcurrentHashMap<>();
- policyName = name;
- roleList = new HashSet<>();
- }
-
- public void setPolicyName(String id) {
- policyID = id;
- }
-
- public String getPolicyID() {
- return policyID;
- }
-
- public String getPolicyName() {
- return policyName;
- }
-
- public HashSet<String> getRoleList() {
- return roleList;
- }
-
- @Override
- public void setPolicyLastUpdateTime(long mtime) {
- lastPolicyUpdateTimeEpochMillis = mtime;
- }
-
- @Override
- public long getPolicyLastUpdateTime() {
- return lastPolicyUpdateTimeEpochMillis;
- }
-
- @Override
- public String serializePolicyToJsonString() throws IOException {
- updatePolicyJsonString();
- return policyJsonString;
- }
-
- @Override
- public String deserializePolicyFromJsonString(JsonObject jsonObject) {
- setPolicyName(jsonObject.get("id").getAsString());
- try {
- JsonArray policyItems = jsonObject
- .getAsJsonArray("policyItems");
- for (int j = 0; j < policyItems.size(); ++j) {
- JsonObject policy = policyItems.get(j).getAsJsonObject();
- JsonArray roles = policy.getAsJsonArray("roles");
- for (int k = 0; k < roles.size(); ++k) {
- if (!roleList.contains(roles.get(k).getAsString())) {
- // We only get the role name here. We need to query and populate it.
- roleList.add(roles.get(k).getAsString());
- }
- }
- }
- } catch (Exception e) {
- // Ignore Exception here.
- }
- // TODO : retrieve other policy fields as well.
- try {
- setPolicyLastUpdateTime(jsonObject.get("updateTime").getAsLong());
- } catch (Exception e) {
- // lets ignore the exception in case the field is not set.
- }
- return null;
- }
-
- @Override
- public AccessPolicyType getAccessPolicyType() {
- return RANGER_POLICY;
- }
-
- @Override
- public void addAccessPolicyElem(OzoneObj object,
- Principal principal,
- ACLType acl, AccessGrantType grant)
- throws IOException {
- if (accessObject == null) {
- accessObject = object;
- } else if (!object.toString().equals(accessObject.toString())) {
- throw new IOException(
- "RangerAccessPolicy supports only one object per" + " policy");
- }
- AccessPolicyElem elem = new AccessPolicyElem(object, principal, acl, grant);
- if (!policyMap.containsKey(principal.getName())) {
- List<AccessPolicyElem> elemList = new ArrayList<>();
- elemList.add(elem);
- policyMap.put(principal.getName(), elemList);
- return;
- }
- List<AccessPolicyElem> elemList = policyMap.get(principal.getName());
- for (AccessPolicyElem e : elemList) {
- if (e.getAclType() == acl) {
- throw new IOException(
- "RangerAccessPolicy: Principal " + principal.getName()
- + " already exists with access " + acl);
- }
- }
- elemList.add(elem);
- }
-
- @Override
- public List<AccessPolicyElem> getAccessPolicyElem() {
- List<AccessPolicyElem> list = new ArrayList<>();
- for (Map.Entry<String, List<AccessPolicyElem>> entry : policyMap
- .entrySet()) {
- list.addAll(entry.getValue());
- }
- return list;
- }
-
- @Override
- public void removeAccessPolicyElem(OzoneObj object,
- Principal principal, ACLType acl,
- AccessGrantType grant)
- throws IOException {
- if (accessObject == null) {
- throw new IOException("removeAccessPolicyElem: Invalid Arguments.");
- } else if (!object.toString().equals(accessObject.toString())) {
- throw new IOException(
- "removeAccessPolicyElem: Object not found." + object.toString());
- }
- if (!policyMap.containsKey(principal.getName())) {
- throw new IOException(
- "removeAccessPolicyElem: Principal not found." + object.toString());
- }
- List<AccessPolicyElem> elemList = policyMap.get(principal.getName());
- for (AccessPolicyElem e : elemList) {
- if (e.getAclType() == acl) {
- elemList.remove(e);
- }
- }
- if (elemList.isEmpty()) {
- policyMap.remove(principal.toString());
- }
- throw new IOException(
- "removeAccessPolicyElem: aclType not found." + object.toString());
- }
-
- private String createRangerResourceItems() {
- StringBuilder resourceItems = new StringBuilder();
- resourceItems.append("\"resources\":{" +
- "\"volume\":{" +
- "\"values\":[\"");
- resourceItems.append(accessObject.getVolumeName());
- resourceItems.append("\"]," +
- "\"isRecursive\":false," +
- "\"isExcludes\":false" +
- "}");
- if ((accessObject.getResourceType() == OzoneObj.ResourceType.BUCKET) ||
- (accessObject.getResourceType() == OzoneObj.ResourceType.KEY)) {
- resourceItems.append(
- ",\"bucket\":{" +
- "\"values\":[\"");
- resourceItems.append(accessObject.getBucketName());
- resourceItems.append("\"]," +
- "\"isRecursive\":false," +
- "\"isExcludes\":false" +
- "}");
- }
- if (accessObject.getResourceType() == OzoneObj.ResourceType.KEY) {
- resourceItems.append(",\"key\":{" +
- "\"values\":[\"");
- resourceItems.append(accessObject.getKeyName());
- resourceItems.append("\"]," +
- "\"isRecursive\":true," +
- "\"isExcludes\":false" +
- "}");
- }
- resourceItems.append("},");
- return resourceItems.toString();
- }
-
- private String createRangerPolicyItems() throws IOException {
- StringBuilder policyItems = new StringBuilder();
- policyItems.append("\"policyItems\":[");
- int mapRemainingSize = policyMap.size();
- for (Map.Entry<String, List<AccessPolicyElem>> mapElem : policyMap
- .entrySet()) {
- mapRemainingSize--;
- List<AccessPolicyElem> list = mapElem.getValue();
- if (list.isEmpty()) {
- continue;
- }
- policyItems.append("{");
- if (list.get(0).getPrincipal() instanceof OzoneTenantRolePrincipal) {
- policyItems.append("\"roles\":[\"" + mapElem.getKey() + "\"],");
- } else {
- policyItems.append("\"users\":[\"" + mapElem.getKey() + "\"],");
- }
- policyItems.append("\"accesses\":[");
- Iterator<AccessPolicyElem> iter = list.iterator();
- while (iter.hasNext()) {
- AccessPolicyElem elem = iter.next();
- policyItems.append("{");
- policyItems.append("\"type\":\"");
- policyItems.append(getRangerAclString(elem.getAclType()));
- policyItems.append("\",");
- if (elem.getAccessGrantType() == AccessGrantType.ALLOW) {
- policyItems.append("\"isAllowed\":true");
- } else {
- policyItems.append("\"isDenied\":true");
- }
- policyItems.append("}");
- if (iter.hasNext()) {
- policyItems.append(",");
- }
- }
- policyItems.append("]");
- policyItems.append("}");
- if (mapRemainingSize > 0) {
- policyItems.append(",");
- }
- }
- policyItems.append("],");
- return policyItems.toString();
- }
-
- private String getRangerAclString(ACLType aclType) throws IOException {
- switch (aclType) {
- case ALL:
- return "All";
- case LIST:
- return "List";
- case READ:
- return "Read";
- case WRITE:
- return "Write";
- case CREATE:
- return "Create";
- case DELETE:
- return "Delete";
- case READ_ACL:
- return "Read_ACL";
- case WRITE_ACL:
- return "Write_ACL";
- case NONE:
- return "";
- default:
- throw new IOException("Unknown ACLType");
- }
- }
-
- private void updatePolicyJsonString() throws IOException {
- policyJsonString =
- "{\"policyType\":\"0\"," + "\"name\":\"" + policyName + "\","
- + "\"isEnabled\":true," + "\"policyPriority\":0,"
- + "\"description\":\"Policy created by Ozone for Multi-Tenancy\","
- + "\"policyLabels\":[\"" + OZONE_TENANT_RANGER_POLICY_LABEL + "\"],"
- + "\"description\":\"\","
- + "\"isAuditEnabled\":true," + createRangerResourceItems()
- + "\"isDenyAllElse\":false," + createRangerPolicyItems()
- + "\"allowExceptions\":[]," + "\"denyPolicyItems\":[],"
- + "\"denyExceptions\":[]," + "\"service\":\"cm_ozone\"" + "}";
- }
-
- @Override
- public String toString() {
- return "RangerAccessPolicy{" + "accessObject=" + accessObject
- + ", policyMap=" + policyMap + ", roleList=" + roleList + ", policyID='"
- + policyID + '\'' + ", policyJsonString='" + policyJsonString + '\''
- + ", policyName='" + policyName + '\''
- + ", lastPolicyUpdateTimeEpochMillis=" + lastPolicyUpdateTimeEpochMillis
- + '}';
- }
-}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index e769e3035ef..f41f89b181d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -23,8 +23,7 @@
import java.util.List;
import java.util.UUID;
-import javax.annotation.Nonnull;
-
+import jakarta.annotation.Nonnull;
import org.apache.hadoop.fs.SafeModeAction;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.ozone.OzoneAcl;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index 7b8d7ef9b2b..bd40dfcf024 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -25,7 +25,7 @@
import java.util.UUID;
import java.util.stream.Collectors;
-import javax.annotation.Nonnull;
+import jakarta.annotation.Nonnull;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.SafeModeAction;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
index a815b72deca..08ae1fbc65b 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
@@ -36,7 +36,6 @@
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE_ACL;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -122,14 +121,7 @@ void testAclParse() {
if (entry.getValue()) {
OzoneAcl.parseAcl(entry.getKey());
} else {
- try {
- OzoneAcl.parseAcl(entry.getKey());
- // should never get here since parseAcl will throw
- fail("An exception was expected but did not happen. Key: " +
- entry.getKey());
- } catch (IllegalArgumentException e) {
- // nothing to do
- }
+ assertThrows(IllegalArgumentException.class, () -> OzoneAcl.parseAcl(entry.getKey()));
}
}
}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java
index 75adb7e6a11..4a814852067 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java
@@ -30,7 +30,7 @@
import java.util.concurrent.CountDownLatch;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
@@ -239,14 +239,11 @@ void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireWriteLock(resource, resourceName);
- try {
- lock.acquireWriteLock(higherResource, higherResourceName);
- fail("testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock " +
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> lock.acquireWriteLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
@Test
@@ -264,14 +261,11 @@ void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireReadLock(resource, resourceName);
- try {
- lock.acquireWriteLock(higherResource, higherResourceName);
- fail("testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock " +
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> lock.acquireWriteLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
@Test
@@ -289,14 +283,11 @@ void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireReadLock(resource, resourceName);
- try {
- lock.acquireReadLock(higherResource, higherResourceName);
- fail("testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock " +
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> lock.acquireReadLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
@Test
@@ -314,13 +305,10 @@ void testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireWriteLock(resource, resourceName);
- try {
- lock.acquireReadLock(higherResource, higherResourceName);
- fail("testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock " +
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> lock.acquireReadLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
index 856f2b238c0..54ab718ccf9 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
@@ -40,7 +40,6 @@
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Class tests OzoneManagerLock.
@@ -77,14 +76,11 @@ private void testResourceReacquireLock(String[] resourceName,
resource == Resource.S3_SECRET_LOCK ||
resource == Resource.PREFIX_LOCK) {
lock.acquireWriteLock(resource, resourceName);
- try {
- lock.acquireWriteLock(resource, resourceName);
- fail("reacquireResourceLock failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + resource.getName() + " lock " +
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () -> lock.acquireWriteLock(resource, resourceName));
+ String message = "cannot acquire " + resource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
assertDoesNotThrow(() -> lock.releaseWriteLock(resource, resourceName));
} else {
lock.acquireWriteLock(resource, resourceName);
@@ -162,15 +158,13 @@ void testLockViolations() {
stack.push(new ResourceInfo(resourceName, higherResource));
currentLocks.add(higherResource.getName());
// try to acquire lower level lock
- try {
- resourceName = generateResourceName(resource);
- lock.acquireWriteLock(resource, resourceName);
- fail("testLockViolations failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + resource.getName() + " lock " +
- "while holding " + currentLocks + " lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex = assertThrows(RuntimeException.class, () -> {
+ String[] resourceName1 = generateResourceName(resource);
+ lock.acquireWriteLock(resource, resourceName1);
+ });
+ String message = "cannot acquire " + resource.getName() + " lock " +
+ "while holding " + currentLocks + " lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
index 04bb4b240dd..3d73a42e694 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
@@ -20,6 +20,7 @@
import static org.apache.hadoop.ozone.ClientVersion.CURRENT_VERSION;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.AdditionalAnswers.delegatesTo;
import static org.mockito.Mockito.mock;
@@ -46,8 +47,6 @@
import com.google.protobuf.ServiceException;
import org.apache.ratis.protocol.RaftPeerId;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.apache.hadoop.ozone.om.OMConfigKeys
.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH;
@@ -61,13 +60,13 @@ public class TestS3GrpcOmTransport {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3GrpcOmTransport.class);
- private final String leaderOMNodeId = "TestOM";
+ private static final String LEADER_OM_NODE_ID = "TestOM";
private final OMResponse omResponse = OMResponse.newBuilder()
.setSuccess(true)
.setStatus(org.apache.hadoop.ozone.protocol
.proto.OzoneManagerProtocolProtos.Status.OK)
- .setLeaderOMNodeId(leaderOMNodeId)
+ .setLeaderOMNodeId(LEADER_OM_NODE_ID)
.setCmdType(Type.AllocateBlock)
.build();
@@ -168,7 +167,7 @@ public void testSubmitRequestToServer() throws Exception {
final OMResponse resp = client.submitRequest(omRequest);
assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
.proto.OzoneManagerProtocolProtos.Status.OK);
- assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
+ assertEquals(resp.getLeaderOMNodeId(), LEADER_OM_NODE_ID);
}
@Test
@@ -192,7 +191,7 @@ public void testGrpcFailoverProxy() throws Exception {
final OMResponse resp = client.submitRequest(omRequest);
assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
.proto.OzoneManagerProtocolProtos.Status.OK);
- assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
+ assertEquals(resp.getLeaderOMNodeId(), LEADER_OM_NODE_ID);
}
@Test
@@ -216,12 +215,7 @@ public void testGrpcFailoverProxyExhaustRetry() throws Exception {
// OMFailoverProvider returns Fail retry due to #attempts >
// max failovers
- try {
- final OMResponse resp = client.submitRequest(omRequest);
- fail();
- } catch (Exception e) {
- assertTrue(true);
- }
+ assertThrows(Exception.class, () -> client.submitRequest(omRequest));
}
@Test
@@ -251,11 +245,6 @@ public void testGrpcFailoverExceedMaxMesgLen() throws Exception {
// len > 0, causing RESOURCE_EXHAUSTED exception.
// This exception should cause failover to NOT retry,
// rather to fail.
- try {
- final OMResponse resp = client.submitRequest(omRequest);
- fail();
- } catch (Exception e) {
- assertTrue(true);
- }
+ assertThrows(Exception.class, () -> client.submitRequest(omRequest));
}
}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java
index f6909e410f7..6d9f70b0f4f 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressClientInterceptor.java
@@ -28,7 +28,7 @@
import org.junit.jupiter.api.Test;
import org.mockito.MockedStatic;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java
index 6a3cdf91d8a..e441a6d3820 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/grpc/TestClientAddressServerInterceptor.java
@@ -28,7 +28,7 @@
import org.mockito.ArgumentCaptor;
import org.mockito.MockedStatic;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.mockStatic;
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
index 0b69d0dd9b3..1ab01ee3e00 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
@@ -22,8 +22,8 @@
import java.security.SecureRandom;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests GDPRSymmetricKey structure.
@@ -56,13 +56,8 @@ public void testKeyGenerationWithValidInput() throws Exception {
@Test
public void testKeyGenerationWithInvalidInput() throws Exception {
- try {
- new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5),
- OzoneConsts.GDPR_ALGORITHM_NAME);
- fail("Expect length mismatched");
- } catch (IllegalArgumentException ex) {
- assertTrue(ex.getMessage()
- .equalsIgnoreCase("Secret must be exactly 16 characters"));
- }
+ IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
+ () -> new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5), OzoneConsts.GDPR_ALGORITHM_NAME));
+ assertTrue(e.getMessage().equalsIgnoreCase("Secret must be exactly 16 characters"));
}
}
diff --git a/hadoop-ozone/csi/src/main/resources/proto.lock b/hadoop-ozone/csi/src/main/resources/proto.lock
index 8f797b69db4..410598cbb66 100644
--- a/hadoop-ozone/csi/src/main/resources/proto.lock
+++ b/hadoop-ozone/csi/src/main/resources/proto.lock
@@ -204,12 +204,14 @@
{
"id": 1,
"name": "service",
- "type": "Service"
+ "type": "Service",
+ "oneof_parent": "type"
},
{
"id": 2,
"name": "volume_expansion",
- "type": "VolumeExpansion"
+ "type": "VolumeExpansion",
+ "oneof_parent": "type"
}
],
"messages": [
@@ -302,12 +304,14 @@
{
"id": 1,
"name": "snapshot",
- "type": "SnapshotSource"
+ "type": "SnapshotSource",
+ "oneof_parent": "type"
},
{
"id": 2,
"name": "volume",
- "type": "VolumeSource"
+ "type": "VolumeSource",
+ "oneof_parent": "type"
}
],
"messages": [
@@ -349,12 +353,14 @@
{
"id": 1,
"name": "block",
- "type": "BlockVolume"
+ "type": "BlockVolume",
+ "oneof_parent": "access_type"
},
{
"id": 2,
"name": "mount",
- "type": "MountVolume"
+ "type": "MountVolume",
+ "oneof_parent": "access_type"
},
{
"id": 3,
@@ -793,7 +799,8 @@
{
"id": 1,
"name": "rpc",
- "type": "RPC"
+ "type": "RPC",
+ "oneof_parent": "type"
}
],
"messages": [
@@ -1243,7 +1250,8 @@
{
"id": 1,
"name": "rpc",
- "type": "RPC"
+ "type": "RPC",
+ "oneof_parent": "type"
}
],
"messages": [
diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh
index 417ae35e5e4..768a1f32a38 100755
--- a/hadoop-ozone/dev-support/checks/junit.sh
+++ b/hadoop-ozone/dev-support/checks/junit.sh
@@ -79,6 +79,12 @@ for i in $(seq 1 ${ITERATIONS}); do
fi
if [[ ${ITERATIONS} -gt 1 ]]; then
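+    # Fail this iteration (and stop further iterations) when the log shows zero executed tests.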
+ if ! grep -q "Tests run: [^0]" "${REPORT_DIR}/output.log"; then
+ echo "No tests were run" >> "${REPORT_DIR}/summary.txt"
+ irc=1
+ FAIL_FAST=true
+ fi
+
REPORT_DIR="${original_report_dir}"
echo "Iteration ${i} exit code: ${irc}" | tee -a "${REPORT_FILE}"
fi
diff --git a/hadoop-ozone/dev-support/checks/license.exceptions b/hadoop-ozone/dev-support/checks/license.exceptions
index 66f17fb670d..5b22b88c492 100644
--- a/hadoop-ozone/dev-support/checks/license.exceptions
+++ b/hadoop-ozone/dev-support/checks/license.exceptions
@@ -16,7 +16,7 @@
# This file lists dependencies with acceptable license that
# license-maven-plugin cannot find, or finds with unexpected license.
-com.google.re2j:re2j:1.1 BSD 3-Clause
+com.google.re2j:re2j:1.7 BSD 3-Clause
javax.servlet:servlet-api:2.5 CDDL 1.1
javax.servlet.jsp:jsp-api:2.1 CDDL 1.1
org.codehaus.jettison:jettison:1.1 Apache License 2.0
diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh
index dc66f923a64..1eeca5c0f3d 100755
--- a/hadoop-ozone/dev-support/checks/native.sh
+++ b/hadoop-ozone/dev-support/checks/native.sh
@@ -19,6 +19,20 @@
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CHECK=native
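+# Resolve the zlib and bzip2 versions declared in the root pom so matching source archives are downloaded for the native build.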
+zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout)
+if [[ -z "${zlib_version}" ]]; then
+ echo "ERROR zlib.version not defined in pom.xml"
+ exit 1
+fi
+
+bzip2_version=$(mvn -N help:evaluate -Dexpression=bzip2.version -q -DforceStdout)
+if [[ -z "${bzip2_version}" ]]; then
+ echo "ERROR bzip2.version not defined in pom.xml"
+ exit 1
+fi
+
source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \
+ -Dbzip2.url="https://github.com/libarchive/bzip2/archive/refs/tags/bzip2-${bzip2_version}.tar.gz" \
+ -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \
-DexcludedGroups="unhealthy" \
"$@"
diff --git a/hadoop-ozone/dev-support/checks/sonar.sh b/hadoop-ozone/dev-support/checks/sonar.sh
index 9a36c70a663..27a971f691c 100755
--- a/hadoop-ozone/dev-support/checks/sonar.sh
+++ b/hadoop-ozone/dev-support/checks/sonar.sh
@@ -23,11 +23,8 @@ if [ ! "$SONAR_TOKEN" ]; then
exit 1
fi
-#Workaround: Sonar expects per-project Sonar XML report, but we have one, combined. Sonar seems to handle it well.
-# Only the classes from the current project will be used. We can copy the same, combined report to all the subprojects.
-if [ -f "$PROJECT_DIR/target/coverage/all.xml" ]; then
- find "$PROJECT_DIR" -name pom.xml | grep -v target | xargs dirname | xargs -n1 -IDIR mkdir -p DIR/target/coverage/
- find "$PROJECT_DIR" -name pom.xml | grep -v target | xargs dirname | xargs -n1 -IDIR cp "$PROJECT_DIR/target/coverage/all.xml" DIR/target/coverage/
-fi
-mvn -B verify -DskipShade -DskipTests -Dskip.npx -Dskip.installnpx org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone
+mvn -V -B -DskipShade -DskipTests -Dskip.npx -Dskip.installnpx --no-transfer-progress \
+ -Dsonar.coverage.jacoco.xmlReportPaths="$(pwd)/target/coverage/all.xml" \
+ -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=apache -Dsonar.projectKey=hadoop-ozone \
+ verify org.sonarsource.scanner.maven:sonar-maven-plugin:3.6.0.1398:sonar
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
index a267080bb19..9d7ec5d4e60 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
@@ -35,8 +35,9 @@ RESULT_DIR="$ALL_RESULT_DIR" create_results_dir
# This is the version of Ozone that should use the runner image to run the
# code that was built. Other versions will pull images from docker hub.
-export OZONE_CURRENT_VERSION=1.4.0
-run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION"
+export OZONE_CURRENT_VERSION=1.5.0
+run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION"
+# run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION"
# run_test ha non-rolling-upgrade 1.2.1 "$OZONE_CURRENT_VERSION"
# run_test om-ha non-rolling-upgrade 1.1.0 "$OZONE_CURRENT_VERSION"
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
index 15d4c7e427d..2057cdd8a99 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
@@ -45,6 +45,13 @@ services:
volumes:
- ../..:/opt/ozone
command: ["sleep","1000000"]
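+  # 1.4.0 client container; test.sh needs a container for each version listed in old_versions.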
+ old_client_1_4_0:
+ image: apache/ozone:1.4.0
+ env_file:
+ - docker-config
+ volumes:
+ - ../..:/opt/ozone
+ command: ["sleep","1000000"]
new_client:
image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
index baa239d56a8..419d397c19e 100755
--- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
@@ -21,8 +21,8 @@ COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export COMPOSE_DIR
basename=$(basename ${COMPOSE_DIR})
-current_version=1.4.0
-old_versions="1.0.0 1.1.0 1.2.1 1.3.0" # container is needed for each version in clients.yaml
+current_version=1.5.0
+old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml
# shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh
source "${COMPOSE_DIR}/../testlib.sh"
@@ -77,7 +77,7 @@ test_cross_compatibility() {
test_ec_cross_compatibility() {
echo "Running Erasure Coded storage backward compatibility tests."
- local cluster_versions_with_ec="1.3.0"
+ local cluster_versions_with_ec="1.3.0 1.4.0"
local non_ec_client_versions="1.0.0 1.1.0 1.2.1"
for cluster_version in ${cluster_versions_with_ec}; do
diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
index f5f6644efeb..e75cc7a9127 100644
--- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt
@@ -222,6 +222,26 @@ EPL 2.0
jakarta.ws.rs:jakarta.ws.rs-api
org.aspectj:aspectjrt
org.aspectj:aspectjweaver
+ org.glassfish.hk2.external:aopalliance-repackaged
+ org.glassfish.hk2.external:jakarta.inject
+ org.glassfish.hk2.external:javax.inject
+ org.glassfish.hk2:guice-bridge
+ org.glassfish.hk2:hk2-api
+ org.glassfish.hk2:hk2-locator
+ org.glassfish.hk2:hk2-utils
+ org.glassfish.hk2:osgi-resource-locator
+ org.glassfish.jersey.containers:jersey-container-servlet
+ org.glassfish.jersey.containers:jersey-container-servlet-core
+ org.glassfish.jersey.core:jersey-client
+ org.glassfish.jersey.core:jersey-common
+ org.glassfish.jersey.core:jersey-server
+ org.glassfish.jersey.ext.cdi:jersey-cdi1x
+ org.glassfish.jersey.ext:jersey-entity-filtering
+ org.glassfish.jersey.inject:jersey-hk2
+ org.glassfish.jersey.media:jersey-media-jaxb
+ org.glassfish.jersey.media:jersey-media-json-jackson
+ org.jgrapht:jgrapht-core
+ org.jgrapht:jgrapht-ext
CDDL 1.1 + GPLv2 with classpath exception
@@ -239,26 +259,8 @@ CDDL 1.1 + GPLv2 with classpath exception
javax.servlet:javax.servlet-api
javax.servlet.jsp:jsp-api
javax.ws.rs:jsr311-api
- org.glassfish.hk2.external:aopalliance-repackaged
- org.glassfish.hk2.external:jakarta.inject
- org.glassfish.hk2.external:javax.inject
- org.glassfish.hk2:guice-bridge
- org.glassfish.hk2:hk2-api
- org.glassfish.hk2:hk2-locator
- org.glassfish.hk2:hk2-utils
- org.glassfish.hk2:osgi-resource-locator
org.glassfish.jaxb:jaxb-runtime
org.glassfish.jaxb:txw2
- org.glassfish.jersey.containers:jersey-container-servlet
- org.glassfish.jersey.containers:jersey-container-servlet-core
- org.glassfish.jersey.core:jersey-client
- org.glassfish.jersey.core:jersey-common
- org.glassfish.jersey.core:jersey-server
- org.glassfish.jersey.ext.cdi:jersey-cdi1x
- org.glassfish.jersey.ext:jersey-entity-filtering
- org.glassfish.jersey.inject:jersey-hk2
- org.glassfish.jersey.media:jersey-media-jaxb
- org.glassfish.jersey.media:jersey-media-json-jackson
Apache License 2.0
@@ -447,8 +449,9 @@ MIT
com.bettercloud:vault-java-driver
com.kstruct:gethostname4j
- org.bouncycastle:bcpkix-jdk15on
- org.bouncycastle:bcprov-jdk15on
+ org.bouncycastle:bcpkix-jdk18on
+ org.bouncycastle:bcprov-jdk18on
+ org.bouncycastle:bcutil-jdk18on
org.checkerframework:checker-qual
org.codehaus.mojo:animal-sniffer-annotations
org.kohsuke.metainf-services:metainf-services
@@ -456,24 +459,6 @@ MIT
org.slf4j:slf4j-reload4j
-EPL 2.0
-=====================
-
- jakarta.annotation:jakarta.annotation-api
- jakarta.ws.rs:jakarta.ws.rs-api
- org.jgrapht:jgrapht-core
- org.jgrapht:jgrapht-ext
-
-
-CDDL + GPLv2 with classpath exception
-=====================
-
- javax.annotation:javax.annotation-api
- javax.el:javax.el-api
- javax.interceptor:javax.interceptor-api
- javax.servlet:javax.servlet-api
-
-
Public Domain
=====================
diff --git a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt
index dafb8905d0f..44492fd26f0 100644
--- a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt
+++ b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt
@@ -482,10 +482,10 @@ For additional credits (generally to people who reported problems)
see CREDITS file.
-org.bouncycastle:bcprov-jdk15on
+org.bouncycastle:bcpkix-jdk18on
====================
-Copyright (c) 2000 - 2019 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org)
+Copyright (c) 2000 - 2023 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index 2b582ddaf64..51e30862366 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -9,8 +9,9 @@ share/ozone/lib/aspectjweaver.jar
share/ozone/lib/aws-java-sdk-core.jar
share/ozone/lib/aws-java-sdk-kms.jar
share/ozone/lib/aws-java-sdk-s3.jar
-share/ozone/lib/bcpkix-jdk15on.jar
-share/ozone/lib/bcprov-jdk15on.jar
+share/ozone/lib/bcpkix-jdk18on.jar
+share/ozone/lib/bcprov-jdk18on.jar
+share/ozone/lib/bcutil-jdk18on.jar
share/ozone/lib/bonecp.RELEASE.jar
share/ozone/lib/cdi-api.jar
share/ozone/lib/checker-qual.jar
diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
index 7d9edcdef44..55ed9ddf504 100644
--- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
@@ -32,10 +32,12 @@ Get test user principal
[return] ${user}/${instance}@EXAMPLE.COM
Kinit HTTP user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster
${principal} = Get test user principal HTTP
Wait Until Keyword Succeeds 2min 10sec Execute kinit -k -t /etc/security/keytabs/HTTP.keytab ${principal}
Kinit test user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster
[arguments] ${user} ${keytab}
${TEST_USER} = Get test user principal ${user}
Set Suite Variable ${TEST_USER}
diff --git a/hadoop-ozone/dist/src/main/smoketest/freon/metadata-generate.robot b/hadoop-ozone/dist/src/main/smoketest/freon/metadata-generate.robot
new file mode 100644
index 00000000000..a97fdda8f81
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/freon/metadata-generate.robot
@@ -0,0 +1,75 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation Test freon ommg command
+Resource ../ozone-lib/freon.robot
+Test Timeout 5 minutes
+
+*** Variables ***
+${PREFIX} ${EMPTY}
+${n} 100
+${VOLUME} volume1
+${BUCKET_FSO} bucket-fso
+${BUCKET_OBJ} bucket-obj
+
+*** Test Cases ***
+[Setup] Create Volume and Buckets
+ ${result} = Execute ozone sh volume create /${VOLUME}
+ Should not contain ${result} Failed
+ ${result} = Execute ozone sh bucket create /${VOLUME}/${BUCKET_FSO} -l FILE_SYSTEM_OPTIMIZED
+ Should not contain ${result} Failed
+ ${result} = Execute ozone sh bucket create /${VOLUME}/${BUCKET_OBJ} -l OBJECT_STORE
+ Should not contain ${result} Failed
+
+[Read] Bucket Information
+ ${result} = Execute ozone freon ommg --operation INFO_BUCKET -n ${n} --bucket ${BUCKET_FSO}
+ Should contain ${result} Successful executions: ${n}
+
+[Create] File in FILE_SYSTEM_OPTIMIZED Bucket
+ ${result} = Execute ozone freon ommg --operation CREATE_FILE -n ${n} --size 4096 --volume ${VOLUME} --bucket ${BUCKET_FSO}
+ Should contain ${result} Successful executions: ${n}
+
+[Read] File in FILE_SYSTEM_OPTIMIZED Bucket
+ ${result} = Execute ozone freon ommg --operation READ_FILE -n ${n} --volume ${VOLUME} --bucket ${BUCKET_FSO} --size 4096
+ Should contain ${result} Successful executions: ${n}
+
+[List] File Status in FILE_SYSTEM_OPTIMIZED Bucket
+ ${result} = Execute ozone freon ommg --operation LIST_STATUS -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_FSO} --batch-size ${n}
+ Should contain ${result} Successful executions: 1
+
+[List] light File status in FILE_SYSTEM_OPTIMIZED Bucket
+ ${result} = Execute ozone freon ommg --operation LIST_STATUS_LIGHT -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_FSO} --batch-size ${n}
+ Should contain ${result} Successful executions: 1
+
+[Create] Key in OBJECT_STORE Bucket
+ ${result} = Execute ozone freon ommg --operation CREATE_KEY -n ${n} --size 4096 --volume ${VOLUME} --bucket ${BUCKET_OBJ}
+ Should contain ${result} Successful executions: ${n}
+
+[Read] Key in OBJECT_STORE Bucket
+ ${result} = Execute ozone freon ommg --operation READ_KEY -n ${n} --volume ${VOLUME} --bucket ${BUCKET_OBJ} --size 4096
+ Should contain ${result} Successful executions: ${n}
+
+[List] Keys in OBJECT_STORE Bucket
+ ${result} = Execute ozone freon ommg --operation LIST_KEYS -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_OBJ} --batch-size ${n}
+ Should contain ${result} Successful executions: 1
+
+[List] Light Keys in OBJECT_STORE Bucket
+ ${result} = Execute ozone freon ommg --operation LIST_KEYS_LIGHT -n 1 -t 1 --volume ${VOLUME} --bucket ${BUCKET_OBJ} --batch-size ${n}
+ Should contain ${result} Successful executions: 1
+
+[Get] Key Information in OBJECT_STORE Bucket
+ ${result} = Execute ozone freon ommg --operation GET_KEYINFO -n ${n} --volume ${VOLUME} --bucket ${BUCKET_OBJ}
+ Should contain ${result} Successful executions: ${n}
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
index c0b2c9f7bfa..840fb963d8d 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
@@ -207,3 +207,9 @@ Verify Multipart Upload
${tmp} = Catenate @{files}
Execute cat ${tmp} > /tmp/original${random}
Compare files /tmp/original${random} /tmp/verify${random}
+
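+# Best-effort cleanup: revoke S3 secrets for the current principal and both test users, ignoring errors if no secret exists.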
+Revoke S3 secrets
+ Execute and Ignore Error ozone s3 revokesecret -y
+ Execute and Ignore Error ozone s3 revokesecret -y -u testuser
+ Execute and Ignore Error ozone s3 revokesecret -y -u testuser2
+
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
index b9f6993f45e..70dcfa1abed 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
@@ -21,30 +21,37 @@ Library String
Resource ../commonlib.robot
Resource ./commonawslib.robot
Test Timeout 5 minutes
-Suite Setup Setup s3 tests
Default Tags no-bucket-type
+Test Setup Run Keywords Kinit test user testuser testuser.keytab
+... AND Revoke S3 secrets
+Test Teardown Run Keyword Revoke S3 secrets
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
+${SECURITY_ENABLED} true
*** Test Cases ***
S3 Gateway Generate Secret
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- Should Match Regexp ${result} .*.*
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+ Should Match Regexp ${result} .*.*
+
+S3 Gateway Secret Already Exists
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret ${OM_HA_PARAM}
+ ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret
+ Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True
S3 Gateway Generate Secret By Username
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+ Should Match Regexp ${result} .*.*
+
+S3 Gateway Generate Secret By Username For Other User
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- Should Match Regexp ${result} .*.*
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+ Should Match Regexp ${result} .*.*
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
index 27b4580f419..0f15f23067b 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
@@ -21,8 +21,9 @@ Library String
Resource ../commonlib.robot
Resource ./commonawslib.robot
Test Timeout 5 minutes
-Suite Setup Setup s3 tests
Default Tags no-bucket-type
+Test Setup Run Keywords Kinit test user testuser testuser.keytab
+... AND Revoke S3 secrets
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
@@ -31,19 +32,19 @@ ${SECURITY_ENABLED} true
*** Test Cases ***
S3 Gateway Revoke Secret
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret ${OM_HA_PARAM}
${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
S3 Gateway Revoke Secret By Username
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM}
+ ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+
+S3 Gateway Revoke Secret By Username For Other User
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM}
${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
\ No newline at end of file
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh
index 392372bfc12..f9994438c2f 100755
--- a/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh
+++ b/hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh
@@ -1428,12 +1428,13 @@ function ozone_set_module_access_args
# populate JVM args based on java version
if [[ "${JAVA_MAJOR_VERSION}" -ge 17 ]]; then
- OZONE_MODULE_ACCESS_ARGS="--add-opens java.base/java.lang=ALL-UNNAMED"
OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED"
OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED"
fi
if [[ "${JAVA_MAJOR_VERSION}" -ge 9 ]]; then
OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.base/java.nio=ALL-UNNAMED"
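+    # java.lang open moved here from the JDK 17 branch; java.lang.reflect is newly opened for all JDK 9+ runtimes.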
+ OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.base/java.lang=ALL-UNNAMED"
+ OZONE_MODULE_ACCESS_ARGS="${OZONE_MODULE_ACCESS_ARGS} --add-opens java.base/java.lang.reflect=ALL-UNNAMED"
fi
}
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
index fa6e0ae5756..604608a07fb 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
@@ -34,21 +34,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
      <groupId>org.junit.jupiter</groupId>
      <artifactId>junit-jupiter-engine</artifactId>
      <scope>test</scope>
-    <dependency>
-      <groupId>org.junit.vintage</groupId>
-      <artifactId>junit-vintage-engine</artifactId>
-      <scope>test</scope>
-    </dependency>
      <groupId>org.junit.platform</groupId>
      <artifactId>junit-platform-launcher</artifactId>
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 1ffed5323aa..26f896663b8 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -63,7 +63,7 @@ public class MiniOzoneChaosCluster extends MiniOzoneHAClusterImpl {
private final FailureManager failureManager;
- private final int waitForClusterToBeReadyTimeout = 120000; // 2 min
+ private static final int WAIT_FOR_CLUSTER_TO_BE_READY_TIMEOUT = 120000; // 2 min
private final Set failedOmSet;
private final Set failedScmSet;
@@ -158,7 +158,7 @@ public void waitForClusterToBeReady()
}
}
return true;
- }, 1000, waitForClusterToBeReadyTimeout);
+ }, 1000, WAIT_FOR_CLUSTER_TO_BE_READY_TIMEOUT);
}
/**
@@ -232,7 +232,7 @@ public Builder addFailures(Class extends Failures> clazz) {
protected void initializeConfiguration() throws IOException {
super.initializeConfiguration();
- OzoneClientConfig clientConfig = new OzoneClientConfig();
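+      // Start from the client config already present in the cluster conf so existing settings are preserved before tuning buffer sizes.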
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setStreamBufferFlushSize(8 * 1024 * 1024);
clientConfig.setStreamBufferMaxSize(16 * 1024 * 1024);
clientConfig.setStreamBufferSize(4 * 1024);
diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml
index dcd03c04fa8..f5e044ddac2 100644
--- a/hadoop-ozone/insight/pom.xml
+++ b/hadoop-ozone/insight/pom.xml
@@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
Apache Ozone Insight Tool
jar
- false
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
index b2d68545d06..85faf99419a 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
@@ -17,12 +17,14 @@
*/
package org.apache.hadoop.ozone.insight;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.HashMap;
import java.util.Map;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
/**
* Test common insight point utility methods.
*/
@@ -42,14 +44,14 @@ public String getDescription() {
Map filters = new HashMap<>();
filters.put("datanode", "123");
- Assertions.assertTrue(insightPoint
+ assertTrue(insightPoint
.filterLog(filters, "This a log specific to [datanode=123]"));
- Assertions.assertFalse(insightPoint
+ assertFalse(insightPoint
.filterLog(filters, "This a log specific to [datanode=234]"));
//with empty filters
- Assertions.assertTrue(insightPoint
+ assertTrue(insightPoint
.filterLog(new HashMap<>(), "This a log specific to [datanode=234]"));
//with multiple filters
@@ -57,14 +59,14 @@ public String getDescription() {
filters.put("datanode", "123");
filters.put("pipeline", "abcd");
- Assertions.assertFalse(insightPoint
+ assertFalse(insightPoint
.filterLog(filters, "This a log specific to [datanode=123]"));
- Assertions.assertTrue(insightPoint
+ assertTrue(insightPoint
.filterLog(filters,
"This a log specific to [datanode=123] [pipeline=abcd]"));
- Assertions.assertFalse(insightPoint
+ assertFalse(insightPoint
.filterLog(filters,
"This a log specific to [datanode=456] [pipeline=abcd]"));
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
index 9be82ebc41d..701652bee09 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
@@ -27,11 +27,12 @@
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
+import static org.assertj.core.api.Assertions.assertThat;
+
/**
* Test insight report which prints out configs.
*/
@@ -60,12 +61,12 @@ public void testPrintConfig() throws UnsupportedEncodingException {
subCommand.printConfig(CustomConfig.class, conf);
final String output = out.toString(StandardCharsets.UTF_8.name());
- Assertions.assertTrue(output.contains(">>> ozone.scm.client.address"));
- Assertions.assertTrue(output.contains("default: localhost"));
- Assertions.assertTrue(output.contains("current: omclient"));
- Assertions.assertTrue(output.contains(">>> ozone.scm.client.secure"));
- Assertions.assertTrue(output.contains("default: true"));
- Assertions.assertTrue(output.contains("current: true"));
+ assertThat(output).contains(">>> ozone.scm.client.address");
+ assertThat(output).contains("default: localhost");
+ assertThat(output).contains("current: omclient");
+ assertThat(output).contains(">>> ozone.scm.client.secure");
+ assertThat(output).contains("default: true");
+ assertThat(output).contains("current: true");
}
/**
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
index 01402085861..f895a91c537 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
@@ -17,9 +17,10 @@
*/
package org.apache.hadoop.ozone.insight;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
/**
* Testing utility methods of the log subcommand test.
*/
@@ -36,6 +37,6 @@ public void filterLog() {
+ "storageLocation: \"/tmp/hadoop-neo/dfs/data\"\\n capacity: "
+ "250438021120\\n scmUsed: 16384\\n remaining: 212041244672\\n "
+ "storageType: DISK\\n failed: false\\n}\\n");
- Assertions.assertEquals(10, result.split("\n").length);
+ assertEquals(10, result.split("\n").length);
}
}
diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
index 00f7f4daf1d..92ceb203b11 100644
--- a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
@@ -105,16 +105,12 @@
-
-
-
-
-
+
@@ -130,7 +126,7 @@
-
+
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index 3eef8fa58c0..913cd639bf7 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -119,21 +119,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <type>test-jar</type>
      <scope>test</scope>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
      <groupId>org.junit.jupiter</groupId>
      <artifactId>junit-jupiter-engine</artifactId>
      <scope>test</scope>
-    <dependency>
-      <groupId>org.junit.vintage</groupId>
-      <artifactId>junit-vintage-engine</artifactId>
-      <scope>test</scope>
-    </dependency>
      <groupId>org.junit.platform</groupId>
      <artifactId>junit-platform-launcher</artifactId>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index 2db8faaa6ea..51d75c07d2d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
index b4d494f771d..ff5ed3b0624 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java
index 5e5c9173954..07405dc9cd7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.fs.Path;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.FileNotFoundException;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java
index ca1a757e9d2..0d6c30e52c0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java
@@ -25,7 +25,7 @@
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index b3b91ce467a..c39a9be1619 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -28,7 +28,7 @@
import org.apache.hadoop.fs.StreamCapabilities;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
index 7a606144851..8ea9357f23b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.fs.Path;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
index 02c419b09a1..2624605ed25 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
@@ -21,7 +21,7 @@
import java.nio.charset.StandardCharsets;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
index 314d289c2a8..21290d1e889 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createSubdirs;
import static org.apache.hadoop.fs.contract.ContractTestUtils.iteratorToList;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java
index fbe47302ffc..2bde7b757a1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java
@@ -22,7 +22,7 @@
import java.io.IOException;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LeaseRecoverable;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
index a64398a54f7..22f947abc64 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
index 906c110d3dc..86363b55ccf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
@@ -30,7 +30,7 @@
import com.google.common.base.Charsets;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
index 2527aacfd9e..166e8e301e4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
@@ -45,7 +45,7 @@
import static org.assertj.core.api.Assertions.fail;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Test Open operations.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
index 6ae118d3463..96ecb01bfac 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.FileNotFoundException;
import java.io.IOException;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
index 8721951e656..3ff3f72cc6e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.assertj.core.api.Assertions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java
index 72d0dce9ff9..88666ee8a95 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.fs.contract;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.SafeMode;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index c9c51f360fd..618025dc06f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs.contract;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -25,13 +26,12 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
-import java.util.Random;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -341,15 +341,14 @@ public void testRandomSeeks() throws Throwable {
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
createFile(getFileSystem(), randomSeekFile, true, buf);
- Random r = new Random();
// Record the sequence of seeks and reads which trigger a failure.
int[] seeks = new int[10];
int[] reads = new int[10];
try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) {
for (int i = 0; i < limit; i++) {
- int seekOff = r.nextInt(buf.length);
- int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
+ int seekOff = RandomUtils.nextInt(0, buf.length);
+ int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000));
seeks[i % seeks.length] = seekOff;
reads[i % reads.length] = toRead;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
index f9267dbf519..b9a86ae366c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
@@ -21,7 +21,7 @@
import java.io.FileNotFoundException;
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
index 6312bd6060a..07c4f26543a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.fs.contract;
import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.Arrays;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
index 586364eb076..b34e945a3dc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
@@ -22,19 +22,11 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.ozone.test.JUnit5AwareTimeout;
import org.assertj.core.api.Assertions;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
-import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -78,12 +70,8 @@ public abstract class AbstractFSContractTestBase implements ContractOptions {
*/
private Path testPath;
- @Rule
- public TestName methodName = new TestName();
-
private String testMethodName;
- @BeforeClass
@BeforeAll
public static void nameTestThread() {
Thread.currentThread().setName("JUnit");
@@ -93,12 +81,6 @@ private void nameThread() {
Thread.currentThread().setName("JUnit-" + getMethodName());
}
- @Before
- public void getTestMethodName() {
- testMethodName = methodName.getMethodName();
- nameThread();
- }
-
@BeforeEach
void getTestMethodName(TestInfo testInfo) {
testInfo.getTestMethod().ifPresent(m -> testMethodName = m.getName());
@@ -179,12 +161,6 @@ protected Configuration createConfiguration() {
return new Configuration();
}
- /**
- * Set the timeout for every test.
- */
- @Rule
- public TestRule testTimeout = new JUnit5AwareTimeout(new Timeout(getTestTimeoutMillis()));
-
/**
* Option for tests to override the default timeout value.
* @return the current test timeout
@@ -198,7 +174,6 @@ protected int getTestTimeoutMillis() {
* Setup: create the contract then init it.
* @throws Exception on any failure
*/
- @Before
@BeforeEach
public void setup() throws Exception {
Thread.currentThread().setName("setup");
@@ -231,7 +206,6 @@ public void setup() throws Exception {
* Teardown.
* @throws Exception on any failure
*/
- @After
@AfterEach
public void teardown() throws Exception {
Thread.currentThread().setName("teardown");
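The removals above drop the JUnit 4 @Rule TestName and Timeout machinery; the test method name now comes from the injected JUnit 5 TestInfo. A minimal sketch of that pattern (class and method names are hypothetical):

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;

import static org.junit.jupiter.api.Assertions.assertEquals;

class MethodNameSketch {
  private String testMethodName;

  @BeforeEach
  void captureMethodName(TestInfo testInfo) {
    // JUnit 5 injects TestInfo into lifecycle methods; getTestMethod() is present
    // for regular @Test methods, so no @Rule TestName is needed.
    testInfo.getTestMethod().ifPresent(m -> testMethodName = m.getName());
  }

  @Test
  void example() {
    assertEquals("example", testMethodName);
  }
}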
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index b62606a6865..a9fc2710ce3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -117,6 +117,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
@@ -393,7 +394,7 @@ private void checkInvalidPath(Path path) {
InvalidPathException pathException = assertThrows(
InvalidPathException.class, () -> fs.create(path, false)
);
- assertTrue(pathException.getMessage().contains("Invalid path Name"));
+ assertThat(pathException.getMessage()).contains("Invalid path Name");
}
@Test
@@ -467,7 +468,7 @@ public void testRecursiveDelete() throws Exception {
// delete a dir with sub-file
try {
FileStatus[] parents = fs.listStatus(grandparent);
- assertTrue(parents.length > 0);
+ assertThat(parents.length).isGreaterThan(0);
fs.delete(parents[0].getPath(), false);
fail("Must throw exception as dir is not empty!");
} catch (PathIsNotEmptyDirectoryException pde) {
@@ -538,8 +539,8 @@ private void checkPath(Path path) {
fs.getFileStatus(path);
fail("testRecursiveDelete failed");
} catch (IOException ex) {
- assertTrue(ex instanceof FileNotFoundException);
- assertTrue(ex.getMessage().contains("No such file or directory"));
+ assertInstanceOf(FileNotFoundException.class, ex);
+ assertThat(ex.getMessage()).contains("No such file or directory");
}
}
@@ -749,7 +750,7 @@ public void testListStatusOnLargeDirectory() throws Exception {
assertEquals(numDirs, fileStatuses.length, "Total directories listed do not match the existing directories");
for (int i = 0; i < numDirs; i++) {
- assertTrue(paths.contains(fileStatuses[i].getPath().getName()));
+ assertThat(paths).contains(fileStatuses[i].getPath().getName());
}
}
@@ -1004,7 +1005,7 @@ public void testSeekOnFileLength() throws IOException {
fs.open(fileNotExists);
fail("Should throw FileNotFoundException as file doesn't exist!");
} catch (FileNotFoundException fnfe) {
- assertTrue(fnfe.getMessage().contains("KEY_NOT_FOUND"), "Expected KEY_NOT_FOUND error");
+ assertThat(fnfe.getMessage()).contains("KEY_NOT_FOUND");
}
}
@@ -1027,12 +1028,16 @@ public void testAllocateMoreThanOneBlock() throws IOException {
FileStatus fileStatus = fs.getFileStatus(file);
long blkSize = fileStatus.getBlockSize();
long fileLength = fileStatus.getLen();
- assertTrue(fileLength > blkSize, "Block allocation should happen");
+ assertThat(fileLength)
+ .withFailMessage("Block allocation should happen")
+ .isGreaterThan(blkSize);
long newNumBlockAllocations =
cluster.getOzoneManager().getMetrics().getNumBlockAllocates();
- assertTrue((newNumBlockAllocations > numBlockAllocationsOrg), "Block allocation should happen");
+ assertThat(newNumBlockAllocations)
+ .withFailMessage("Block allocation should happen")
+ .isGreaterThan(numBlockAllocationsOrg);
stream.seek(fileLength);
assertEquals(-1, stream.read());
@@ -1367,7 +1372,7 @@ public void testRenameDir() throws Exception {
IllegalArgumentException exception = assertThrows(
IllegalArgumentException.class,
() -> fs.rename(new Path(fs.getUri().toString() + "fake" + dir), dest));
- assertTrue(exception.getMessage().contains("Wrong FS"));
+ assertThat(exception.getMessage()).contains("Wrong FS");
}
private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory)
@@ -1420,7 +1425,7 @@ public void testGetDirectoryModificationTime()
for (int i = 0; i < 5; i++) {
Thread.sleep(10);
fileStatuses = o3fs.listStatus(mdir1);
- assertTrue(modificationTime <= fileStatuses[0].getModificationTime());
+ assertThat(modificationTime).isLessThanOrEqualTo(fileStatuses[0].getModificationTime());
}
}
@@ -1818,7 +1823,7 @@ public void testOzFsReadWrite() throws IOException {
// The timestamp of the newly created file should always be greater than
// the time when the test was started
- assertTrue(status.getModificationTime() > currentTime);
+ assertThat(status.getModificationTime()).isGreaterThan(currentTime);
assertFalse(status.isDirectory());
assertEquals(FsPermission.getFileDefault(), status.getPermission());
@@ -1969,7 +1974,7 @@ void testListStatus2() throws IOException {
assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2);
assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus());
for (Path p : paths) {
- assertTrue(Arrays.asList(statusList).contains(fs.getFileStatus(p)));
+ assertThat(Arrays.asList(statusList)).contains(fs.getFileStatus(p));
}
}
@@ -2007,7 +2012,7 @@ void testOzoneManagerFileSystemInterface() throws IOException {
// doesn't actually exist on server; if it exists, it will be a fixed value.
// In this case, the dir key exists.
assertEquals(0, omStatus.getKeyInfo().getDataSize());
- assertTrue(omStatus.getKeyInfo().getModificationTime() <= currentTime);
+ assertThat(omStatus.getKeyInfo().getModificationTime()).isLessThanOrEqualTo(currentTime);
assertEquals(new Path(omStatus.getPath()).getName(),
o3fs.pathToKey(path));
}
@@ -2021,13 +2026,12 @@ public void testOzoneManagerLocatedFileStatus() throws IOException {
stream.writeBytes(data);
}
FileStatus status = fs.getFileStatus(path);
- assertTrue(status instanceof LocatedFileStatus);
- LocatedFileStatus locatedFileStatus = (LocatedFileStatus) status;
- assertTrue(locatedFileStatus.getBlockLocations().length >= 1);
+ LocatedFileStatus locatedFileStatus = assertInstanceOf(LocatedFileStatus.class, status);
+ assertThat(locatedFileStatus.getBlockLocations().length).isGreaterThanOrEqualTo(1);
for (BlockLocation blockLocation : locatedFileStatus.getBlockLocations()) {
- assertTrue(blockLocation.getNames().length >= 1);
- assertTrue(blockLocation.getHosts().length >= 1);
+ assertThat(blockLocation.getNames().length).isGreaterThanOrEqualTo(1);
+ assertThat(blockLocation.getHosts().length).isGreaterThanOrEqualTo(1);
}
}
@@ -2047,8 +2051,7 @@ void testBlockOffsetsWithMultiBlockFile() throws Exception {
stream.writeBytes(data);
}
FileStatus status = fs.getFileStatus(path);
- assertTrue(status instanceof LocatedFileStatus);
- LocatedFileStatus locatedFileStatus = (LocatedFileStatus) status;
+ LocatedFileStatus locatedFileStatus = assertInstanceOf(LocatedFileStatus.class, status);
BlockLocation[] blockLocations = locatedFileStatus.getBlockLocations();
assertEquals(0, blockLocations[0].getOffset());
@@ -2100,7 +2103,7 @@ void testFileSystemWithObjectStoreLayout() throws IOException {
config.set(FS_DEFAULT_NAME_KEY, obsRootPath);
IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> FileSystem.get(config));
- assertTrue(e.getMessage().contains("OBJECT_STORE, which does not support file system semantics"));
+ assertThat(e.getMessage()).contains("OBJECT_STORE, which does not support file system semantics");
}
}
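The assertTrue-to-assertThat conversions in this file trade a plain boolean check for AssertJ's fluent comparisons, which print the offending values when they fail. A minimal sketch of the two forms side by side (values are hypothetical):

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;

class FluentAssertionSketch {
  void compare(long fileLength, long blockSize) {
    // JUnit form: a failure reports only that the condition was false.
    assertTrue(fileLength > blockSize, "Block allocation should happen");
    // AssertJ form: a failure prints both operands alongside the custom message.
    assertThat(fileLength)
        .withFailMessage("Block allocation should happen")
        .isGreaterThan(blockSize);
  }
}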
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java
index 2d4c310c886..f0ff1ab43b4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTestWithFSO.java
@@ -45,8 +45,10 @@
import java.io.IOException;
import java.util.ArrayList;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -305,8 +307,8 @@ public void testRenameParentDirModificationTime() throws IOException {
.getModificationTime();
// rename should change the parent directory of source and object files
// modification time but not change modification time of the renamed file
- assertTrue(dir1BeforeMTime < dir1AfterMTime);
- assertTrue(dir2BeforeMTime < dir2AfterMTime);
+ assertThat(dir1BeforeMTime).isLessThan(dir1AfterMTime);
+ assertThat(dir2BeforeMTime).isLessThan(dir2AfterMTime);
assertEquals(file1BeforeMTime, file1AfterMTime);
// mv "/dir1/subdir1/" to "/dir2/subdir1/"
@@ -323,8 +325,8 @@ public void testRenameParentDirModificationTime() throws IOException {
dir2AfterMTime = getFs().getFileStatus(dir2).getModificationTime();
long subdir1AfterMTime = getFs().getFileStatus(renamedSubdir1)
.getModificationTime();
- assertTrue(dir1BeforeMTime < dir1AfterMTime);
- assertTrue(dir2BeforeMTime < dir2AfterMTime);
+ assertThat(dir1BeforeMTime).isLessThan(dir1AfterMTime);
+ assertThat(dir2BeforeMTime).isLessThan(dir2AfterMTime);
assertEquals(subdir1BeforeMTime, subdir1AfterMTime);
}
@@ -379,7 +381,7 @@ private void renameAndAssert(OMMetadataManager omMgr,
long bucketAfterMTime = omBucketInfo.getModificationTime();
long fileAfterMTime = getFs().getFileStatus(to).getModificationTime();
if (exceptChangeMtime) {
- assertTrue(bucketBeforeMTime < bucketAfterMTime);
+ assertThat(bucketBeforeMTime).isLessThan(bucketAfterMTime);
} else {
assertEquals(bucketBeforeMTime, bucketAfterMTime);
}
@@ -434,7 +436,7 @@ public void testMultiLevelDirs() throws Exception {
long d6ObjectID =
verifyDirKey(volumeId, bucketId, d4ObjectID,
"d6", "/d1/d2/d3/d4/d6", dirKeys, omMgr);
- assertTrue(d5ObjectID != d6ObjectID, "Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID
+ assertNotEquals(d5ObjectID, d6ObjectID, "Wrong objectIds for sub-dirs[" + d5ObjectID + "/d5, " + d6ObjectID
+ "/d6] of same parent!");
assertEquals(6, getCluster().getOzoneManager().getMetrics().getNumKeys(), "Wrong OM numKeys metrics");
@@ -520,10 +522,10 @@ public void testFSDeleteLogWarnNoExist() throws Exception {
GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer
.captureLogs(BasicOzoneClientAdapterImpl.LOG);
getFs().delete(new Path("/d1/d3/noexist/"), true);
- assertTrue(logCapture.getOutput().contains(
- "delete key failed Unable to get file status"));
- assertTrue(logCapture.getOutput().contains(
- "WARN ozone.BasicOzoneClientAdapterImpl"));
+ assertThat(logCapture.getOutput()).contains(
+ "delete key failed Unable to get file status");
+ assertThat(logCapture.getOutput()).contains(
+ "WARN ozone.BasicOzoneClientAdapterImpl");
}
private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
@@ -546,7 +548,7 @@ long verifyDirKey(long volumeId, long bucketId, long parentId,
" using dbKey: " + dbKey);
assertEquals(parentId, dirInfo.getParentObjectID(), "Parent Id mismatches");
assertEquals(dirKey, dirInfo.getName(), "Mismatches directory name");
- assertTrue(dirInfo.getCreationTime() > 0, "Mismatches directory creation time param");
+ assertThat(dirInfo.getCreationTime()).isGreaterThan(0);
assertEquals(dirInfo.getCreationTime(), dirInfo.getModificationTime());
return dirInfo.getObjectID();
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
index 9bd1025e694..d44342acc43 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
@@ -96,7 +96,6 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
-import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
@@ -125,6 +124,7 @@
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.DELETE;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -776,7 +776,7 @@ void testListStatusOnLargeDirectory() throws Exception {
assertEquals(numDirs, fileStatuses.length, "Total directories listed do not match the existing directories");
for (int i = 0; i < numDirs; i++) {
- assertTrue(paths.contains(fileStatuses[i].getPath().getName()));
+ assertThat(paths).contains(fileStatuses[i].getPath().getName());
}
} finally {
// Cleanup
@@ -1109,7 +1109,7 @@ void testListStatusRootAndVolumeRecursive() throws IOException {
*/
private FileStatus[] customListStatus(Path f, boolean recursive,
String startPath, int numEntries) throws IOException {
- assertTrue(numEntries > 0);
+ assertThat(numEntries).isGreaterThan(0);
LinkedList<FileStatus> statuses = new LinkedList<>();
List<FileStatus> tmpStatusList;
do {
@@ -1486,9 +1486,9 @@ void testSymlinkList() throws Exception {
new GenericTestUtils.SystemOutCapturer()) {
String linkPathStr = rootPath + destVolume;
ToolRunner.run(shell, new String[]{"-ls", linkPathStr});
- assertTrue(capture.getOutput().contains("drwxrwxrwx"));
- assertTrue(capture.getOutput().contains(linkPathStr +
- OZONE_URI_DELIMITER + srcBucket));
+ assertThat(capture.getOutput()).contains("drwxrwxrwx");
+ assertThat(capture.getOutput()).contains(linkPathStr +
+ OZONE_URI_DELIMITER + srcBucket);
} finally {
shell.close();
}
@@ -1509,12 +1509,12 @@ void testSymlinkList() throws Exception {
String linkPathStr = rootPath + destVolume;
ToolRunner.run(shell, new String[]{"-ls", "-R",
linkPathStr + OZONE_URI_DELIMITER + srcBucket});
- assertTrue(capture.getOutput().contains("drwxrwxrwx"));
- assertTrue(capture.getOutput().contains(linkPathStr +
- OZONE_URI_DELIMITER + srcBucket));
- assertTrue(capture.getOutput().contains("-rw-rw-rw-"));
- assertTrue(capture.getOutput().contains(linkPathStr +
- OZONE_URI_DELIMITER + srcBucket + OZONE_URI_DELIMITER + key));
+ assertThat(capture.getOutput()).contains("drwxrwxrwx");
+ assertThat(capture.getOutput()).contains(linkPathStr +
+ OZONE_URI_DELIMITER + srcBucket);
+ assertThat(capture.getOutput()).contains("-rw-rw-rw-");
+ assertThat(capture.getOutput()).contains(linkPathStr +
+ OZONE_URI_DELIMITER + srcBucket + OZONE_URI_DELIMITER + key);
} finally {
shell.close();
}
@@ -1678,7 +1678,7 @@ void testDeleteBucketLink() throws Exception {
// confirm link is gone
FileNotFoundException exception = assertThrows(FileNotFoundException.class,
() -> fs.getFileStatus(dirPathLink));
- assertTrue(exception.getMessage().contains("File not found."));
+ assertThat(exception.getMessage()).contains("File not found.");
// Cleanup
fs.delete(bucketPath1, true);
@@ -1928,15 +1928,15 @@ void testTrash() throws Exception {
}, 1000, 180000);
if (isBucketFSOptimized) {
- assertTrue(getOMMetrics()
- .getNumTrashAtomicDirRenames() > prevNumTrashAtomicDirRenames);
+ assertThat(getOMMetrics().getNumTrashAtomicDirRenames())
+ .isGreaterThan(prevNumTrashAtomicDirRenames);
} else {
// This condition should pass after the checkpoint
- assertTrue(getOMMetrics()
- .getNumTrashRenames() > prevNumTrashRenames);
+ assertThat(getOMMetrics().getNumTrashRenames())
+ .isGreaterThan(prevNumTrashRenames);
// With new layout version, file renames wouldn't be counted
- assertTrue(getOMMetrics()
- .getNumTrashFilesRenames() > prevNumTrashFileRenames);
+ assertThat(getOMMetrics().getNumTrashFilesRenames())
+ .isGreaterThan(prevNumTrashFileRenames);
}
// wait for deletion of checkpoint dir
@@ -1995,13 +1995,13 @@ void testCreateWithInvalidPaths() {
private void checkInvalidPath(Path path) {
InvalidPathException exception = assertThrows(InvalidPathException.class,
() -> fs.create(path, false));
- assertTrue(exception.getMessage().contains("Invalid path Name"));
+ assertThat(exception.getMessage()).contains("Invalid path Name");
}
@Test
void testRenameFile() throws Exception {
- final String dir = "/dir" + new Random().nextInt(1000);
+ final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
Path file1Source = new Path(getBucketPath() + dir
+ "/file1_Copy");
@@ -2027,7 +2027,7 @@ void testRenameFile() throws Exception {
*/
@Test
void testRenameFileToDir() throws Exception {
- final String dir = "/dir" + new Random().nextInt(1000);
+ final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
getFs().mkdirs(dirPath);
@@ -2447,7 +2447,7 @@ void testSnapshotDiff() throws Exception {
IllegalArgumentException.class,
() -> ofs.getSnapshotDiffReport(volumePath1, finalFromSnap,
finalToSnap));
- assertTrue(exception.getMessage().contains(errorMsg));
+ assertThat(exception.getMessage()).contains(errorMsg);
}
@Test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
index d5c042bb036..87f114bd711 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
@@ -57,7 +57,9 @@
import java.util.concurrent.TimeoutException;
import java.util.function.LongSupplier;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
@@ -177,7 +179,7 @@ public void testDeleteEmptyDirectory() throws Exception {
assertEquals(root.getName(), iterator.next().getValue().getName());
}
- assertTrue(dirDeletingService.getRunCount().get() > 1);
+ assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1);
}
/**
@@ -244,9 +246,9 @@ public void testDeleteWithLargeSubPathsThanBatchSize() throws Exception {
assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 19);
long elapsedRunCount = dirDeletingService.getRunCount().get() - preRunCount;
- assertTrue(dirDeletingService.getRunCount().get() > 1);
+ assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1);
// Ensure dir deleting speed, here provide a backup value for safe CI
- assertTrue(elapsedRunCount >= 7);
+ assertThat(elapsedRunCount).isGreaterThanOrEqualTo(7);
}
@Test
@@ -295,7 +297,7 @@ public void testDeleteWithMultiLevels() throws Exception {
assertSubPathsCount(dirDeletingService::getMovedDirsCount, 2);
assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 5);
- assertTrue(dirDeletingService.getRunCount().get() > 1);
+ assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1);
}
@Test
@@ -545,13 +547,8 @@ private boolean assertTableRowCount(int expectedCount,
}
private void checkPath(Path path) {
- try {
- fs.getFileStatus(path);
- fail("testRecursiveDelete failed");
- } catch (IOException ex) {
- assertTrue(ex instanceof FileNotFoundException);
- assertTrue(ex.getMessage().contains("No such file or directory"));
- }
+ FileNotFoundException ex = assertThrows(FileNotFoundException.class, () -> fs.getFileStatus(path));
+ assertThat(ex.getMessage()).contains("No such file or directory");
}
private static BucketLayout getFSOBucketLayout() {
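The checkPath rewrite above shows the general pattern used throughout this patch: assertThrows replaces try/fail/catch and returns the caught exception, while assertInstanceOf replaces assertTrue(x instanceof T) and performs the cast. A minimal self-contained sketch (names and messages are hypothetical):

import java.io.FileNotFoundException;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;

class ThrowsSketch {
  static String load(String name) throws FileNotFoundException {
    throw new FileNotFoundException(name + ": No such file or directory");
  }

  void verify() {
    // Fails the test if nothing (or the wrong type) is thrown; otherwise returns
    // the exception so the message can be checked on the same path.
    FileNotFoundException ex =
        assertThrows(FileNotFoundException.class, () -> load("missing"));
    assertThat(ex.getMessage()).contains("No such file or directory");

    // Checks the type and performs the cast in one step.
    IllegalStateException cause =
        assertInstanceOf(IllegalStateException.class, new IllegalStateException("boom"));
    assertThat(cause).hasMessage("boom");
  }
}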
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index 8d7439604e8..a8c450e3cc9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -21,6 +21,8 @@
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
@@ -28,29 +30,37 @@
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Stream;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.crypto.Encryptor;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.storage.BlockInputStream;
+import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StreamCapabilities;
+
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -58,8 +68,10 @@
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.io.ECKeyOutputStream;
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
@@ -82,11 +94,16 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.junit.jupiter.params.provider.ValueSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT;
@@ -96,11 +113,13 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -120,12 +139,13 @@ public class TestHSync {
private static OzoneClient client;
private static final BucketLayout BUCKET_LAYOUT = BucketLayout.FILE_SYSTEM_OPTIMIZED;
+ private static final int CHUNK_SIZE = 4 << 12;
+ private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+ private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+ private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+
@BeforeAll
public static void init() throws Exception {
- final int chunkSize = 4 << 10;
- final int flushSize = 2 * chunkSize;
- final int maxFlushSize = 2 * flushSize;
- final int blockSize = 2 * maxFlushSize;
final BucketLayout layout = BUCKET_LAYOUT;
CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false);
@@ -133,17 +153,21 @@ public static void init() throws Exception {
CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
// Reduce KeyDeletingService interval
CONF.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS);
+ CONF.setBoolean("ozone.client.incremental.chunk.list", true);
+ CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true);
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
+ .setDataStreamBufferFlushSize(MAX_FLUSH_SIZE)
+ .setDataStreamMinPacketSize(CHUNK_SIZE)
+ .setDataStreamWindowSize(5 * CHUNK_SIZE)
+ .applyTo(CONF);
+
cluster = MiniOzoneCluster.newBuilder(CONF)
.setNumDatanodes(5)
.setTotalPipelineNumLimit(10)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
- .setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
@@ -155,6 +179,8 @@ public static void init() throws Exception {
GenericTestUtils.setLogLevel(OMKeyRequest.LOG, Level.DEBUG);
GenericTestUtils.setLogLevel(OMKeyCommitRequest.LOG, Level.DEBUG);
GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG);
+ GenericTestUtils.setLogLevel(BlockOutputStream.LOG, Level.DEBUG);
+ GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG);
}
@AfterAll
@@ -287,13 +313,15 @@ public void testKeyHSyncThenClose() throws Exception {
}
}
- @Test
- public void testO3fsHSync() throws Exception {
+ @ParameterizedTest
+ @ValueSource(booleans = {false, true})
+ public void testO3fsHSync(boolean incrementalChunkList) throws Exception {
// Set the fs.defaultFS
final String rootPath = String.format("%s://%s.%s/",
OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+ initClientConfig(incrementalChunkList);
try (FileSystem fs = FileSystem.get(CONF)) {
for (int i = 0; i < 10; i++) {
final Path file = new Path("/file" + i);
@@ -302,8 +330,10 @@ public void testO3fsHSync() throws Exception {
}
}
- @Test
- public void testOfsHSync() throws Exception {
+
+ @ParameterizedTest
+ @ValueSource(booleans = {false, true})
+ public void testOfsHSync(boolean incrementalChunkList) throws Exception {
// Set the fs.defaultFS
final String rootPath = String.format("%s://%s/",
OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY));
@@ -312,6 +342,7 @@ public void testOfsHSync() throws Exception {
final String dir = OZONE_ROOT + bucket.getVolumeName()
+ OZONE_URI_DELIMITER + bucket.getName();
+ initClientConfig(incrementalChunkList);
try (FileSystem fs = FileSystem.get(CONF)) {
for (int i = 0; i < 10; i++) {
final Path file = new Path(dir, "file" + i);
@@ -429,13 +460,11 @@ public void testHsyncKeyCallCount() throws Exception {
ThreadLocalRandom.current().nextBytes(data);
final Path file = new Path(dir, "file-hsync-then-close");
- long blockSize;
try (FileSystem fs = FileSystem.get(CONF)) {
- blockSize = fs.getDefaultBlockSize(file);
long fileSize = 0;
try (FSDataOutputStream outputStream = fs.create(file, true)) {
// make sure at least writing 2 blocks data
- while (fileSize <= blockSize) {
+ while (fileSize <= BLOCK_SIZE) {
outputStream.write(data, 0, data.length);
outputStream.hsync();
fileSize += data.length;
@@ -448,9 +477,9 @@ public void testHsyncKeyCallCount() throws Exception {
omMetrics.resetNumKeyHSyncs();
long writtenSize = 0;
try (OzoneOutputStream outputStream = bucket.createKey("key-" + RandomStringUtils.randomNumeric(5),
- blockSize * 2, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) {
+ BLOCK_SIZE * 2, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) {
// make sure at least writing 2 blocks data
- while (writtenSize <= blockSize) {
+ while (writtenSize <= BLOCK_SIZE) {
outputStream.write(data, 0, data.length);
outputStream.hsync();
writtenSize += data.length;
@@ -733,4 +762,117 @@ private void testEncryptedStreamCapabilities(boolean isEC) throws IOException,
assertFalse(cofsos.hasCapability(StreamCapabilities.HFLUSH));
}
}
+
+ public void initClientConfig(boolean incrementalChunkList) {
+ OzoneClientConfig clientConfig = CONF.getObject(OzoneClientConfig.class);
+ clientConfig.setIncrementalChunkList(incrementalChunkList);
+ clientConfig.setChecksumType(ContainerProtos.ChecksumType.CRC32C);
+ CONF.setFromObject(clientConfig);
+ }
+
+ public static Stream<Arguments> parameters1() {
+ return Stream.of(
+ arguments(true, 512),
+ arguments(true, 511),
+ arguments(true, 513),
+ arguments(false, 512),
+ arguments(false, 511),
+ arguments(false, 513)
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource("parameters1")
+ public void writeWithSmallBuffer(boolean incrementalChunkList, int bufferSize)
+ throws IOException {
+ initClientConfig(incrementalChunkList);
+
+ final String keyName = UUID.randomUUID().toString();
+ int fileSize = 16 << 11;
+ String s = RandomStringUtils.randomAlphabetic(bufferSize);
+ ByteBuffer byteBuffer = ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
+
+ int writtenSize = 0;
+ try (OzoneOutputStream out = bucket.createKey(keyName, fileSize,
+ ReplicationConfig.getDefault(CONF), new HashMap<>())) {
+ while (writtenSize < fileSize) {
+ int len = Math.min(bufferSize, fileSize - writtenSize);
+ out.write(byteBuffer, 0, len);
+ out.hsync();
+ writtenSize += bufferSize;
+ }
+ }
+
+ OzoneKeyDetails keyInfo = bucket.getKey(keyName);
+ assertEquals(fileSize, keyInfo.getDataSize());
+
+ int readSize = 0;
+ try (OzoneInputStream is = bucket.readKey(keyName)) {
+ while (readSize < fileSize) {
+ int len = Math.min(bufferSize, fileSize - readSize);
+ ByteBuffer readBuffer = ByteBuffer.allocate(len);
+ int readLen = is.read(readBuffer);
+ assertEquals(len, readLen);
+ if (len < bufferSize) {
+ for (int i = 0; i < len; i++) {
+ assertEquals(readBuffer.array()[i], byteBuffer.array()[i]);
+ }
+ } else {
+ assertArrayEquals(readBuffer.array(), byteBuffer.array());
+ }
+ readSize += readLen;
+ }
+ }
+ bucket.deleteKey(keyName);
+ }
+
+ public static Stream<Arguments> parameters2() {
+ return Stream.of(
+ arguments(true, 1024 * 1024 + 1),
+ arguments(true, 1024 * 1024 + 1 + CHUNK_SIZE),
+ arguments(true, 1024 * 1024 - 1 + CHUNK_SIZE),
+ arguments(false, 1024 * 1024 + 1),
+ arguments(false, 1024 * 1024 + 1 + CHUNK_SIZE),
+ arguments(false, 1024 * 1024 - 1 + CHUNK_SIZE)
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource("parameters2")
+ public void writeWithBigBuffer(boolean incrementalChunkList, int bufferSize)
+ throws IOException {
+ initClientConfig(incrementalChunkList);
+
+ final String keyName = UUID.randomUUID().toString();
+ int count = 2;
+ int fileSize = bufferSize * count;
+ ByteBuffer byteBuffer = ByteBuffer.allocate(bufferSize);
+
+ try (OzoneOutputStream out = bucket.createKey(keyName, fileSize,
+ ReplicationConfig.getDefault(CONF), new HashMap<>())) {
+ for (int i = 0; i < count; i++) {
+ out.write(byteBuffer);
+ out.hsync();
+ }
+ }
+
+ OzoneKeyDetails keyInfo = bucket.getKey(keyName);
+ assertEquals(fileSize, keyInfo.getDataSize());
+ int totalReadLen = 0;
+ try (OzoneInputStream is = bucket.readKey(keyName)) {
+
+ for (int i = 0; i < count; i++) {
+ ByteBuffer readBuffer = ByteBuffer.allocate(bufferSize);
+ int readLen = is.read(readBuffer);
+ if (bufferSize != readLen) {
+ throw new IOException("failed to read " + bufferSize + " from offset " + totalReadLen +
+ ", actually read " + readLen + ", block " + totalReadLen /
+ BLOCK_SIZE);
+ }
+ assertArrayEquals(byteBuffer.array(), readBuffer.array());
+ totalReadLen += readLen;
+ }
+ }
+ bucket.deleteKey(keyName);
+ }
}
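writeWithSmallBuffer and writeWithBigBuffer above are driven by @MethodSource factories returning Stream<Arguments>. A minimal sketch of that JUnit 5 mechanism (test class, factory name, and the assertion are hypothetical):

import java.util.stream.Stream;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.params.provider.Arguments.arguments;

class MethodSourceSketch {
  // The factory must be static (unless the class uses @TestInstance(PER_CLASS)),
  // and each Arguments tuple maps positionally onto the test parameters.
  static Stream<Arguments> sizes() {
    return Stream.of(arguments(true, 512), arguments(false, 513));
  }

  @ParameterizedTest
  @MethodSource("sizes")
  void runsOncePerTuple(boolean evenExpected, int size) {
    assertEquals(evenExpected, size % 2 == 0);
  }
}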
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
index 93775f40136..4b45bb5fa0d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.fs.ozone;
import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -26,6 +25,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -33,6 +33,7 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneTestUtils;
@@ -118,17 +119,19 @@ public void init() throws IOException, InterruptedException,
conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s");
// make sure flush will write data to DN
conf.setBoolean("ozone.client.stream.buffer.flush.delay", false);
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .setDataStreamBufferFlushSize(maxFlushSize)
+ .setDataStreamMinPacketSize(chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.setTotalPipelineNumLimit(10)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
- .setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
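The init() rewrite above moves the client-side buffer sizes off the MiniOzoneCluster builder and into the configuration via ClientConfigForTesting before the cluster is built. A minimal sketch of that setup, using only the setters visible in this patch (sizes are hypothetical):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;

class ClusterSetupSketch {
  static MiniOzoneCluster build() throws Exception {
    final int chunkSize = 4 * 1024;
    final int flushSize = 2 * chunkSize;
    final int maxFlushSize = 2 * flushSize;
    final int blockSize = 2 * maxFlushSize;

    OzoneConfiguration conf = new OzoneConfiguration();
    // Buffer sizes are applied to the configuration up front...
    ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
        .setBlockSize(blockSize)
        .setChunkSize(chunkSize)
        .setStreamBufferFlushSize(flushSize)
        .setStreamBufferMaxSize(maxFlushSize)
        .applyTo(conf);

    // ...so the cluster builder no longer carries per-stream size setters.
    return MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
  }
}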
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
index 23d71047ef8..c6893c57e96 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -35,9 +35,11 @@
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -52,9 +54,10 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.Assertions;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
/**
* Test OzoneFSInputStream by reading through multiple interfaces.
@@ -82,12 +85,16 @@ public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
BucketLayout.LEGACY.name());
+
+ ClientConfigForTesting.newBuilder(StorageUnit.MB)
+ .setChunkSize(2)
+ .setBlockSize(8)
+ .setStreamBufferFlushSize(2)
+ .setStreamBufferMaxSize(4)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5)
- .setChunkSize(2) // MB
- .setBlockSize(8) // MB
- .setStreamBufferFlushSize(2) // MB
- .setStreamBufferMaxSize(4) // MB
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
@@ -147,11 +154,11 @@ public void testO3FSSingleByteRead() throws IOException {
break;
}
value[i] = (byte) val;
- Assertions.assertEquals(value[i], data[i], "value mismatch at:" + i);
+ assertEquals(value[i], data[i], "value mismatch at:" + i);
i++;
}
- Assertions.assertEquals(i, data.length);
- Assertions.assertArrayEquals(value, data);
+ assertEquals(i, data.length);
+ assertArrayEquals(value, data);
}
}
@@ -169,8 +176,8 @@ public void testO3FSMultiByteRead() throws IOException {
System.arraycopy(tmp, 0, value, i * tmp.length, tmp.length);
i++;
}
- Assertions.assertEquals((long) i * tmp.length, data.length);
- Assertions.assertArrayEquals(value, data);
+ assertEquals((long) i * tmp.length, data.length);
+ assertArrayEquals(value, data);
}
}
@@ -181,12 +188,12 @@ public void testO3FSByteBufferRead() throws IOException {
ByteBuffer buffer = ByteBuffer.allocate(1024 * 1024);
int byteRead = inputStream.read(buffer);
- Assertions.assertEquals(byteRead, 1024 * 1024);
+ assertEquals(byteRead, 1024 * 1024);
byte[] value = new byte[1024 * 1024];
System.arraycopy(data, 0, value, 0, value.length);
- Assertions.assertArrayEquals(value, buffer.array());
+ assertArrayEquals(value, buffer.array());
}
}
@@ -208,7 +215,7 @@ public void testSequenceFileReaderSync() throws IOException {
in.sync(0);
blockStart = in.getPosition();
// The behavior should be consistent with HDFS
- Assertions.assertEquals(srcfile.length(), blockStart);
+ assertEquals(srcfile.length(), blockStart);
in.close();
}
@@ -230,7 +237,7 @@ public void testSequenceFileReaderSyncEC() throws IOException {
in.sync(0);
blockStart = in.getPosition();
// The behavior should be consistent with HDFS
- Assertions.assertEquals(srcfile.length(), blockStart);
+ assertEquals(srcfile.length(), blockStart);
in.close();
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index 71d1e4bdddd..6dccd604208 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -47,7 +47,6 @@
import org.junit.jupiter.api.Timeout;
import java.io.FileNotFoundException;
-import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
@@ -60,11 +59,11 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Class tests create with object store and getFileStatus.
@@ -264,16 +263,8 @@ public void testKeyCreationFailDuetoDirectoryCreationBeforeCommit()
// Before close create directory with same name.
o3fs.mkdirs(new Path("/a/b/c"));
-
- try {
- ozoneOutputStream.close();
- fail("testKeyCreationFailDuetoDirectoryCreationBeforeCommit");
- } catch (IOException ex) {
- assertTrue(ex instanceof OMException);
- assertEquals(NOT_A_FILE,
- ((OMException) ex).getResult());
- }
-
+ OMException ex = assertThrows(OMException.class, () -> ozoneOutputStream.close());
+ assertEquals(NOT_A_FILE, ex.getResult());
}
@@ -308,14 +299,10 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete()
partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
// Should fail, as we have directory with same name.
- try {
- ozoneBucket.completeMultipartUpload(keyName,
- omMultipartInfo.getUploadID(), partsMap);
- fail("testMPUFailDuetoDirectoryCreationBeforeComplete failed");
- } catch (OMException ex) {
- assertTrue(ex instanceof OMException);
- assertEquals(NOT_A_FILE, ex.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () -> ozoneBucket.completeMultipartUpload(keyName,
+ omMultipartInfo.getUploadID(), partsMap));
+ assertEquals(NOT_A_FILE, ex.getResult());
+
// Delete directory
o3fs.delete(new Path(keyName), true);
@@ -338,25 +325,16 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete()
public void testCreateDirectoryFirstThenKeyAndFileWithSameName()
throws Exception {
o3fs.mkdirs(new Path("/t1/t2"));
-
- try {
- o3fs.create(new Path("/t1/t2"));
- fail("testCreateDirectoryFirstThenFileWithSameName failed");
- } catch (FileAlreadyExistsException ex) {
- assertTrue(ex.getMessage().contains(NOT_A_FILE.name()));
- }
+ FileAlreadyExistsException e =
+ assertThrows(FileAlreadyExistsException.class, () -> o3fs.create(new Path("/t1/t2")));
+ assertThat(e.getMessage()).contains(NOT_A_FILE.name());
OzoneVolume ozoneVolume =
client.getObjectStore().getVolume(volumeName);
OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
ozoneBucket.createDirectory("t1/t2");
- try {
- ozoneBucket.createKey("t1/t2", 0);
- fail("testCreateDirectoryFirstThenFileWithSameName failed");
- } catch (OMException ex) {
- assertTrue(ex instanceof OMException);
- assertEquals(NOT_A_FILE, ex.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () -> ozoneBucket.createKey("t1/t2", 0));
+ assertEquals(NOT_A_FILE, ex.getResult());
}
@@ -463,7 +441,7 @@ private void checkPath(Path path) {
FileNotFoundException ex = assertThrows(FileNotFoundException.class, () ->
o3fs.getFileStatus(path),
"testObjectStoreCreateWithO3fs failed for Path" + path);
- assertTrue(ex.getMessage().contains("No such file or directory"));
+ assertThat(ex.getMessage()).contains("No such file or directory");
}
private void checkAncestors(Path p) throws Exception {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
index 6c0ecff0db8..5aba83bd412 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs.ozone;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -25,6 +26,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -40,9 +42,9 @@
import java.io.IOException;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test OM Metrics for OzoneFileSystem operations.
@@ -72,12 +74,16 @@ public static void init() throws Exception {
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
BucketLayout.LEGACY.name());
conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+
+ ClientConfigForTesting.newBuilder(StorageUnit.MB)
+ .setChunkSize(2)
+ .setBlockSize(8)
+ .setStreamBufferFlushSize(2)
+ .setStreamBufferMaxSize(4)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
- .setChunkSize(2) // MB
- .setBlockSize(8) // MB
- .setStreamBufferFlushSize(2) // MB
- .setStreamBufferMaxSize(4) // MB
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
@@ -149,13 +155,13 @@ private void testOzoneFileCommit(TestOps op) throws Exception {
long numKeysAfterCommit = cluster
.getOzoneManager().getMetrics().getNumKeys();
- assertTrue(numKeysAfterCommit > 0);
+ assertThat(numKeysAfterCommit).isGreaterThan(0);
assertEquals(numKeysBeforeCreate + 2, numKeysAfterCommit);
fs.delete(parentDir, true);
long numKeysAfterDelete = cluster
.getOzoneManager().getMetrics().getNumKeys();
- assertTrue(numKeysAfterDelete >= 0);
+ assertThat(numKeysAfterDelete).isGreaterThanOrEqualTo(0);
assertEquals(numKeysBeforeCreate, numKeysAfterDelete);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
index d03cd4c81ce..228a820ed62 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
@@ -38,8 +38,8 @@
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests OFS behavior when filesystem paths are enabled and parent directory is
@@ -111,8 +111,8 @@ public void testCloseFileWithDeletedParent() throws Exception {
// Close should throw exception, Since parent doesn't exist.
OMException omException = assertThrows(OMException.class, stream::close);
- assertTrue(omException.getMessage().contains("Cannot create file : " +
- "parent/file as parent directory doesn't exist"));
+ assertThat(omException.getMessage())
+ .contains("Cannot create file : " + "parent/file as parent directory doesn't exist");
}
/**
@@ -132,8 +132,8 @@ public void testCloseFileWithRenamedParent() throws Exception {
// Close should throw exception, Since parent has been moved.
OMException omException = assertThrows(OMException.class, stream::close);
- assertTrue(omException.getMessage().contains("Cannot create file : " +
- "parent/file as parent directory doesn't exist"));
+ assertThat(omException.getMessage())
+ .contains("Cannot create file : " + "parent/file as parent directory doesn't exist");
fs.delete(renamedPath, true);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
index 972ad7dd2e7..37116f33e27 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java
@@ -35,11 +35,12 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.Assertions;
import java.io.IOException;
import java.net.URI;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
/**
* Test Ozone Prefix Parser.
*/
@@ -115,18 +116,12 @@ public void testPrefixParsePath() throws Exception {
private void assertPrefixStats(PrefixParser parser, int volumeCount,
int bucketCount, int intermediateDirCount, int nonExistentDirCount,
int fileCount, int dirCount) {
- Assertions.assertEquals(volumeCount,
- parser.getParserStats(PrefixParser.Types.VOLUME));
- Assertions.assertEquals(bucketCount,
- parser.getParserStats(PrefixParser.Types.BUCKET));
- Assertions.assertEquals(intermediateDirCount,
- parser.getParserStats(PrefixParser.Types.INTERMEDIATE_DIRECTORY));
- Assertions.assertEquals(nonExistentDirCount,
- parser.getParserStats(PrefixParser.Types.NON_EXISTENT_DIRECTORY));
- Assertions.assertEquals(fileCount,
- parser.getParserStats(PrefixParser.Types.FILE));
- Assertions.assertEquals(dirCount,
- parser.getParserStats(PrefixParser.Types.DIRECTORY));
+ assertEquals(volumeCount, parser.getParserStats(PrefixParser.Types.VOLUME));
+ assertEquals(bucketCount, parser.getParserStats(PrefixParser.Types.BUCKET));
+ assertEquals(intermediateDirCount, parser.getParserStats(PrefixParser.Types.INTERMEDIATE_DIRECTORY));
+ assertEquals(nonExistentDirCount, parser.getParserStats(PrefixParser.Types.NON_EXISTENT_DIRECTORY));
+ assertEquals(fileCount, parser.getParserStats(PrefixParser.Types.FILE));
+ assertEquals(dirCount, parser.getParserStats(PrefixParser.Types.DIRECTORY));
}
private void testPrefixParseWithInvalidPaths() throws Exception {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
index 5d068546828..2a6c8c456b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
@@ -22,14 +22,15 @@
import java.io.OutputStream;
import java.util.concurrent.ThreadLocalRandom;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -37,7 +38,6 @@
import org.apache.hadoop.ozone.client.io.SelectorOutputStream;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -55,6 +55,9 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
/**
* Ozone file system tests with Streaming.
@@ -84,17 +87,20 @@ public static void init() throws Exception {
CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B");
CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true);
CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name());
- cluster = MiniOzoneCluster.newBuilder(CONF)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(10)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .setDataStreamBufferFlushSize(maxFlushSize)
.setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(CONF);
+
+ cluster = MiniOzoneCluster.newBuilder(CONF)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(10)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
@@ -152,7 +158,7 @@ static void createFile(FileSystem fs, Path path, boolean overwrite,
final OutputStream wrapped = out.getWrappedStream();
LOG.info("wrapped: {}", wrapped.getClass());
- Assertions.assertEquals(SelectorOutputStream.class, wrapped.getClass());
+ assertEquals(SelectorOutputStream.class, wrapped.getClass());
final SelectorOutputStream<?> selector = (SelectorOutputStream<?>) wrapped;
final boolean belowThreshold = data.length <= AUTO_THRESHOLD;
LOG.info("data.length={}, threshold={}, belowThreshold? {}",
@@ -161,13 +167,12 @@ static void createFile(FileSystem fs, Path path, boolean overwrite,
out.close();
final OutputStream underlying = selector.getUnderlying();
- Assertions.assertNotNull(underlying);
+ assertNotNull(underlying);
LOG.info("underlying after close: {}", underlying.getClass());
if (belowThreshold) {
- Assertions.assertTrue(underlying instanceof OzoneFSOutputStream);
+ assertInstanceOf(OzoneFSOutputStream.class, underlying);
} else {
- Assertions.assertEquals(OzoneFSDataStreamOutput.class,
- underlying.getClass());
+ assertEquals(OzoneFSDataStreamOutput.class, underlying.getClass());
}
}
static void assertUnderlying(SelectorOutputStream<?> selector,
LOG.info("underlying before close: {}", underlying != null ?
underlying.getClass() : null);
if (belowThreshold) {
- Assertions.assertNull(underlying);
+ assertNull(underlying);
} else {
- Assertions.assertNotNull(underlying);
- Assertions.assertEquals(OzoneFSDataStreamOutput.class,
+ assertNotNull(underlying);
+ assertEquals(OzoneFSDataStreamOutput.class,
underlying.getClass());
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
index d639f2734fc..47dc9ac0c3b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.ha.ConfUtils;
import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.ozone.test.GenericTestUtils;
@@ -46,19 +45,19 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.File;
import java.util.Optional;
import java.util.OptionalInt;
-import java.util.UUID;
import static org.apache.hadoop.hdds.HddsUtils.getHostName;
import static org.apache.hadoop.hdds.HddsUtils.getHostPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test client-side URI handling with Ozone Manager HA.
@@ -74,10 +73,7 @@ public class TestOzoneFsHAURLs {
private OzoneConfiguration conf;
private static MiniOzoneCluster cluster;
- private static String omId;
private static String omServiceId;
- private static String clusterId;
- private static String scmId;
private static OzoneManager om;
private static int numOfOMs;
@@ -85,47 +81,35 @@ public class TestOzoneFsHAURLs {
private String bucketName;
private String rootPath;
- private final String o3fsImplKey =
+ private static final String O3FS_IMPL_KEY =
"fs." + OzoneConsts.OZONE_URI_SCHEME + ".impl";
- private final String o3fsImplValue =
+ private static final String O3FS_IMPL_VALUE =
"org.apache.hadoop.fs.ozone.OzoneFileSystem";
private static OzoneClient client;
- private final String ofsImplKey =
+ private static final String OFS_IMPL_KEY =
"fs." + OzoneConsts.OZONE_OFS_URI_SCHEME + ".impl";
- private final String ofsImplValue =
+ private static final String OFS_IMPL_VALUE =
"org.apache.hadoop.fs.ozone.RootedOzoneFileSystem";
@BeforeAll
- public static void initClass() throws Exception {
+ static void initClass(@TempDir File tempDir) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
- omId = UUID.randomUUID().toString();
omServiceId = "om-service-test1";
numOfOMs = 3;
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
- final String path = GenericTestUtils.getTempPath(omId);
- java.nio.file.Path metaDirPath = java.nio.file.Paths.get(path, "om-meta");
- conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getAbsolutePath());
conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
BucketLayout.LEGACY.name());
conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
- OMStorage omStore = new OMStorage(conf);
- omStore.setClusterId(clusterId);
- // writes the version file properties
- omStore.initialize();
-
// Start the cluster
cluster = MiniOzoneCluster.newOMHABuilder(conf)
.setNumDatanodes(5)
.setTotalPipelineNumLimit(3)
- .setClusterId(clusterId)
- .setScmId(scmId)
.setOMServiceId(omServiceId)
.setNumOfOzoneManagers(numOfOMs)
.build();
@@ -220,7 +204,7 @@ private int getPortFromAddress(String addr) {
public void testWithQualifiedDefaultFS() throws Exception {
OzoneConfiguration clientConf = new OzoneConfiguration(conf);
clientConf.setQuietMode(false);
- clientConf.set(o3fsImplKey, o3fsImplValue);
+ clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE);
// fs.defaultFS = o3fs://bucketName.volumeName.omServiceId/
clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
@@ -315,7 +299,7 @@ public void testWithQualifiedDefaultFS() throws Exception {
private void testWithDefaultFS(String defaultFS) throws Exception {
OzoneConfiguration clientConf = new OzoneConfiguration(conf);
clientConf.setQuietMode(false);
- clientConf.set(o3fsImplKey, o3fsImplValue);
+ clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE);
// fs.defaultFS = file:///
clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
defaultFS);
@@ -360,8 +344,8 @@ public void testOtherDefaultFS() throws Exception {
public void testIncorrectAuthorityInURI() throws Exception {
OzoneConfiguration clientConf = new OzoneConfiguration(conf);
clientConf.setQuietMode(false);
- clientConf.set(o3fsImplKey, o3fsImplValue);
- clientConf.set(ofsImplKey, ofsImplValue);
+ clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE);
+ clientConf.set(OFS_IMPL_KEY, OFS_IMPL_VALUE);
FsShell shell = new FsShell(clientConf);
String incorrectSvcId = "dummy";
String o3fsPathWithCorrectSvcId =
@@ -385,8 +369,7 @@ public void testIncorrectAuthorityInURI() throws Exception {
res = ToolRunner.run(shell,
new String[] {"-ls", ofsPathWithIncorrectSvcId });
assertEquals(1, res);
- assertTrue(
- capture.getOutput().contains("Cannot resolve OM host"));
+ assertThat(capture.getOutput()).contains("Cannot resolve OM host");
}
try (GenericTestUtils.SystemErrCapturer capture = new
@@ -394,8 +377,7 @@ public void testIncorrectAuthorityInURI() throws Exception {
res = ToolRunner.run(shell,
new String[] {"-ls", o3fsPathWithInCorrectSvcId });
assertEquals(1, res);
- assertTrue(
- capture.getOutput().contains("Cannot resolve OM host"));
+ assertThat(capture.getOutput()).contains("Cannot resolve OM host");
}
} finally {
shell.close();
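
The @TempDir parameter introduced above replaces the hand-built GenericTestUtils temp path: JUnit 5 creates the directory before the test class runs and removes it afterwards. A minimal sketch of the injection pattern, independent of the Ozone classes:

```java
import java.io.File;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import static org.junit.jupiter.api.Assertions.assertTrue;

class TempDirSketch {

  static File metaDir;

  @BeforeAll
  static void init(@TempDir File tempDir) {
    // JUnit creates and later deletes this directory, so no manual cleanup
    // (or unique UUID-based path construction) is needed.
    metaDir = tempDir;
  }

  @Test
  void metaDirExists() {
    assertTrue(metaDir.isDirectory());
  }
}
```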
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
index d74c77d3435..ae6a24a910c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
@@ -24,11 +24,8 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.util.UUID;
import java.util.stream.Stream;
-import com.google.common.base.Strings;
-
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -39,7 +36,6 @@
import org.apache.hadoop.util.ToolRunner;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -56,6 +52,8 @@
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
/**
* Test client-side CRUD snapshot operations with Ozone Manager.
@@ -89,8 +87,6 @@ static void initClass() throws Exception {
// Start the cluster
cluster = MiniOzoneCluster.newOMHABuilder(conf)
- .setClusterId(UUID.randomUUID().toString())
- .setScmId(UUID.randomUUID().toString())
.setOMServiceId(OM_SERVICE_ID)
.setNumOfOzoneManagers(1)
.build();
@@ -120,13 +116,13 @@ private static void createVolBuckKey()
// Create volume and bucket
int res = ToolRunner.run(shell,
new String[]{"-mkdir", "-p", BUCKET_PATH});
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
// Create key
res = ToolRunner.run(shell, new String[]{"-touch", KEY_PATH});
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
// List the bucket to make sure that bucket exists.
res = ToolRunner.run(shell, new String[]{"-ls", BUCKET_PATH});
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
}
@@ -137,12 +133,12 @@ void testCreateSnapshotDuplicateName() throws Exception {
int res = ToolRunner.run(shell,
new String[]{"-createSnapshot", BUCKET_PATH, snapshotName});
// Asserts that create request succeeded
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
res = ToolRunner.run(shell,
new String[]{"-createSnapshot", BUCKET_PATH, snapshotName});
// Asserts that create request fails since snapshot name provided twice
- Assertions.assertEquals(1, res);
+ assertEquals(1, res);
}
@Test
@@ -162,19 +158,19 @@ void testCreateSnapshotWithSubDirInput() throws Exception {
int res = ToolRunner.run(shell, new String[] {
"-mkdir", "-p", dirPath});
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
try (GenericTestUtils.SystemOutCapturer capture =
new GenericTestUtils.SystemOutCapturer()) {
res = ToolRunner.run(shell, new String[] {
"-createSnapshot", dirPath, snapshotName});
// Asserts that create request succeeded
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
String expectedSnapshotPath = Paths.get(
BUCKET_PATH, OM_SNAPSHOT_INDICATOR, snapshotName).toString();
String out = capture.getOutput().trim();
- Assertions.assertTrue(out.endsWith(expectedSnapshotPath));
+ assertThat(out).endsWith(expectedSnapshotPath);
}
}
@@ -192,7 +188,7 @@ void testCreateSnapshotSuccess(String snapshotName)
int res = ToolRunner.run(shell,
new String[]{"-createSnapshot", BUCKET_PATH, snapshotName});
// Asserts that create request succeeded
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
SnapshotInfo snapshotInfo = ozoneManager
.getMetadataManager()
@@ -202,7 +198,7 @@ void testCreateSnapshotSuccess(String snapshotName)
// Assert that snapshot exists in RocksDB.
// We can't list or check whether the snapshot directory exists because the DB
// transaction might not have been flushed by that time.
- Assertions.assertNotNull(snapshotInfo);
+ assertNotNull(snapshotInfo);
}
private static Stream createSnapshotFailureScenarios() {
@@ -252,8 +248,7 @@ void testCreateSnapshotFailure(String description,
String errorMessage = execShellCommandAndGetOutput(expectedResponse,
new String[]{"-createSnapshot", paramBucketPath, snapshotName});
- Assertions.assertTrue(errorMessage
- .contains(expectedMessage));
+ assertThat(errorMessage).contains(expectedMessage);
}
/**
@@ -291,7 +286,7 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception {
int res = ToolRunner.run(shell,
new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName1});
// Asserts that delete request succeeded
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
// Wait for the snapshot to be marked deleted.
GenericTestUtils.waitFor(() -> {
@@ -339,24 +334,22 @@ void testDeleteBucketWithSnapshot() throws Exception {
String deleteKeyOut = execShellCommandAndGetOutput(0,
new String[]{"-rm", "-r", "-skipTrash", KEY_PATH});
- Assertions.assertTrue(deleteKeyOut
- .contains("Deleted " + BUCKET_PATH));
+ assertThat(deleteKeyOut).contains("Deleted " + BUCKET_PATH);
// Delete bucket should fail due to existing snapshot
String deleteBucketOut = execShellCommandAndGetOutput(1,
new String[]{"-rm", "-r", "-skipTrash", BUCKET_PATH});
- Assertions.assertTrue(deleteBucketOut
- .contains(BUCKET + " can't be deleted when it has snapshots"));
+ assertThat(deleteBucketOut).contains(BUCKET + " can't be deleted when it has snapshots");
// Key shouldn't exist under bucket
String listKeyOut = execShellCommandAndGetOutput(0,
new String[]{"-ls", BUCKET_PATH});
- Assertions.assertTrue(Strings.isNullOrEmpty(listKeyOut));
+ assertThat(listKeyOut).isNullOrEmpty();
// Key should still exist under snapshot
String listSnapKeyOut = execShellCommandAndGetOutput(0,
new String[]{"-ls", snapshotPath});
- Assertions.assertTrue(listSnapKeyOut.contains(snapshotKeyPath));
+ assertThat(listSnapKeyOut).contains(snapshotKeyPath);
}
@Test
@@ -366,7 +359,7 @@ void testSnapshotDeleteSuccess() throws Exception {
int res = ToolRunner.run(shell,
new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName});
// Asserts that delete request succeeded
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
// Wait for the snapshot to be marked deleted.
GenericTestUtils.waitFor(() -> {
@@ -417,8 +410,7 @@ void testSnapshotDeleteFailure(String description,
String errorMessage = execShellCommandAndGetOutput(expectedResponse,
new String[]{"-deleteSnapshot", paramBucketPath, snapshotName});
- Assertions.assertTrue(errorMessage
- .contains(expectedMessage), errorMessage);
+ assertThat(errorMessage).contains(expectedMessage);
}
/**
@@ -438,7 +430,7 @@ private String execShellCommandAndGetOutput(
// Execute command
int res = ToolRunner.run(shell, args);
- Assertions.assertEquals(response, res);
+ assertEquals(response, res);
// Store command output to a string,
// if command should succeed then
@@ -467,7 +459,7 @@ private String createSnapshot() throws Exception {
int res = ToolRunner.run(shell,
new String[]{"-createSnapshot", BUCKET_PATH, snapshotName});
// Asserts that create request succeeded
- Assertions.assertEquals(0, res);
+ assertEquals(0, res);
OzoneConfiguration conf = ozoneManager.getConfiguration();
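
The move from Assertions.assertTrue(output.contains(...)) to AssertJ's assertThat(output).contains(...) is more than a style change: on failure, AssertJ reports both the expected fragment and the actual string, while the boolean form only reports that "true" was expected. A small sketch of the string assertions used in this file:

```java
import static org.assertj.core.api.Assertions.assertThat;

class AssertJStringSketch {
  static void demo() {
    String output = "Deleted /vol/bucket";
    // Each assertion includes the actual value in its failure message.
    assertThat(output).contains("Deleted");
    assertThat(output).endsWith("/bucket");
    assertThat("").isNullOrEmpty();
  }
}
```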
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
index cd12e0d52a2..074a8e7df4b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
@@ -57,6 +57,7 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -215,7 +216,7 @@ public void testDeleteVolumeAndBucket() throws Exception {
private void checkPath(Path path) {
FileNotFoundException ex = assertThrows(FileNotFoundException.class, () ->
fs.getFileStatus(path), "testRecursiveDelete failed");
- assertTrue(ex.getMessage().contains("File not found"));
+ assertThat(ex.getMessage()).contains("File not found");
}
private void assertTableRowCount(Table table, int count)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContract.java
new file mode 100644
index 00000000000..73c9fa7dc2a
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContract.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone.contract;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+/**
+ * Base class for Ozone filesystem contracts. It needs a {@link MiniOzoneCluster},
+ * and provides the {@link FileSystem} that is the subject of the test.
+ */
+abstract class AbstractOzoneContract extends AbstractFSContract {
+
+ private final MiniOzoneCluster cluster;
+
+ /**
+ * @return root URI for the FileSystem
+ */
+ protected abstract String getRootURI() throws IOException;
+
+ protected MiniOzoneCluster getCluster() {
+ return cluster;
+ }
+
+ AbstractOzoneContract(MiniOzoneCluster cluster) {
+ super(cluster.getConf());
+ this.cluster = cluster;
+ }
+
+ @Override
+ public FileSystem getTestFileSystem() throws IOException {
+ assertNotNull(cluster, "cluster not created");
+ getConf().set("fs.defaultFS", getRootURI());
+ return FileSystem.get(getConf());
+ }
+}
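
A concrete subclass of AbstractOzoneContract only has to provide the scheme, the test path, and the root URI; OzoneContract and RootedOzoneContract further down do exactly that. A hypothetical minimal subclass, shown purely to illustrate the shape of the new API (the class name and the literal URI are made up):

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ozone.MiniOzoneCluster;

// Hypothetical example, not part of the patch.
final class ExampleOzoneContract extends AbstractOzoneContract {

  ExampleOzoneContract(MiniOzoneCluster cluster) {
    super(cluster);
  }

  @Override
  public String getScheme() {
    return "o3fs";              // scheme the contract tests will exercise
  }

  @Override
  public Path getTestPath() {
    return new Path("/test");   // where contract tests create their files
  }

  @Override
  protected String getRootURI() throws IOException {
    // A real implementation derives this from a bucket created on the cluster.
    return "o3fs://bucket.volume/";
  }
}
```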
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
new file mode 100644
index 00000000000..ab1736c3b0b
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
@@ -0,0 +1,315 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone.contract;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
+import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
+import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
+import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
+import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
+import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
+import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+
+import java.io.IOException;
+import java.time.Duration;
+
+import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
+import static org.assertj.core.api.Assumptions.assumeThat;
+
+/**
+ * Base class for Ozone contract tests. Manages the lifecycle of the {@link MiniOzoneCluster}.
+ *
+ * All specific contract tests are implemented as {@link Nested} inner classes. This allows
+ * running all tests in the same cluster.
+ *
+ * Subclasses only need to implement {@link #createOzoneContract(Configuration)},
+ * but can tweak configuration by also overriding {@link #createOzoneConfig()}.
+ */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+abstract class AbstractOzoneContractTest {
+
+ private static final String CONTRACT_XML = "contract/ozone.xml";
+
+ private MiniOzoneCluster cluster;
+
+ /**
+ * This must be implemented by all subclasses.
+ * @return the FS contract
+ */
+ abstract AbstractFSContract createOzoneContract(Configuration conf);
+
+ /**
+ * Creates the base configuration for contract tests. This can be tweaked
+ * in subclasses by overriding {@link #createOzoneConfig()}.
+ */
+ protected static OzoneConfiguration createBaseConfiguration() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ DatanodeRatisServerConfig ratisServerConfig =
+ conf.getObject(DatanodeRatisServerConfig.class);
+ ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+ ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+ conf.setFromObject(ratisServerConfig);
+
+ RatisClientConfig.RaftConfig raftClientConfig =
+ conf.getObject(RatisClientConfig.RaftConfig.class);
+ raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
+ raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
+ conf.setFromObject(raftClientConfig);
+
+ conf.addResource(CONTRACT_XML);
+
+ conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true);
+
+ return conf;
+ }
+
+ /**
+ * Hook method that allows tweaking the configuration.
+ */
+ OzoneConfiguration createOzoneConfig() {
+ return createBaseConfiguration();
+ }
+
+ MiniOzoneCluster getCluster() {
+ return cluster;
+ }
+
+ @BeforeAll
+ void setup() throws Exception {
+ cluster = MiniOzoneCluster.newBuilder(createOzoneConfig())
+ .setNumDatanodes(5)
+ .build();
+ cluster.waitForClusterToBeReady();
+ }
+
+ @AfterAll
+ void teardown() {
+ IOUtils.closeQuietly(cluster);
+ }
+
+ @Nested
+ class TestContractCreate extends AbstractContractCreateTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+ @Nested
+ class TestContractDistCp extends AbstractContractDistCpTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+
+ @Override
+ protected void deleteTestDirInTeardown() throws IOException {
+ super.deleteTestDirInTeardown();
+ cleanup("TEARDOWN", getLocalFS(), getLocalDir());
+ }
+ }
+
+ @Nested
+ class TestContractDelete extends AbstractContractDeleteTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+ @Nested
+ class TestContractGetFileStatus extends AbstractContractGetFileStatusTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+ @Nested
+ class TestContractMkdir extends AbstractContractMkdirTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+ @Nested
+ class TestContractOpen extends AbstractContractOpenTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+ @Nested
+ class TestContractRename extends AbstractContractRenameTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+ @Nested
+ class TestContractRootDirectory extends AbstractContractRootDirectoryTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+
+ @Override
+ @Test
+ public void testRmRootRecursive() throws Throwable {
+ // OFS doesn't support creating files directly under root
+ assumeThat(getContract().getScheme())
+ .isNotEqualTo(OZONE_OFS_URI_SCHEME);
+ super.testRmRootRecursive();
+ }
+
+ @Override
+ @Test
+ public void testRmNonEmptyRootDirNonRecursive() throws Throwable {
+ // OFS doesn't support creating files directly under root
+ assumeThat(getContract().getScheme())
+ .isNotEqualTo(OZONE_OFS_URI_SCHEME);
+ super.testRmNonEmptyRootDirNonRecursive();
+ }
+
+ @Override
+ @Test
+ public void testRmEmptyRootDirNonRecursive() throws Throwable {
+      // Internally, the test deletes the volume recursively,
+      // which is not supported.
+ assumeThat(getContract().getScheme())
+ .isNotEqualTo(OZONE_OFS_URI_SCHEME);
+ super.testRmEmptyRootDirNonRecursive();
+ }
+
+ @Override
+ @Test
+ public void testListEmptyRootDirectory() throws IOException {
+      // Internally, the test deletes the volume recursively,
+      // which is not supported.
+ assumeThat(getContract().getScheme())
+ .isNotEqualTo(OZONE_OFS_URI_SCHEME);
+ super.testListEmptyRootDirectory();
+ }
+
+ @Override
+ @Test
+ public void testSimpleRootListing() throws IOException {
+ // Recursive list is not supported
+ assumeThat(getContract().getScheme())
+ .isNotEqualTo(OZONE_OFS_URI_SCHEME);
+ super.testSimpleRootListing();
+ }
+
+ @Override
+ @Test
+ public void testMkDirDepth1() throws Throwable {
+      // Internally, the test deletes the volume recursively,
+      // which is not supported.
+ assumeThat(getContract().getScheme())
+ .isNotEqualTo(OZONE_OFS_URI_SCHEME);
+ super.testMkDirDepth1();
+ }
+ }
+
+ @Nested
+ class TestContractSeek extends AbstractContractSeekTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+ @Nested
+ class TestContractUnbuffer extends AbstractContractUnbufferTest {
+ @Override
+ protected Configuration createConfiguration() {
+ return createOzoneConfig();
+ }
+
+ @Override
+ protected AbstractFSContract createContract(Configuration conf) {
+ return createOzoneContract(conf);
+ }
+ }
+
+}
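
The @TestInstance(PER_CLASS) plus @Nested combination is what lets every nested contract suite reuse a single MiniOzoneCluster: with a per-class lifecycle, @BeforeAll and @AfterAll may be instance methods, and nested classes run inside the enclosing instance's lifecycle and can see its fields. A generic sketch of that mechanism, independent of Ozone:

```java
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;

import static org.junit.jupiter.api.Assertions.assertNotNull;

@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class SharedResourceSketch {

  private String sharedResource;        // stands in for the MiniOzoneCluster

  @BeforeAll
  void setup() {                        // non-static is allowed with PER_CLASS
    sharedResource = "started once for all nested suites";
  }

  @AfterAll
  void teardown() {
    sharedResource = null;
  }

  @Nested
  class FirstSuite {
    @Test
    void seesOuterState() {
      assertNotNull(sharedResource);    // nested tests share the outer instance
    }
  }

  @Nested
  class SecondSuite {
    @Test
    void alsoSeesOuterState() {
      assertNotNull(sharedResource);
    }
  }
}
```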
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
deleted file mode 100644
index fd4e4d416f0..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Ozone contract tests creating files.
- */
-@RunWith(Parameterized.class)
-public class ITestOzoneContractCreate extends AbstractContractCreateTest {
-
- public ITestOzoneContractCreate(boolean fso) {
- // Actual init done in initParam().
- }
-
- @Parameterized.BeforeParam
- public static void initParam(boolean fso) throws IOException {
- OzoneContract.createCluster(fso);
- }
-
- @Parameterized.AfterParam
- public static void teardownParam() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Parameterized.Parameters
- public static Collection data() {
- return OzoneContract.getFsoCombinations();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
deleted file mode 100644
index 8ca70f0a7f9..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Ozone contract tests covering deletes.
- */
-@RunWith(Parameterized.class)
-public class ITestOzoneContractDelete extends AbstractContractDeleteTest {
-
- public ITestOzoneContractDelete(boolean fso) {
- // Actual init done in initParam().
- }
-
- @Parameterized.BeforeParam
- public static void initParam(boolean fso) throws IOException {
- OzoneContract.createCluster(fso);
- }
-
- @Parameterized.AfterParam
- public static void teardownParam() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Parameterized.Parameters
- public static Collection data() {
- return OzoneContract.getFsoCombinations();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
deleted file mode 100644
index cba18fe25a6..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
-
-
-/**
- * Contract test suite covering S3A integration with DistCp.
- * Uses the block output stream, buffered to disk. This is the
- * recommended output mechanism for DistCP due to its scalability.
- */
-public class ITestOzoneContractDistCp extends AbstractContractDistCpTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- OzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected OzoneContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Override
- protected void deleteTestDirInTeardown() throws IOException {
- super.deleteTestDirInTeardown();
- cleanup("TEARDOWN", getLocalFS(), getLocalDir());
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java
deleted file mode 100644
index 333ef18f5f0..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
-
-
-/**
- * Contract test suite covering S3A integration with DistCp.
- * Uses the block output stream, buffered to disk. This is the
- * recommended output mechanism for DistCP due to its scalability.
- * This test suite runs the server in File System Optimized mode.
- *
- * Note: It isn't possible to convert this into a parameterized test due to
- * unrelated failures occurring while trying to handle directories with names
- * containing '[' and ']' characters.
- */
-public class ITestOzoneContractDistCpWithFSO
- extends AbstractContractDistCpTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- OzoneContract.createCluster(true);
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected OzoneContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Override
- protected void deleteTestDirInTeardown() throws IOException {
- super.deleteTestDirInTeardown();
- cleanup("TEARDOWN", getLocalFS(), getLocalDir());
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
deleted file mode 100644
index a8013387cd4..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Ozone contract tests covering getFileStatus.
- */
-@RunWith(Parameterized.class)
-public class ITestOzoneContractGetFileStatus
- extends AbstractContractGetFileStatusTest {
-
- public ITestOzoneContractGetFileStatus(boolean fso) {
- // Actual init done in initParam().
- }
-
- @Parameterized.BeforeParam
- public static void initParam(boolean fso) throws IOException {
- OzoneContract.createCluster(fso);
- }
-
- @Parameterized.AfterParam
- public static void teardownParam() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- private static final Logger LOG =
- LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class);
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Override
- public void teardown() throws Exception {
- LOG.info("FS details {}", getFileSystem());
- super.teardown();
- }
-
- @Override
- protected Configuration createConfiguration() {
- return super.createConfiguration();
- }
-
- @Parameterized.Parameters
- public static Collection data() {
- return OzoneContract.getFsoCombinations();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
deleted file mode 100644
index 49118a0595a..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Test dir operations on Ozone.
- */
-@RunWith(Parameterized.class)
-public class ITestOzoneContractMkdir extends AbstractContractMkdirTest {
-
- public ITestOzoneContractMkdir(boolean fso) {
- // Actual init done in initParam().
- }
-
- @Parameterized.BeforeParam
- public static void initParam(boolean fso) throws IOException {
- OzoneContract.createCluster(fso);
- }
-
- @Parameterized.AfterParam
- public static void teardownParam() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Parameterized.Parameters
- public static Collection data() {
- return OzoneContract.getFsoCombinations();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
deleted file mode 100644
index 05babc015af..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Ozone contract tests opening files.
- */
-@RunWith(Parameterized.class)
-public class ITestOzoneContractOpen extends AbstractContractOpenTest {
-
- public ITestOzoneContractOpen(boolean fso) {
- // Actual init done in initParam().
- }
-
- @Parameterized.BeforeParam
- public static void initParam(boolean fso) throws IOException {
- OzoneContract.createCluster(fso);
- }
-
- @Parameterized.AfterParam
- public static void teardownParam() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Parameterized.Parameters
- public static Collection data() {
- return OzoneContract.getFsoCombinations();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
deleted file mode 100644
index fe5c112a109..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Ozone contract tests covering rename.
- */
-@RunWith(Parameterized.class)
-public class ITestOzoneContractRename extends AbstractContractRenameTest {
-
- public ITestOzoneContractRename(boolean fso) {
- // Actual init done in initParam().
- }
-
- @Parameterized.BeforeParam
- public static void initParam(boolean fso) throws IOException {
- OzoneContract.createCluster(fso);
- }
-
- @Parameterized.AfterParam
- public static void teardownParam() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
-
- @Parameterized.Parameters
- public static Collection data() {
- return OzoneContract.getFsoCombinations();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
deleted file mode 100644
index f4ec389229e..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract;
-
-import java.io.IOException;
-import java.util.Collection;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Ozone contract test for ROOT directory operations.
- */
-@RunWith(Parameterized.class)
-public class ITestOzoneContractRootDir extends
- AbstractContractRootDirectoryTest {
-
- public ITestOzoneContractRootDir(boolean fso) {
- // Actual init done in initParam().
- }
-
- @Parameterized.BeforeParam
- public static void initParam(boolean fso) throws IOException {
- OzoneContract.createCluster(fso);
- }
-
- @Parameterized.AfterParam
- public static void teardownParam() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
- }
-
- @Parameterized.Parameters
- public static Collection data() {
- return OzoneContract.getFsoCombinations();
- }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index f13ca6cda34..e889c4f216d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -19,68 +19,24 @@
package org.apache.hadoop.fs.ozone.contract;
import java.io.IOException;
-import java.time.Duration;
-import java.util.Arrays;
-import java.util.List;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
/**
- * The contract of Ozone: only enabled if the test bucket is provided.
+ * Filesystem contract for O3FS.
*/
-class OzoneContract extends AbstractFSContract {
-
- private static final List FSO_COMBINATIONS = Arrays.asList(
- // FSO configuration is a cluster level server side configuration.
- // If the cluster is configured with SIMPLE metadata layout,
- // non-FSO bucket will created.
- // If the cluster is configured with PREFIX metadata layout,
- // FSO bucket will be created.
- // Presently, OzoneClient checks bucketMetadata then invokes FSO or
- // non-FSO specific code and it makes no sense to add client side
- // configs now. Once the specific client API to set FSO or non-FSO
- // bucket is provided the contract test can be refactored to include
- // another parameter (fsoClient) which sets/unsets the client side
- // configs.
- true, // Server is configured with new layout (PREFIX)
- // and new buckets will be operated on
- false // Server is configured with old layout (SIMPLE)
- // and old buckets will be operated on
- );
- private static MiniOzoneCluster cluster;
- private static final String CONTRACT_XML = "contract/ozone.xml";
-
- private static boolean fsOptimizedServer;
- private static OzoneClient client;
+final class OzoneContract extends AbstractOzoneContract {
- OzoneContract(Configuration conf) {
- super(conf);
- //insert the base features
- addConfResource(CONTRACT_XML);
- }
-
- static List getFsoCombinations() {
- return FSO_COMBINATIONS;
+ OzoneContract(MiniOzoneCluster cluster) {
+ super(cluster);
}
@Override
@@ -93,73 +49,12 @@ public Path getTestPath() {
return new Path("/test");
}
- public static void initOzoneConfiguration(boolean fsoServer) {
- fsOptimizedServer = fsoServer;
- }
-
- public static void createCluster(boolean fsoServer) throws IOException {
- // Set the flag to enable/disable FSO on server.
- initOzoneConfiguration(fsoServer);
- createCluster();
- }
-
- public static void createCluster() throws IOException {
- OzoneConfiguration conf = new OzoneConfiguration();
- DatanodeRatisServerConfig ratisServerConfig =
- conf.getObject(DatanodeRatisServerConfig.class);
- ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
- ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
- conf.setFromObject(ratisServerConfig);
-
- RatisClientConfig.RaftConfig raftClientConfig =
- conf.getObject(RatisClientConfig.RaftConfig.class);
- raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
- raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
- conf.setFromObject(raftClientConfig);
-
- conf.addResource(CONTRACT_XML);
-
- BucketLayout bucketLayout = fsOptimizedServer
- ? BucketLayout.FILE_SYSTEM_OPTIMIZED : BucketLayout.LEGACY;
- conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name());
- conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true);
-
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
- try {
- cluster.waitForClusterToBeReady();
- cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE,
- 180000);
- client = cluster.newClient();
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
-
- private void copyClusterConfigs(String configKey) {
- getConf().set(configKey, cluster.getConf().get(configKey));
- }
-
@Override
- public FileSystem getTestFileSystem() throws IOException {
- //assumes cluster is not null
- assertNotNull(client);
-
- OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
-
- String uri = String.format("%s://%s.%s/",
- OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
- getConf().set("fs.defaultFS", uri);
- copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY);
- copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
- copyClusterConfigs(OZONE_FS_HSYNC_ENABLED);
- return FileSystem.get(getConf());
- }
-
- public static void destroyCluster() throws IOException {
- IOUtils.closeQuietly(client);
- if (cluster != null) {
- cluster.shutdown();
- cluster = null;
+ protected String getRootURI() throws IOException {
+ try (OzoneClient client = getCluster().newClient()) {
+ BucketLayout layout = getConf().getEnum(OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.DEFAULT);
+ OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, layout);
+ return String.format("%s://%s.%s/", getScheme(), bucket.getName(), bucket.getVolumeName());
}
}
}
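
For reference, the root URI built here follows the o3fs addressing scheme, where the authority is <bucket>.<volume> (optionally followed by an OM host or service id). A sketch of how a client points fs.defaultFS at such a bucket, assuming the o3fs filesystem implementation is on the classpath and registered; the volume and bucket names are made up:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class O3fsUriSketch {
  static FileSystem open() throws Exception {
    Configuration conf = new Configuration();
    // o3fs URIs address a single bucket: o3fs://<bucket>.<volume>/
    conf.set("fs.defaultFS", "o3fs://testbucket.testvolume/");
    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(new Path("/dir"));     // paths are relative to the bucket root
    return fs;
  }
}
```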
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/RootedOzoneContract.java
similarity index 56%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/RootedOzoneContract.java
index 51a35ee7e3a..d617ca9de73 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/RootedOzoneContract.java
@@ -18,32 +18,31 @@
package org.apache.hadoop.fs.ozone.contract;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
/**
- * Ozone contract tests covering file seek.
+ * Filesystem contract for OFS.
*/
-public class ITestOzoneContractSeek extends AbstractContractSeekTest {
+final class RootedOzoneContract extends AbstractOzoneContract {
+
+ RootedOzoneContract(MiniOzoneCluster cluster) {
+ super(cluster);
+ }
- @BeforeClass
- public static void createCluster() throws IOException {
- OzoneContract.createCluster();
+ @Override
+ public String getScheme() {
+ return OzoneConsts.OZONE_OFS_URI_SCHEME;
}
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
+ @Override
+ public Path getTestPath() {
+ return new Path("/testvol1/testbucket1/test");
}
@Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
+ protected String getRootURI() {
+ return String.format("%s://localhost", getScheme());
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java
similarity index 60%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java
index 1af6b87b8fd..b45e68d85eb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUnbuffer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java
@@ -18,30 +18,26 @@
package org.apache.hadoop.fs.ozone.contract;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import java.io.IOException;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
+import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
/**
- * Ozone contract tests for {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer}.
+ * Tests O3FS with FSO bucket.
*/
-public class ITestOzoneContractUnbuffer extends AbstractContractUnbufferTest {
+class TestOzoneContractFSO extends AbstractOzoneContractTest {
- @BeforeClass
- public static void createCluster() throws IOException {
- OzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() throws IOException {
- OzoneContract.destroyCluster();
+ @Override
+ OzoneConfiguration createOzoneConfig() {
+ OzoneConfiguration conf = createBaseConfiguration();
+ conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, FILE_SYSTEM_OPTIMIZED.name());
+ return conf;
}
@Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new OzoneContract(conf);
+ AbstractFSContract createOzoneContract(Configuration conf) {
+ return new OzoneContract(getCluster());
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractUnbuffer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java
similarity index 57%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractUnbuffer.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java
index e081e8d5b80..97ced88fcde 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractUnbuffer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java
@@ -15,34 +15,29 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.fs.ozone.contract.rooted;
+package org.apache.hadoop.fs.ozone.contract;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import java.io.IOException;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
+import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY;
/**
- * Ozone contract tests for {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer}.
+ * Tests O3FS with LEGACY bucket.
*/
-public class ITestRootedOzoneContractUnbuffer
- extends AbstractContractUnbufferTest {
+class TestOzoneContractLegacy extends AbstractOzoneContractTest {
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
+ @Override
+ OzoneConfiguration createOzoneConfig() {
+ OzoneConfiguration conf = createBaseConfiguration();
+ conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, LEGACY.name());
+ return conf;
}
@Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
+ AbstractFSContract createOzoneContract(Configuration conf) {
+ return new OzoneContract(getCluster());
}
}
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestRootedOzoneContract.java
similarity index 70%
rename from hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestRootedOzoneContract.java
index 22840bd7a30..ab738f2f664 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestRootedOzoneContract.java
@@ -15,18 +15,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.ozone.test;
+package org.apache.hadoop.fs.ozone.contract;
-import org.junit.rules.Timeout;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
/**
- * Disables the given JUnit4 timeout rule if JUnit5-specific timeout-mode is set
- * to "disabled".
+ * Tests OFS.
*/
-public class JUnit5AwareTimeout extends DisableOnProperty {
-
- public JUnit5AwareTimeout(Timeout delegate) {
- super(delegate, "junit.jupiter.execution.timeout.mode", "disabled");
+class TestRootedOzoneContract extends AbstractOzoneContractTest {
+ @Override
+ AbstractFSContract createOzoneContract(Configuration conf) {
+ return new RootedOzoneContract(getCluster());
}
-
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractCreate.java
deleted file mode 100644
index dd1312f3eb0..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractCreate.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Ozone contract tests creating files.
- */
-public class ITestRootedOzoneContractCreate extends AbstractContractCreateTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDelete.java
deleted file mode 100644
index 12971a3e2d6..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDelete.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Ozone contract tests covering deletes.
- */
-public class ITestRootedOzoneContractDelete extends AbstractContractDeleteTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDistCp.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDistCp.java
deleted file mode 100644
index 026f63c7795..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractDistCp.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
-
-
-/**
- * Contract test suite covering S3A integration with DistCp.
- * Uses the block output stream, buffered to disk. This is the
- * recommended output mechanism for DistCP due to its scalability.
- */
-public class ITestRootedOzoneContractDistCp extends AbstractContractDistCpTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected RootedOzoneContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-
- @Override
- protected void deleteTestDirInTeardown() throws IOException {
- super.deleteTestDirInTeardown();
- cleanup("TEARDOWN", getLocalFS(), getLocalDir());
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractGetFileStatus.java
deleted file mode 100644
index c858b35ac74..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractGetFileStatus.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Ozone contract tests covering getFileStatus.
- */
-public class ITestRootedOzoneContractGetFileStatus
- extends AbstractContractGetFileStatusTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-
- @Override
- protected Configuration createConfiguration() {
- return super.createConfiguration();
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractMkdir.java
deleted file mode 100644
index 680754f8406..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractMkdir.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Test dir operations on Ozone.
- */
-public class ITestRootedOzoneContractMkdir extends AbstractContractMkdirTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractOpen.java
deleted file mode 100644
index 6c98cc5284b..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractOpen.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Ozone contract tests opening files.
- */
-public class ITestRootedOzoneContractOpen extends AbstractContractOpenTest {
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRename.java
deleted file mode 100644
index 56134053ead..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRename.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Ozone contract tests covering rename.
- */
-public class ITestRootedOzoneContractRename extends AbstractContractRenameTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java
deleted file mode 100644
index f4e27df2cdb..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Ozone contract test for ROOT directory operations.
- */
-public class ITestRootedOzoneContractRootDir extends
- AbstractContractRootDirectoryTest {
-
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-
- @Override
- public void testRmRootRecursive() {
- // OFS doesn't support creating files directly under root
- }
-
- @Override
- public void testRmNonEmptyRootDirNonRecursive() {
- // OFS doesn't support creating files directly under root
- }
-
- @Override
- public void testRmEmptyRootDirNonRecursive() {
- // Internally test deletes volume recursively
- // Which is not supported
- }
-
- @Override
- public void testListEmptyRootDirectory() {
- // Internally test deletes volume recursively
- // Which is not supported
- }
-
- @Override
- public void testSimpleRootListing() {
- // Recursive list is not supported
- }
-
- @Override
- public void testMkDirDepth1() {
- // Internally test deletes volume recursively
- // Which is not supported
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractSeek.java
deleted file mode 100644
index ec456fe1a72..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractSeek.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-/**
- * Ozone contract tests covering file seek.
- */
-public class ITestRootedOzoneContractSeek extends AbstractContractSeekTest {
- @BeforeClass
- public static void createCluster() throws IOException {
- RootedOzoneContract.createCluster();
- }
-
- @AfterClass
- public static void teardownCluster() {
- RootedOzoneContract.destroyCluster();
- }
-
- @Override
- protected AbstractFSContract createContract(Configuration conf) {
- return new RootedOzoneContract(conf);
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java
deleted file mode 100644
index 09a05bfb337..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/RootedOzoneContract.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.ozone.contract.rooted;
-
-import java.io.IOException;
-import java.time.Duration;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-/**
- * The contract of Rooted Ozone FileSystem (OFS).
- */
-class RootedOzoneContract extends AbstractFSContract {
-
- private static MiniOzoneCluster cluster;
- private static final String CONTRACT_XML = "contract/ozone.xml";
-
- RootedOzoneContract(Configuration conf) {
- super(conf);
- // insert the base features
- addConfResource(CONTRACT_XML);
- }
-
- @Override
- public String getScheme() {
- return OzoneConsts.OZONE_OFS_URI_SCHEME;
- }
-
- @Override
- public Path getTestPath() {
- return new Path("/testvol1/testbucket1/test");
- }
-
- public static void createCluster() throws IOException {
- OzoneConfiguration conf = new OzoneConfiguration();
- DatanodeRatisServerConfig ratisServerConfig =
- conf.getObject(DatanodeRatisServerConfig.class);
- ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
- ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
- conf.setFromObject(ratisServerConfig);
-
- RatisClientConfig.RaftConfig raftClientConfig =
- conf.getObject(RatisClientConfig.RaftConfig.class);
- raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
- raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
- conf.setFromObject(raftClientConfig);
- conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true);
-
- conf.addResource(CONTRACT_XML);
-
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
- try {
- cluster.waitForClusterToBeReady();
- cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE,
- 180000);
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
-
- private void copyClusterConfigs(String configKey) {
- getConf().set(configKey, cluster.getConf().get(configKey));
- }
-
- @Override
- public FileSystem getTestFileSystem() throws IOException {
- //assumes cluster is not null
- assertNotNull(cluster);
-
- String uri = String.format("%s://localhost:%s/",
- OzoneConsts.OZONE_OFS_URI_SCHEME,
- cluster.getOzoneManager().getRpcPort());
- getConf().set("fs.defaultFS", uri);
- // fs.ofs.impl should be loaded from META-INF, no need to explicitly set it
- copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY);
- copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
- copyClusterConfigs(OZONE_FS_HSYNC_ENABLED);
- return FileSystem.get(getConf());
- }
-
- public static void destroyCluster() {
- if (cluster != null) {
- cluster.shutdown();
- cluster = null;
- }
- }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java
similarity index 97%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java
index ff7ebd3b735..2b64d397eae 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -23,7 +23,6 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java
similarity index 99%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java
index 11f4bf5c133..ac33bd2fdc2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestCloseContainer.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.client.ReplicationFactor;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java
similarity index 99%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java
index 81597193a6e..43df6bf051d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java
@@ -14,7 +14,7 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.commons.lang3.RandomStringUtils;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java
similarity index 95%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java
index 65a03baaef6..30c4e4cd5b4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -24,13 +24,8 @@
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.protocolPB
- .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.ozone.OzoneConsts;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java
similarity index 93%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java
index c9324fab28c..688d13ad361 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java
@@ -14,7 +14,7 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import com.google.protobuf.ByteString;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -28,7 +28,6 @@
import org.apache.hadoop.hdds.scm.container.balancer.IllegalContainerBalancerStateException;
import org.apache.hadoop.hdds.scm.container.balancer.InvalidContainerBalancerConfigurationException;
import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
@@ -49,16 +48,15 @@
import java.io.IOException;
import java.util.Map;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerBalancerConfigurationProto;
import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests failover with SCM HA setup.
@@ -66,8 +64,6 @@
public class TestFailoverWithSCMHA {
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
private String omServiceId;
private String scmServiceId;
private int numOfOMs = 1;
@@ -83,15 +79,13 @@ public class TestFailoverWithSCMHA {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
omServiceId = "om-service-test1";
scmServiceId = "scm-service-test1";
conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD,
SNAPSHOT_THRESHOLD);
cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId).setScmId(scmId).setOMServiceId(omServiceId)
+ .setOMServiceId(omServiceId)
.setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs)
.setNumOfStorageContainerManagers(numOfSCMs).setNumOfActiveSCMs(3)
.build();
@@ -133,8 +127,8 @@ public void testFailover() throws Exception {
.createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
conf);
scmBlockLocationProtocol.getScmInfo();
- assertTrue(logCapture.getOutput()
- .contains("Performing failover to suggested leader"));
+ assertThat(logCapture.getOutput())
+ .contains("Performing failover to suggested leader");
scm = getLeader(cluster);
SCMContainerLocationFailoverProxyProvider proxyProvider =
new SCMContainerLocationFailoverProxyProvider(conf, null);
@@ -150,8 +144,8 @@ public void testFailover() throws Exception {
scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.ONE, "ozone");
- assertTrue(logCapture.getOutput()
- .contains("Performing failover to suggested leader"));
+ assertThat(logCapture.getOutput())
+ .contains("Performing failover to suggested leader");
}
@Test
@@ -192,8 +186,8 @@ public void testMoveFailover() throws Exception {
.createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
conf);
scmBlockLocationProtocol.getScmInfo();
- assertTrue(logCapture.getOutput()
- .contains("Performing failover to suggested leader"));
+ assertThat(logCapture.getOutput())
+ .contains("Performing failover to suggested leader");
scm = getLeader(cluster);
assertNotNull(scm);
@@ -201,7 +195,7 @@ public void testMoveFailover() throws Exception {
//get the same inflightMove
Map<ContainerID, MoveDataNodePair> inflightMove =
scm.getReplicationManager().getMoveScheduler().getInflightMove();
- assertTrue(inflightMove.containsKey(id));
+ assertThat(inflightMove).containsKey(id);
MoveDataNodePair mp = inflightMove.get(id);
assertEquals(dn2, mp.getTgt());
assertEquals(dn1, mp.getSrc());
@@ -225,8 +219,8 @@ public void testMoveFailover() throws Exception {
scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.ONE, "ozone");
- assertTrue(logCapture.getOutput()
- .contains("Performing failover to suggested leader"));
+ assertThat(logCapture.getOutput())
+ .contains("Performing failover to suggested leader");
//switch to the new leader successfully, new leader should
//get the same inflightMove , which should not contains
@@ -235,7 +229,7 @@ public void testMoveFailover() throws Exception {
assertNotNull(scm);
inflightMove = scm.getReplicationManager()
.getMoveScheduler().getInflightMove();
- assertFalse(inflightMove.containsKey(id));
+ assertThat(inflightMove).doesNotContainKey(id);
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java
similarity index 91%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java
index 3f59f8c601f..43fc45efd09 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.RandomUtils;
@@ -23,19 +23,11 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
- ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
- StorageContainerException;
-import org.apache.hadoop.hdds.scm.PlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.
- SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.protocolPB.
- StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -54,9 +46,9 @@
import java.util.concurrent.atomic.AtomicReference;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test Container calls.
@@ -152,7 +144,7 @@ public void testGetCommittedBlockLengthForInvalidBlock() throws Exception {
Throwable t = assertThrows(StorageContainerException.class,
() -> ContainerProtocolCalls.getCommittedBlockLength(client, blockID,
null));
- assertTrue(t.getMessage().contains("Unable to find the block"));
+ assertThat(t.getMessage()).contains("Unable to find the block");
xceiverClientManager.releaseClient(client, false);
}
@@ -183,8 +175,8 @@ public void tesPutKeyResposne() throws Exception {
.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
response = client.sendCommand(putKeyRequest).getPutBlock();
assertEquals(response.getCommittedBlockLength().getBlockLength(), data.length);
- assertTrue(response.getCommittedBlockLength().getBlockID()
- .getBlockCommitSequenceId() > 0);
+ assertThat(response.getCommittedBlockLength().getBlockID().getBlockCommitSequenceId())
+ .isGreaterThan(0);
BlockID responseBlockID = BlockID
.getFromProtobuf(response.getCommittedBlockLength().getBlockID());
blockID
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
index e5053b3ec78..6f0bd40dde0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
@@ -33,7 +33,7 @@
import org.apache.ozone.test.GenericTestUtils;
import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
-import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import org.apache.ratis.protocol.ClientId;
@@ -81,7 +81,7 @@ public void testLeaderIdUsedOnFirstCall() throws Exception {
List<Pipeline> pipelines = cluster.getStorageContainerManager()
.getPipelineManager().getPipelines(RatisReplicationConfig.getInstance(
ReplicationFactor.THREE));
- assertFalse(pipelines.isEmpty());
+ assertThat(pipelines).isNotEmpty();
Optional<Pipeline> optional = pipelines.stream()
.filter(Pipeline::isHealthy)
.findFirst();
@@ -109,10 +109,8 @@ public void testLeaderIdUsedOnFirstCall() throws Exception {
ContainerProtocolCalls.createContainer(xceiverClientRatis, 1L, null);
}
logCapturer.stopCapturing();
- assertFalse(
- logCapturer.getOutput().contains(
- "org.apache.ratis.protocol.NotLeaderException"),
- "Client should connect to pipeline leader on first try.");
+ assertThat(logCapturer.getOutput())
+ .doesNotContain("org.apache.ratis.protocol.NotLeaderException");
}
@Test @Timeout(unit = TimeUnit.MILLISECONDS, value = 120000)
@@ -120,7 +118,7 @@ public void testLeaderIdAfterLeaderChange() throws Exception {
List<Pipeline> pipelines = cluster.getStorageContainerManager()
.getPipelineManager().getPipelines(RatisReplicationConfig.getInstance(
ReplicationFactor.THREE));
- assertFalse(pipelines.isEmpty());
+ assertThat(pipelines).isNotEmpty();
Optional<Pipeline> optional = pipelines.stream()
.filter(Pipeline::isHealthy)
.findFirst();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java
similarity index 98%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java
index d980d761de6..90f8375f829 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -24,9 +24,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
- .SCMContainerPlacementMetrics;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.utils.IOUtils;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
index 5d4f9f013d1..6b5b1aedcda 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
@@ -34,7 +34,6 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-import java.util.UUID;
import java.util.stream.Stream;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -59,15 +58,15 @@
import org.junit.jupiter.params.provider.MethodSource;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.params.provider.Arguments.arguments;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -81,9 +80,6 @@ public class TestSCMDbCheckpointServlet {
private StorageContainerManager scm;
private SCMMetrics scmMetrics;
private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
- private String omId;
private HttpServletRequest requestMock;
private HttpServletResponse responseMock;
private String method;
@@ -100,14 +96,8 @@ public class TestSCMDbCheckpointServlet {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
- omId = UUID.randomUUID().toString();
conf.setBoolean(OZONE_ACL_ENABLED, true);
cluster = MiniOzoneCluster.newBuilder(conf)
- .setClusterId(clusterId)
- .setScmId(scmId)
- .setOmId(omId)
.build();
cluster.waitForClusterToBeReady();
scm = cluster.getStorageContainerManager();
@@ -203,15 +193,13 @@ public void write(int b) throws IOException {
doEndpoint();
- assertTrue(outputPath.toFile().length() > 0);
- assertTrue(
- scmMetrics.getDBCheckpointMetrics().
- getLastCheckpointCreationTimeTaken() > 0);
- assertTrue(
- scmMetrics.getDBCheckpointMetrics().
- getLastCheckpointStreamingTimeTaken() > 0);
- assertTrue(scmMetrics.getDBCheckpointMetrics().
- getNumCheckpoints() > initialCheckpointCount);
+ assertThat(outputPath.toFile().length()).isGreaterThan(0);
+ assertThat(scmMetrics.getDBCheckpointMetrics().getLastCheckpointCreationTimeTaken())
+ .isGreaterThan(0);
+ assertThat(scmMetrics.getDBCheckpointMetrics().getLastCheckpointStreamingTimeTaken())
+ .isGreaterThan(0);
+ assertThat(scmMetrics.getDBCheckpointMetrics().getNumCheckpoints())
+ .isGreaterThan(initialCheckpointCount);
verify(scmDbCheckpointServletMock).writeDbDataToStream(any(),
any(), any(), eq(toExcludeList), any(), any());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
similarity index 96%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
index e63c0658de3..0aa2599637a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java
@@ -14,7 +14,7 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import java.io.File;
import java.io.IOException;
@@ -22,14 +22,12 @@
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl;
import org.apache.hadoop.hdds.scm.ha.SCMStateMachine;
@@ -48,6 +46,7 @@
import org.apache.ozone.test.tag.Flaky;
import org.apache.ratis.server.protocol.TermIndex;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
@@ -71,8 +70,6 @@ public class TestSCMInstallSnapshotWithHA {
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
private String omServiceId;
private String scmServiceId;
private int numOfOMs = 1;
@@ -89,8 +86,6 @@ public class TestSCMInstallSnapshotWithHA {
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
omServiceId = "om-service-test1";
scmServiceId = "scm-service-test1";
@@ -100,8 +95,6 @@ public void init() throws Exception {
SNAPSHOT_THRESHOLD);
cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
- .setScmId(scmId)
.setOMServiceId(omServiceId)
.setSCMServiceId(scmServiceId)
.setNumOfOzoneManagers(numOfOMs)
@@ -150,7 +143,7 @@ public void testInstallSnapshot() throws Exception {
}, 100, 3000);
long followerLastAppliedIndex =
followerSM.getLastAppliedTermIndex().getIndex();
- assertTrue(followerLastAppliedIndex >= 200);
+ assertThat(followerLastAppliedIndex).isGreaterThanOrEqualTo(200);
assertFalse(followerSM.getLifeCycleState().isPausingOrPaused());
     // Verify that the follower's DB contains the transactions which were
@@ -207,7 +200,7 @@ public void testInstallOldCheckpointFailure() throws Exception {
}
String errorMsg = "Reloading old state of SCM";
- assertTrue(logCapture.getOutput().contains(errorMsg));
+ assertThat(logCapture.getOutput()).contains(errorMsg);
assertNull(newTermIndex, " installed checkpoint even though checkpoint " +
"logIndex is less than it's lastAppliedIndex");
assertEquals(followerTermIndex, followerSM.getLastAppliedTermIndex());
@@ -272,8 +265,8 @@ public void testInstallCorruptedCheckpointFailure() throws Exception {
scmhaManager.installCheckpoint(leaderCheckpointLocation,
leaderCheckpointTrxnInfo);
- assertTrue(logCapture.getOutput()
- .contains("Failed to reload SCM state and instantiate services."));
+ assertThat(logCapture.getOutput())
+ .contains("Failed to reload SCM state and instantiate services.");
final LifeCycle.State s = followerSM.getLifeCycleState();
assertTrue(s == LifeCycle.State.NEW || s.isPausingOrPaused(), "Unexpected lifeCycle state: " + s);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMMXBean.java
similarity index 95%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMMXBean.java
index 94019ed1d62..598a65fb48f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMMXBean.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -47,9 +47,10 @@
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.TabularData;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
*
@@ -183,16 +184,15 @@ private void verifyEquals(TabularData actualData,
assertNotNull(expectedData);
for (Object obj : actualData.values()) {
// Each TabularData is a set of CompositeData
- assertTrue(obj instanceof CompositeData);
- CompositeData cds = (CompositeData) obj;
+ CompositeData cds = assertInstanceOf(CompositeData.class, obj);
assertEquals(2, cds.values().size());
       Iterator<?> it = cds.values().iterator();
String key = it.next().toString();
String value = it.next().toString();
int num = Integer.parseInt(value);
- assertTrue(expectedData.containsKey(key));
+ assertThat(expectedData).containsKey(key);
assertEquals(expectedData.remove(key).intValue(), num);
}
- assertTrue(expectedData.isEmpty());
+ assertThat(expectedData).isEmpty();
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java
similarity index 93%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java
index 2d43b625c44..dcc9b3e8e37 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -40,9 +40,10 @@
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Class which tests the SCMNodeManagerInfo Bean.
@@ -128,17 +129,16 @@ private void verifyEquals(TabularData actualData, Map
assertNotNull(actualData);
assertNotNull(expectedData);
for (Object obj : actualData.values()) {
- assertTrue(obj instanceof CompositeData);
- CompositeData cds = (CompositeData) obj;
+ CompositeData cds = assertInstanceOf(CompositeData.class, obj);
assertEquals(2, cds.values().size());
       Iterator<?> it = cds.values().iterator();
String key = it.next().toString();
String value = it.next().toString();
long num = Long.parseLong(value);
- assertTrue(expectedData.containsKey(key));
+ assertThat(expectedData).containsKey(key);
assertEquals(expectedData.remove(key).longValue(), num);
}
- assertTrue(expectedData.isEmpty());
+ assertThat(expectedData).isEmpty();
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
index 5fd91fa46f2..0375d83baaf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java
@@ -29,12 +29,10 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
-import java.util.UUID;
-
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests snapshots in SCM HA.
@@ -52,7 +50,6 @@ public static void setup() throws Exception {
cluster = MiniOzoneCluster
.newBuilder(conf)
.setNumDatanodes(3)
- .setScmId(UUID.randomUUID().toString())
.build();
cluster.waitForClusterToBeReady();
}
@@ -76,14 +73,12 @@ public void testSnapshot() throws Exception {
long snapshotInfo2 = scm.getScmHAManager().asSCMHADBTransactionBuffer()
.getLatestTrxInfo().getTransactionIndex();
- assertTrue(snapshotInfo2 > snapshotInfo1,
- String.format("Snapshot index 2 %d should greater than Snapshot " +
- "index 1 %d", snapshotInfo2, snapshotInfo1));
+ assertThat(snapshotInfo2).isGreaterThan(snapshotInfo1);
cluster.restartStorageContainerManager(false);
TransactionInfo trxInfoAfterRestart =
scm.getScmHAManager().asSCMHADBTransactionBuffer().getLatestTrxInfo();
- assertTrue(trxInfoAfterRestart.getTransactionIndex() >= snapshotInfo2);
+ assertThat(trxInfoAfterRestart.getTransactionIndex()).isGreaterThanOrEqualTo(snapshotInfo2);
assertDoesNotThrow(() ->
pipelineManager.getPipeline(ratisPipeline1.getId()));
assertDoesNotThrow(() ->
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
similarity index 95%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
index 1d4fc95dbf1..f7a3aa9c9b7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
@@ -15,14 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.DefaultConfigManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.scm.ScmConfig;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.ha.SCMStateMachine;
import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig;
@@ -39,6 +37,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,7 +47,6 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
@@ -70,6 +68,7 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -91,12 +90,11 @@ public final class TestSecretKeySnapshot {
private MiniKdc miniKdc;
private OzoneConfiguration conf;
+ @TempDir
private File workDir;
private File ozoneKeytab;
private File spnegoKeytab;
private String host;
- private String clusterId;
- private String scmId;
private MiniOzoneHAClusterImpl cluster;
@BeforeEach
@@ -106,10 +104,6 @@ public void init() throws Exception {
ExitUtils.disableSystemExit();
- workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
-
startMiniKdc();
setSecureConfig();
createCredentialsInKDC();
@@ -125,9 +119,7 @@ public void init() throws Exception {
conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_MS + "ms");
MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
.setSCMServiceId("TestSecretKeySnapshot")
- .setScmId(scmId)
.setSCMServiceId("SCMServiceId")
.setNumDatanodes(1)
.setNumOfStorageContainerManagers(3)
@@ -239,7 +231,7 @@ public void testInstallSnapshot() throws Exception {
100, 3000);
long followerLastAppliedIndex =
followerSM.getLastAppliedTermIndex().getIndex();
- assertTrue(followerLastAppliedIndex >= 200);
+ assertThat(followerLastAppliedIndex).isGreaterThanOrEqualTo(200);
assertFalse(followerSM.getLifeCycleState().isPausingOrPaused());
// Verify that the follower has the secret keys created
@@ -250,8 +242,8 @@ public void testInstallSnapshot() throws Exception {
     List<ManagedSecretKey> followerKeys =
followerSecretKeyManager.getSortedKeys();
LOG.info("Follower secret keys after snapshot: {}", followerKeys);
- assertTrue(followerKeys.size() >= 2);
- assertTrue(followerKeys.contains(currentKeyInLeader));
+ assertThat(followerKeys.size()).isGreaterThanOrEqualTo(2);
+ assertThat(followerKeys).contains(currentKeyInLeader);
assertEquals(leaderSecretKeyManager.getSortedKeys(), followerKeys);
// Wait for the next rotation, assert that the updates can be synchronized
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java
similarity index 96%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeysApi.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java
index 1b8c2843996..eb2442cd098 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeysApi.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.conf.DefaultConfigManager;
@@ -35,11 +35,12 @@
import org.apache.hadoop.util.ExitUtil;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.util.ExitUtils;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -74,11 +75,11 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Integration test to verify symmetric SecretKeys APIs in a secure cluster.
@@ -91,14 +92,13 @@ public final class TestSecretKeysApi {
.getLogger(TestSecretKeysApi.class);
private MiniKdc miniKdc;
private OzoneConfiguration conf;
+ @TempDir
private File workDir;
private File ozoneKeytab;
private File spnegoKeytab;
private File testUserKeytab;
private String testUserPrincipal;
private String ozonePrincipal;
- private String clusterId;
- private String scmId;
private MiniOzoneHAClusterImpl cluster;
@BeforeEach
@@ -109,10 +109,6 @@ public void init() throws Exception {
ExitUtils.disableSystemExit();
ExitUtil.disableSystemExit();
- workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
-
startMiniKdc();
setSecureConfig();
createCredentialsInKDC();
@@ -309,9 +305,9 @@ private void testSecretKeyAuthorization() throws Exception {
assertThrows(RemoteException.class,
secretKeyProtocol::getCurrentSecretKey);
assertEquals(AuthorizationException.class.getName(), ex.getClassName());
- assertTrue(ex.getMessage().contains(
+ assertThat(ex.getMessage()).contains(
"User test@EXAMPLE.COM (auth:KERBEROS) is not authorized " +
- "for protocol"));
+ "for protocol");
}
@Test
@@ -331,9 +327,7 @@ private void startCluster(int numSCMs)
throws IOException, TimeoutException, InterruptedException {
OzoneManager.setTestSecureOmFlag(true);
MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
.setSCMServiceId("TestSecretKey")
- .setScmId(scmId)
.setNumDatanodes(3)
.setNumOfStorageContainerManagers(numSCMs)
.setNumOfOzoneManagers(1);
@@ -342,12 +336,12 @@ private void startCluster(int numSCMs)
cluster.waitForClusterToBeReady();
}
- @NotNull
+ @Nonnull
private SecretKeyProtocol getSecretKeyProtocol() throws IOException {
return getSecretKeyProtocol(ozonePrincipal, ozoneKeytab);
}
- @NotNull
+ @Nonnull
private SecretKeyProtocol getSecretKeyProtocol(
String user, File keyTab) throws IOException {
UserGroupInformation ugi =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
similarity index 92%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index b0c73a592a7..e973c842de4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
@@ -30,16 +30,11 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.ScmConfig;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -78,6 +73,7 @@
import org.apache.hadoop.ozone.OzoneTestUtils;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpointTask;
@@ -88,7 +84,6 @@
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.ExitUtil;
@@ -139,6 +134,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState;
import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -149,10 +145,10 @@
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.argThat;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
@@ -221,19 +217,14 @@ private void testRpcPermission(MiniOzoneCluster cluster,
cluster.getStorageContainerManager().getClientProtocolServer());
mockRemoteUser(UserGroupInformation.createRemoteUser(fakeRemoteUsername));
-
- try {
- mockClientServer.deleteContainer(
- ContainerTestHelper.getTestContainerID());
- fail("Operation should fail, expecting an IOException here.");
- } catch (Exception e) {
- if (expectPermissionDenied) {
- verifyPermissionDeniedException(e, fakeRemoteUsername);
- } else {
- // If passes permission check, it should fail with
- // container not exist exception.
- assertInstanceOf(ContainerNotFoundException.class, e);
- }
+ Exception ex = assertThrows(Exception.class, () -> mockClientServer.deleteContainer(
+ ContainerTestHelper.getTestContainerID()));
+ if (expectPermissionDenied) {
+ verifyPermissionDeniedException(ex, fakeRemoteUsername);
+ } else {
+ // If passes permission check, it should fail with
+ // container not exist exception.
+ assertInstanceOf(ContainerNotFoundException.class, ex);
}
try {
@@ -249,18 +240,14 @@ private void testRpcPermission(MiniOzoneCluster cluster,
verifyPermissionDeniedException(e, fakeRemoteUsername);
}
- try {
- mockClientServer.getContainer(
- ContainerTestHelper.getTestContainerID());
- fail("Operation should fail, expecting an IOException here.");
- } catch (Exception e) {
- if (expectPermissionDenied) {
- verifyPermissionDeniedException(e, fakeRemoteUsername);
- } else {
- // If passes permission check, it should fail with
- // key not exist exception.
- assertInstanceOf(ContainerNotFoundException.class, e);
- }
+ Exception e = assertThrows(Exception.class, () -> mockClientServer.getContainer(
+ ContainerTestHelper.getTestContainerID()));
+ if (expectPermissionDenied) {
+ verifyPermissionDeniedException(e, fakeRemoteUsername);
+ } else {
+ // If passes permission check, it should fail with
+ // key not exist exception.
+ assertInstanceOf(ContainerNotFoundException.class, e);
}
}
@@ -275,26 +262,36 @@ private void verifyPermissionDeniedException(Exception e, String userName) {
public void testBlockDeletionTransactions() throws Exception {
int numKeys = 5;
OzoneConfiguration conf = new OzoneConfiguration();
- conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 100,
+ conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
TimeUnit.MILLISECONDS);
- conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 100,
+ DatanodeConfiguration datanodeConfiguration = conf.getObject(
+ DatanodeConfiguration.class);
+ datanodeConfiguration.setBlockDeletionInterval(Duration.ofMillis(100));
+ conf.setFromObject(datanodeConfiguration);
+ ScmConfig scmConfig = conf.getObject(ScmConfig.class);
+ scmConfig.setBlockDeletionInterval(Duration.ofMillis(100));
+ conf.setFromObject(scmConfig);
+
+ conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
+ + ".client.request.write.timeout", 30, TimeUnit.SECONDS);
+ conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
+ + ".client.request.watch.timeout", 30, TimeUnit.SECONDS);
+ conf.setInt("hdds.datanode.block.delete.threads.max", 5);
+ conf.setInt("hdds.datanode.block.delete.queue.limit", 32);
+ conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 50,
+ TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
TimeUnit.MILLISECONDS);
- conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
- 3000,
+ conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
- conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
- 1, TimeUnit.SECONDS);
- ScmConfig scmConfig = conf.getObject(ScmConfig.class);
- scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1));
- conf.setFromObject(scmConfig);
// Reset container provision size, otherwise only one container
// is created by default.
conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
numKeys);
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
- .setHbInterval(100)
+ .setHbInterval(50)
.build();
cluster.waitForClusterToBeReady();
@@ -313,13 +310,12 @@ public void testBlockDeletionTransactions() throws Exception {
OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
cluster.getStorageContainerManager());
}
-
     Map<Long, List<Long>> containerBlocks = createDeleteTXLog(
cluster.getStorageContainerManager(),
delLog, keyLocations, helper);
// Verify a few TX gets created in the TX log.
- assertTrue(delLog.getNumOfValidTransactions() > 0);
+ assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0);
// Once TXs are written into the log, SCM starts to fetch TX
// entries from the log and schedule block deletions in HB interval,
@@ -336,7 +332,7 @@ public void testBlockDeletionTransactions() throws Exception {
} catch (IOException e) {
return false;
}
- }, 1000, 10000);
+ }, 1000, 22000);
assertTrue(helper.verifyBlocksWithTxnTable(containerBlocks));
// Continue the work, add some TXs that with known container names,
// but unknown block IDs.
@@ -352,7 +348,7 @@ public void testBlockDeletionTransactions() throws Exception {
}
// Verify a few TX gets created in the TX log.
- assertTrue(delLog.getNumOfValidTransactions() > 0);
+ assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0);
// These blocks cannot be found in the container, skip deleting them
// eventually these TX will success.
@@ -378,7 +374,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception {
MiniOzoneCluster cluster =
MiniOzoneCluster.newBuilder(conf).setHbInterval(1000)
.setHbProcessorInterval(3000).setNumDatanodes(1)
- .setClusterId(UUID.randomUUID().toString()).build();
+ .build();
cluster.waitForClusterToBeReady();
try {
@@ -413,8 +409,8 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception {
GenericTestUtils.LogCapturer versionEndPointTaskLog =
GenericTestUtils.LogCapturer.captureLogs(VersionEndpointTask.LOG);
// Initially empty
- assertTrue(scmDnHBDispatcherLog.getOutput().isEmpty());
- assertTrue(versionEndPointTaskLog.getOutput().isEmpty());
+ assertThat(scmDnHBDispatcherLog.getOutput()).isEmpty();
+ assertThat(versionEndPointTaskLog.getOutput()).isEmpty();
// start the new SCM
scm.start();
// Initially DatanodeStateMachine will be in Running state
@@ -444,9 +440,9 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception {
5000);
assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
dsm.getContext().getState());
- assertTrue(versionEndPointTaskLog.getOutput().contains(
+ assertThat(versionEndPointTaskLog.getOutput()).contains(
"org.apache.hadoop.ozone.common" +
- ".InconsistentStorageStateException: Mismatched ClusterIDs"));
+ ".InconsistentStorageStateException: Mismatched ClusterIDs");
} finally {
cluster.shutdown();
}
@@ -458,7 +454,7 @@ public void testBlockDeletingThrottling() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
- conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
+ conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
100, TimeUnit.MILLISECONDS);
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
scmConfig.setBlockDeletionInterval(Duration.ofMillis(100));
@@ -501,23 +497,14 @@ public void testBlockDeletingThrottling() throws Exception {
createDeleteTXLog(cluster.getStorageContainerManager(),
delLog, keyLocations, helper);
// Verify a few TX gets created in the TX log.
- assertTrue(delLog.getNumOfValidTransactions() > 0);
+ assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0);
// Verify the size in delete commands is expected.
GenericTestUtils.waitFor(() -> {
NodeManager nodeManager = cluster.getStorageContainerManager()
.getScmNodeManager();
- LayoutVersionManager versionManager =
- nodeManager.getLayoutVersionManager();
- StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo
- = StorageContainerDatanodeProtocolProtos.LayoutVersionProto
- .newBuilder()
- .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
- .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
- .build();
       List<SCMCommand> commands = nodeManager.processHeartbeat(
- nodeManager.getNodes(NodeStatus.inServiceHealthy()).get(0),
- layoutInfo);
+ nodeManager.getNodes(NodeStatus.inServiceHealthy()).get(0));
if (commands != null) {
for (SCMCommand cmd : commands) {
if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
@@ -555,7 +542,7 @@ private Map<Long, List<Long>> createDeleteTXLog(
for (OmKeyInfo info : keyLocations.values()) {
totalCreatedBlocks += info.getKeyLocationVersions().size();
}
- assertTrue(totalCreatedBlocks > 0);
+ assertThat(totalCreatedBlocks).isGreaterThan(0);
assertEquals(totalCreatedBlocks,
helper.getAllBlocks(containerNames).size());
@@ -727,12 +714,7 @@ public void testSCMReinitializationWithHAEnabled() throws Exception {
final String clusterId =
cluster.getStorageContainerManager().getClusterId();
// validate there is no ratis group pre existing
- try {
- validateRatisGroupExists(conf, clusterId);
- fail();
- } catch (IOException ioe) {
- // Exception is expected here
- }
+ assertThrows(IOException.class, () -> validateRatisGroupExists(conf, clusterId));
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
// This will re-initialize SCM
@@ -794,7 +776,6 @@ public void testScmInfo() throws Exception {
@Test
public void testScmProcessDatanodeHeartbeat() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
- String scmId = UUID.randomUUID().toString();
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
StaticMapping.class, DNSToSwitchMapping.class);
StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
@@ -804,7 +785,6 @@ public void testScmProcessDatanodeHeartbeat() throws Exception {
final int datanodeNum = 3;
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(datanodeNum)
- .setScmId(scmId)
.build();
cluster.waitForClusterToBeReady();
StorageContainerManager scm = cluster.getStorageContainerManager();
@@ -824,7 +804,7 @@ public void testScmProcessDatanodeHeartbeat() throws Exception {
for (DatanodeDetails node : allNodes) {
DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
.getNodeByUuid(node.getUuidString());
- assertTrue(datanodeInfo.getLastHeartbeatTime() > start);
+ assertThat(datanodeInfo.getLastHeartbeatTime()).isGreaterThan(start);
assertEquals(datanodeInfo.getUuidString(),
datanodeInfo.getNetworkName());
assertEquals("/rack1", datanodeInfo.getNetworkLocation());
@@ -841,7 +821,7 @@ public void testCloseContainerCommandOnRestart() throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
- conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
+ conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
100, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
numKeys);
@@ -850,7 +830,6 @@ public void testCloseContainerCommandOnRestart() throws Exception {
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
.setHbInterval(1000)
.setHbProcessorInterval(3000)
- .setTrace(false)
.setNumDatanodes(1)
.build();
cluster.waitForClusterToBeReady();
@@ -968,7 +947,7 @@ public void testContainerReportQueueWithDrop() throws Exception {
eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata);
eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata);
eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata);
- assertTrue(containerReportExecutors.droppedEvents() > 1);
+ assertThat(containerReportExecutors.droppedEvents()).isGreaterThan(1);
Thread.currentThread().sleep(1000);
assertEquals(containerReportExecutors.droppedEvents()
+ containerReportExecutors.scheduledEvents(),
@@ -1024,8 +1003,8 @@ public void testContainerReportQueueTakingMoreTime() throws Exception {
= new ContainerReportFromDatanode(dn, report);
eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata2);
semaphore.acquire(2);
- assertTrue(containerReportExecutors.longWaitInQueueEvents() >= 1);
- assertTrue(containerReportExecutors.longTimeExecutionEvents() >= 1);
+ assertThat(containerReportExecutors.longWaitInQueueEvents()).isGreaterThanOrEqualTo(1);
+ assertThat(containerReportExecutors.longTimeExecutionEvents()).isGreaterThanOrEqualTo(1);
containerReportExecutors.close();
semaphore.release(2);
}
@@ -1080,10 +1059,7 @@ public void testNonRatisToRatis()
throws IOException, AuthenticationException, InterruptedException,
TimeoutException {
final OzoneConfiguration conf = new OzoneConfiguration();
- final String clusterID = UUID.randomUUID().toString();
try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
- .setClusterId(clusterID)
- .setScmId(UUID.randomUUID().toString())
.setNumDatanodes(3)
.build()) {
final StorageContainerManager nonRatisSCM = cluster
@@ -1095,7 +1071,7 @@ public void testNonRatisToRatis()
DefaultConfigManager.clearDefaultConfigs();
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- StorageContainerManager.scmInit(conf, clusterID);
+ StorageContainerManager.scmInit(conf, cluster.getClusterId());
cluster.restartStorageContainerManager(false);
final StorageContainerManager ratisSCM = cluster
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java
similarity index 97%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java
index a0f5bc7834e..e62820cfb1d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java
@@ -16,13 +16,12 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.DefaultConfigManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.ha.SCMHAMetrics;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
@@ -60,6 +59,7 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
+import static org.apache.ozone.test.GenericTestUtils.getTestStartTime;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -74,8 +74,6 @@ public class TestStorageContainerManagerHA {
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
private String omServiceId;
private static int numOfOMs = 3;
private String scmServiceId;
@@ -95,13 +93,9 @@ public void init() throws Exception {
conf.set(ScmConfigKeys.OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL,
"5s");
conf.set(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_GAP, "1");
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
omServiceId = "om-service-test1";
scmServiceId = "scm-service-test1";
cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
- .setScmId(scmId)
.setOMServiceId(omServiceId)
.setSCMServiceId(scmServiceId)
.setNumOfStorageContainerManagers(numOfSCMs)
@@ -175,7 +169,7 @@ void testAllSCMAreRunning() throws Exception {
private void doPutKey() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
try (OzoneClient client = cluster.newClient()) {
ObjectStore store = client.getObjectStore();
String value = "sample value";
@@ -195,7 +189,7 @@ private void doPutKey() throws Exception {
assertEquals(keyName, key.getName());
OzoneInputStream is = bucket.readKey(keyName);
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
- is.read(fileContent);
+ assertEquals(fileContent.length, is.read(fileContent));
assertEquals(value, new String(fileContent, UTF_8));
assertFalse(key.getCreationTime().isBefore(testStartTime));
assertFalse(key.getModificationTime().isBefore(testStartTime));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java
similarity index 99%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHelper.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java
index 23c0ef8496e..322b1e65bc6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java
@@ -14,7 +14,7 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import java.io.IOException;
import java.util.ArrayList;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java
similarity index 96%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java
index f7835d30d5c..fb312dfb509 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java
@@ -15,11 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -27,9 +27,6 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.XceiverClientReply;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
@@ -114,7 +111,7 @@ public XceiverClientReply sendCommandAsync(
@Timeout(5)
public void testGetBlockRetryAlNodes() {
     final ArrayList<DatanodeDetails> allDNs = new ArrayList<>(dns);
- assertTrue(allDNs.size() > 1);
+ assertThat(allDNs.size()).isGreaterThan(1);
try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {
@Override
public XceiverClientReply sendCommandAsync(
@@ -135,7 +132,7 @@ public XceiverClientReply sendCommandAsync(
@Timeout(5)
public void testReadChunkRetryAllNodes() {
     final ArrayList<DatanodeDetails> allDNs = new ArrayList<>(dns);
- assertTrue(allDNs.size() > 1);
+ assertThat(allDNs.size()).isGreaterThan(1);
try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) {
@Override
public XceiverClientReply sendCommandAsync(
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java
similarity index 96%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java
index f66315f851d..95a0b0e17fd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientManager.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import com.google.common.cache.Cache;
import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;
@@ -25,10 +25,7 @@
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.protocolPB
- .StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -45,12 +42,12 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
/**
@@ -177,7 +174,7 @@ public void testFreeByReference() throws IOException {
Throwable t = assertThrows(IOException.class,
() -> ContainerProtocolCalls.createContainer(client1,
container1.getContainerInfo().getContainerID(), null));
- assertTrue(t.getMessage().contains("This channel is not connected"));
+ assertThat(t.getMessage()).contains("This channel is not connected");
clientManager.releaseClient(client2, false);
}
@@ -228,7 +225,7 @@ public void testFreeByEviction() throws IOException {
Throwable t = assertThrows(IOException.class,
() -> ContainerProtocolCalls.createContainer(client1,
container1.getContainerInfo().getContainerID(), null));
- assertTrue(t.getMessage().contains("This channel is not connected"));
+ assertThat(t.getMessage()).contains("This channel is not connected");
clientManager.releaseClient(client2, false);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java
similarity index 96%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java
index 42c7a58ccd3..c4f62040536 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientMetrics.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm;
+package org.apache.hadoop.hdds.scm;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
import static org.apache.ozone.test.MetricsAsserts.assertCounter;
@@ -30,19 +30,14 @@
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
- .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.ozone.test.GenericTestUtils;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 72d1ebf4381..df5281e4240 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -50,12 +50,11 @@
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests for ContainerStateManager.
@@ -214,7 +213,7 @@ public void testGetMatchingContainer() throws IOException {
ContainerInfo info = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
container1.getPipeline());
- assertTrue(info.getContainerID() > cid);
+ assertThat(info.getContainerID()).isGreaterThan(cid);
cid = info.getContainerID();
}
@@ -264,10 +263,9 @@ public void testGetMatchingContainerMultipleThreads()
// TODO: #CLUTIL Look at the division of block allocations in different
// containers.
LOG.error("Total allocated block = " + matchedCount);
- assertTrue(matchedCount <=
- numBlockAllocates / container2MatchedCount.size() + threshold
- && matchedCount >=
- numBlockAllocates / container2MatchedCount.size() - threshold);
+ assertThat(matchedCount)
+ .isLessThanOrEqualTo(numBlockAllocates / container2MatchedCount.size() + threshold)
+ .isGreaterThanOrEqualTo(numBlockAllocates / container2MatchedCount.size() - threshold);
}
}
@@ -375,43 +373,43 @@ public void testReplicaMap() throws Exception {
containerStateManager.updateContainerReplica(id, replicaTwo);
replicaSet = containerStateManager.getContainerReplicas(id);
assertEquals(2, replicaSet.size());
- assertTrue(replicaSet.contains(replicaOne));
- assertTrue(replicaSet.contains(replicaTwo));
+ assertThat(replicaSet).contains(replicaOne);
+ assertThat(replicaSet).contains(replicaTwo);
// Test 3: Remove one replica node and then test
containerStateManager.removeContainerReplica(id, replicaOne);
replicaSet = containerStateManager.getContainerReplicas(id);
assertEquals(1, replicaSet.size());
- assertFalse(replicaSet.contains(replicaOne));
- assertTrue(replicaSet.contains(replicaTwo));
+ assertThat(replicaSet).doesNotContain(replicaOne);
+ assertThat(replicaSet).contains(replicaTwo);
// Test 3: Remove second replica node and then test
containerStateManager.removeContainerReplica(id, replicaTwo);
replicaSet = containerStateManager.getContainerReplicas(id);
assertEquals(0, replicaSet.size());
- assertFalse(replicaSet.contains(replicaOne));
- assertFalse(replicaSet.contains(replicaTwo));
+ assertThat(replicaSet).doesNotContain(replicaOne);
+ assertThat(replicaSet).doesNotContain(replicaTwo);
// Test 4: Re-insert dn1
containerStateManager.updateContainerReplica(id, replicaOne);
replicaSet = containerStateManager.getContainerReplicas(id);
assertEquals(1, replicaSet.size());
- assertTrue(replicaSet.contains(replicaOne));
- assertFalse(replicaSet.contains(replicaTwo));
+ assertThat(replicaSet).contains(replicaOne);
+ assertThat(replicaSet).doesNotContain(replicaTwo);
// Re-insert dn2
containerStateManager.updateContainerReplica(id, replicaTwo);
replicaSet = containerStateManager.getContainerReplicas(id);
assertEquals(2, replicaSet.size());
- assertTrue(replicaSet.contains(replicaOne));
- assertTrue(replicaSet.contains(replicaTwo));
+ assertThat(replicaSet).contains(replicaOne);
+ assertThat(replicaSet).contains(replicaTwo);
// Re-insert dn1
containerStateManager.updateContainerReplica(id, replicaOne);
replicaSet = containerStateManager.getContainerReplicas(id);
assertEquals(2, replicaSet.size());
- assertTrue(replicaSet.contains(replicaOne));
- assertTrue(replicaSet.contains(replicaTwo));
+ assertThat(replicaSet).contains(replicaOne);
+ assertThat(replicaSet).contains(replicaTwo);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
similarity index 88%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
index 2df0d09db53..5ebf9b56a8e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm.node;
+package org.apache.hadoop.hdds.scm.node;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
@@ -31,8 +31,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -66,6 +64,11 @@
import java.util.stream.Collectors;
import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.getDNHostAndPort;
+import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.waitForDnToReachHealthState;
+import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.waitForDnToReachOpState;
+import static org.apache.hadoop.hdds.scm.node.TestNodeUtil.waitForDnToReachPersistedOpState;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -210,7 +213,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned()
scmClient.decommissionNodes(Arrays.asList(
getDNHostAndPort(toDecommission)));
- waitForDnToReachOpState(toDecommission, DECOMMISSIONED);
+ waitForDnToReachOpState(nm, toDecommission, DECOMMISSIONED);
// Ensure one node transitioned to DECOMMISSIONING
     List<DatanodeDetails> decomNodes = nm.getNodes(
DECOMMISSIONED,
@@ -226,7 +229,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned()
// Stop the decommissioned DN
int dnIndex = cluster.getHddsDatanodeIndex(toDecommission);
cluster.shutdownHddsDatanode(toDecommission);
- waitForDnToReachHealthState(toDecommission, DEAD);
+ waitForDnToReachHealthState(nm, toDecommission, DEAD);
// Now the decommissioned node is dead, we should have
// 3 replicas for the tracked container.
@@ -237,7 +240,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned()
cluster.restartHddsDatanode(dnIndex, true);
scmClient.recommissionNodes(Arrays.asList(
getDNHostAndPort(toDecommission)));
- waitForDnToReachOpState(toDecommission, IN_SERVICE);
+ waitForDnToReachOpState(nm, toDecommission, IN_SERVICE);
waitForDnToReachPersistedOpState(toDecommission, IN_SERVICE);
}
@@ -273,7 +276,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart()
// After the SCM restart, the DN should report as DECOMMISSIONING, then
// it should re-enter the decommission workflow and move to DECOMMISSIONED
DatanodeDetails newDn = nm.getNodeByUuid(dn.getUuid().toString());
- waitForDnToReachOpState(newDn, DECOMMISSIONED);
+ waitForDnToReachOpState(nm, newDn, DECOMMISSIONED);
waitForDnToReachPersistedOpState(newDn, DECOMMISSIONED);
// Now the node is decommissioned, so restart SCM again
@@ -283,7 +286,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart()
// On initial registration, the DN should report its operational state
// and if it is decommissioned, that should be updated in the NodeStatus
- waitForDnToReachOpState(newDn, DECOMMISSIONED);
+ waitForDnToReachOpState(nm, newDn, DECOMMISSIONED);
// Also confirm the datanodeDetails correctly reflect the operational
// state.
waitForDnToReachPersistedOpState(newDn, DECOMMISSIONED);
@@ -292,7 +295,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart()
// reflect the state of in SCM, in IN_SERVICE.
int dnIndex = cluster.getHddsDatanodeIndex(dn);
cluster.shutdownHddsDatanode(dnIndex);
- waitForDnToReachHealthState(dn, DEAD);
+ waitForDnToReachHealthState(nm, dn, DEAD);
// Datanode is shutdown and dead. Now recommission it in SCM
scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(dn)));
// Now restart it and ensure it remains IN_SERVICE
@@ -302,8 +305,8 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart()
// As this is not an initial registration since SCM was started, the DN
// should report its operational state and if it differs from what SCM
// has, then the SCM state should be used and the DN state updated.
- waitForDnToReachHealthState(newDn, HEALTHY);
- waitForDnToReachOpState(newDn, IN_SERVICE);
+ waitForDnToReachHealthState(nm, newDn, HEALTHY);
+ waitForDnToReachOpState(nm, newDn, IN_SERVICE);
waitForDnToReachPersistedOpState(newDn, IN_SERVICE);
}
@@ -343,7 +346,7 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance()
scmClient.startMaintenanceNodes(Arrays.asList(
getDNHostAndPort(dn)), 0);
- waitForDnToReachOpState(dn, IN_MAINTENANCE);
+ waitForDnToReachOpState(nm, dn, IN_MAINTENANCE);
waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
// Should still be 3 replicas online as no replication should happen for
@@ -357,7 +360,7 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance()
// Stop the maintenance DN
cluster.shutdownHddsDatanode(dn);
- waitForDnToReachHealthState(dn, DEAD);
+ waitForDnToReachHealthState(nm, dn, DEAD);
// Now the maintenance node is dead, we should still have
// 3 replicas as we don't purge the replicas for a dead maintenance node
@@ -369,13 +372,13 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance()
// Restart the DN and it should keep the IN_MAINTENANCE state
cluster.restartHddsDatanode(dn, true);
DatanodeDetails newDN = nm.getNodeByUuid(dn.getUuid().toString());
- waitForDnToReachHealthState(newDN, HEALTHY);
+ waitForDnToReachHealthState(nm, newDN, HEALTHY);
waitForDnToReachPersistedOpState(newDN, IN_MAINTENANCE);
// Stop the DN and wait for it to go dead.
int dnIndex = cluster.getHddsDatanodeIndex(dn);
cluster.shutdownHddsDatanode(dnIndex);
- waitForDnToReachHealthState(dn, DEAD);
+ waitForDnToReachHealthState(nm, dn, DEAD);
// Datanode is shutdown and dead. Now recommission it in SCM
scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(dn)));
@@ -387,8 +390,8 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance()
// As this is not an initial registration since SCM was started, the DN
// should report its operational state and if it differs from what SCM
// has, then the SCM state should be used and the DN state updated.
- waitForDnToReachHealthState(newDn, HEALTHY);
- waitForDnToReachOpState(newDn, IN_SERVICE);
+ waitForDnToReachHealthState(nm, newDn, HEALTHY);
+ waitForDnToReachOpState(nm, newDn, IN_SERVICE);
waitForDnToReachPersistedOpState(dn, IN_SERVICE);
}
@@ -411,7 +414,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance()
replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
scmClient.startMaintenanceNodes(forMaintenance.stream()
- .map(this::getDNHostAndPort)
+ .map(TestNodeUtil::getDNHostAndPort)
.collect(Collectors.toList()), 0);
// Ensure all 3 DNs go to maintenance
@@ -422,7 +425,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance()
// There should now be 5-6 replicas of the container we are tracking
Set<ContainerReplica> newReplicas =
cm.getContainerReplicas(container.containerID());
- assertTrue(newReplicas.size() >= 5);
+ assertThat(newReplicas.size()).isGreaterThanOrEqualTo(5);
scmClient.recommissionNodes(forMaintenance.stream()
.map(d -> getDNHostAndPort(d))
@@ -430,7 +433,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance()
// Ensure all 3 DNs go to maintenance
for (DatanodeDetails dn : forMaintenance) {
- waitForDnToReachOpState(dn, IN_SERVICE);
+ waitForDnToReachOpState(nm, dn, IN_SERVICE);
}
waitForContainerReplicas(container, 3);
@@ -445,18 +448,18 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance()
.limit(2)
.collect(Collectors.toList());
scmClient.startMaintenanceNodes(ecMaintenance.stream()
- .map(this::getDNHostAndPort)
+ .map(TestNodeUtil::getDNHostAndPort)
.collect(Collectors.toList()), 0);
for (DatanodeDetails dn : ecMaintenance) {
waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
}
- assertTrue(cm.getContainerReplicas(ecContainer.containerID()).size() >= 6);
+ assertThat(cm.getContainerReplicas(ecContainer.containerID()).size()).isGreaterThanOrEqualTo(6);
scmClient.recommissionNodes(ecMaintenance.stream()
- .map(this::getDNHostAndPort)
+ .map(TestNodeUtil::getDNHostAndPort)
.collect(Collectors.toList()));
// Ensure the 2 DNs go to IN_SERVICE
for (DatanodeDetails dn : ecMaintenance) {
- waitForDnToReachOpState(dn, IN_SERVICE);
+ waitForDnToReachOpState(nm, dn, IN_SERVICE);
}
waitForContainerReplicas(ecContainer, 5);
}
@@ -479,7 +482,7 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart()
replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
scmClient.startMaintenanceNodes(forMaintenance.stream()
- .map(this::getDNHostAndPort)
+ .map(TestNodeUtil::getDNHostAndPort)
.collect(Collectors.toList()), 0);
// Ensure all 3 DNs go to entering_maintenance
@@ -496,13 +499,13 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart()
// Ensure all 3 DNs go to maintenance
for (DatanodeDetails dn : newDns) {
- waitForDnToReachOpState(dn, IN_MAINTENANCE);
+ waitForDnToReachOpState(nm, dn, IN_MAINTENANCE);
}
// There should now be 5-6 replicas of the container we are tracking
Set<ContainerReplica> newReplicas =
cm.getContainerReplicas(container.containerID());
- assertTrue(newReplicas.size() >= 5);
+ assertThat(newReplicas.size()).isGreaterThanOrEqualTo(5);
}
@Test
@@ -526,7 +529,7 @@ public void testMaintenanceEndsAutomaticallyAtTimeout()
// decommission interface only allows us to specify hours from now as the
// end time, that is not really suitable for a test like this.
nm.setNodeOperationalState(dn, IN_MAINTENANCE, newEndTime);
- waitForDnToReachOpState(dn, IN_SERVICE);
+ waitForDnToReachOpState(nm, dn, IN_SERVICE);
waitForDnToReachPersistedOpState(dn, IN_SERVICE);
// Put the node back into maintenance and then stop it and wait for it to
@@ -534,11 +537,11 @@ public void testMaintenanceEndsAutomaticallyAtTimeout()
scmClient.startMaintenanceNodes(Arrays.asList(getDNHostAndPort(dn)), 0);
waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
cluster.shutdownHddsDatanode(dn);
- waitForDnToReachHealthState(dn, DEAD);
+ waitForDnToReachHealthState(nm, dn, DEAD);
newEndTime = System.currentTimeMillis() / 1000 + 5;
nm.setNodeOperationalState(dn, IN_MAINTENANCE, newEndTime);
- waitForDnToReachOpState(dn, IN_SERVICE);
+ waitForDnToReachOpState(nm, dn, IN_SERVICE);
// Ensure there are 3 replicas not including the dead node, indicating a new
// replica was created
GenericTestUtils.waitFor(() -> getContainerReplicas(container)
@@ -585,7 +588,7 @@ public void testSCMHandlesRestartForMaintenanceNode()
// Now let the node go dead and repeat the test. This time ensure a new
// replica is created.
cluster.shutdownHddsDatanode(dn);
- waitForDnToReachHealthState(dn, DEAD);
+ waitForDnToReachHealthState(nm, dn, DEAD);
cluster.restartStorageContainerManager(false);
setManagers();
@@ -632,18 +635,6 @@ private void generateData(int keyCount, String keyPrefix,
}
}
- /**
- * Retrieves the NodeStatus for the given DN or fails the test if the
- * Node cannot be found. This is a helper method to allow the nodeStatus to be
- * checked in lambda expressions.
- * @param dn Datanode for which to retrieve the NodeStatus.
- * @return
- */
- private NodeStatus getNodeStatus(DatanodeDetails dn) {
- return assertDoesNotThrow(() -> nm.getNodeStatus(dn),
- "Unexpected exception getting the nodeState");
- }
-
/**
* Retrieves the containerReplica set for a given container or fails the test
* if the container cannot be found. This is a helper method to allow the
@@ -669,61 +660,6 @@ private DatanodeDetails getOneDNHostingReplica(
return c.getDatanodeDetails();
}
- /**
- * Given a Datanode, return a string consisting of the hostname and one of its
- * ports in the for host:post.
- * @param dn Datanode for which to retrieve the host:post string
- * @return host:port for the given DN.
- */
- private String getDNHostAndPort(DatanodeDetails dn) {
- return dn.getHostName() + ":" + dn.getPorts().get(0).getValue();
- }
-
- /**
- * Wait for the given datanode to reach the given operational state.
- * @param dn Datanode for which to check the state
- * @param state The state to wait for.
- * @throws TimeoutException
- * @throws InterruptedException
- */
- private void waitForDnToReachOpState(DatanodeDetails dn,
- HddsProtos.NodeOperationalState state)
- throws TimeoutException, InterruptedException {
- GenericTestUtils.waitFor(
- () -> getNodeStatus(dn).getOperationalState().equals(state),
- 200, 30000);
- }
-
- /**
- * Wait for the given datanode to reach the given Health state.
- * @param dn Datanode for which to check the state
- * @param state The state to wait for.
- * @throws TimeoutException
- * @throws InterruptedException
- */
- private void waitForDnToReachHealthState(DatanodeDetails dn,
- HddsProtos.NodeState state)
- throws TimeoutException, InterruptedException {
- GenericTestUtils.waitFor(
- () -> getNodeStatus(dn).getHealth().equals(state),
- 200, 30000);
- }
-
- /**
- * Wait for the given datanode to reach the given persisted state.
- * @param dn Datanode for which to check the state
- * @param state The state to wait for.
- * @throws TimeoutException
- * @throws InterruptedException
- */
- private void waitForDnToReachPersistedOpState(DatanodeDetails dn,
- HddsProtos.NodeOperationalState state)
- throws TimeoutException, InterruptedException {
- GenericTestUtils.waitFor(
- () -> dn.getPersistedOpState().equals(state),
- 200, 30000);
- }
-
/**
* Get any container present in the cluster and wait to ensure 3 replicas
* have been reported before returning the container.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeUtil.java
new file mode 100644
index 00000000000..1cb5ef792f3
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeUtil.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.Assertions;
+
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Utility class with helper methods for testing node state and status.
+ */
+public final class TestNodeUtil {
+
+ private TestNodeUtil() {
+ }
+
+ /**
+ * Wait for the given datanode to reach the given operational state.
+ * @param nodeManager NodeManager used to look up the node status.
+ * @param dn Datanode for which to check the state
+ * @param state The state to wait for.
+ * @throws TimeoutException
+ * @throws InterruptedException
+ */
+ public static void waitForDnToReachOpState(NodeManager nodeManager,
+ DatanodeDetails dn, HddsProtos.NodeOperationalState state)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(
+ () -> getNodeStatus(nodeManager, dn)
+ .getOperationalState().equals(state),
+ 200, 30000);
+ }
+
+ /**
+ * Wait for the given datanode to reach the given Health state.
+ * @param nodeManager NodeManager used to look up the node status.
+ * @param dn Datanode for which to check the state
+ * @param state The state to wait for.
+ * @throws TimeoutException
+ * @throws InterruptedException
+ */
+ public static void waitForDnToReachHealthState(NodeManager nodeManager,
+ DatanodeDetails dn, HddsProtos.NodeState state)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(
+ () -> getNodeStatus(nodeManager, dn).getHealth().equals(state),
+ 200, 30000);
+ }
+
+ /**
+ * Retrieves the NodeStatus for the given DN or fails the test if the
+ * Node cannot be found. This is a helper method to allow the nodeStatus to be
+ * checked in lambda expressions.
+ * @param nodeManager NodeManager used to look up the status.
+ * @param dn Datanode for which to retrieve the NodeStatus.
+ * @return the NodeStatus of the given datanode.
+ */
+ public static NodeStatus getNodeStatus(NodeManager nodeManager,
+ DatanodeDetails dn) {
+ return Assertions.assertDoesNotThrow(
+ () -> nodeManager.getNodeStatus(dn),
+ "Unexpected exception getting the nodeState");
+ }
+
+ /**
+ * Given a Datanode, return a string consisting of the hostname and one of its
+ * ports in the form host:port.
+ * @param dn Datanode for which to retrieve the host:port string
+ * @return host:port for the given DN.
+ */
+ public static String getDNHostAndPort(DatanodeDetails dn) {
+ return dn.getHostName() + ":" + dn.getPorts().get(0).getValue();
+ }
+
+ /**
+ * Wait for the given datanode to reach the given persisted state.
+ * @param dn Datanode for which to check the state
+ * @param state The state to wait for.
+ * @throws TimeoutException
+ * @throws InterruptedException
+ */
+ public static void waitForDnToReachPersistedOpState(DatanodeDetails dn,
+ HddsProtos.NodeOperationalState state)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(
+ () -> dn.getPersistedOpState().equals(state),
+ 200, 30000);
+ }
+}
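
Note: the decommission and maintenance hunks above call these helpers as plain static
methods, passing the SCM NodeManager explicitly. A minimal usage sketch (illustrative
only; nm, dn and the statically imported state constants are assumed to be provided by
the surrounding test, as in the hunks above):

    // wait until SCM reports the datanode as decommissioned and healthy,
    // then check the operational state persisted on the datanode itself
    TestNodeUtil.waitForDnToReachOpState(nm, dn, DECOMMISSIONED);
    TestNodeUtil.waitForDnToReachHealthState(nm, dn, HEALTHY);
    TestNodeUtil.waitForDnToReachPersistedOpState(dn, DECOMMISSIONED);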
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java
similarity index 85%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java
index 5f36cf53652..e8dc7455a11 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java
@@ -14,10 +14,9 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-package org.apache.hadoop.ozone.scm.node;
+package org.apache.hadoop.hdds.scm.node;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -37,33 +36,22 @@
import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.apache.hadoop.hdds.HddsConfigKeys
- .HDDS_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
- .HDDS_PIPELINE_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
- .HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
- .HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
- .HDDS_NODE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.
- NodeOperationalState.IN_SERVICE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.
- NodeOperationalState.DECOMMISSIONING;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.
- NodeOperationalState.IN_MAINTENANCE;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
similarity index 95%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
index 1e03823692f..aa37f6d93a5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
@@ -21,4 +21,4 @@
/**
* Unit tests for Node related functions in SCM.
*/
-package org.apache.hadoop.ozone.scm.node;
+package org.apache.hadoop.hdds.scm.node;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index 725b17ee9d6..439b563d633 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.pipeline;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -34,7 +35,6 @@
import java.util.concurrent.TimeUnit;
import java.util.HashMap;
import java.util.Map;
-import java.util.Random;
import java.util.UUID;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
@@ -172,7 +172,6 @@ public void testMinLeaderCountChoosePolicy() throws Exception {
// each datanode has leaderNumOfEachDn leaders after balance
checkLeaderBalance(dnNum, leaderNumOfEachDn);
- Random r = new Random(0);
for (int i = 0; i < 10; i++) {
// destroy some pipelines, wait new pipelines created,
// then check leader balance
@@ -181,7 +180,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception {
.getPipelines(RatisReplicationConfig.getInstance(
ReplicationFactor.THREE), Pipeline.PipelineState.OPEN);
- int destroyNum = r.nextInt(pipelines.size());
+ int destroyNum = RandomUtils.nextInt(0, pipelines.size());
for (int k = 0; k <= destroyNum; k++) {
pipelineManager.closePipeline(pipelines.get(k), false);
}
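
Note: commons-lang3 RandomUtils.nextInt(startInclusive, endExclusive) returns a value in
[startInclusive, endExclusive), so destroyNum keeps the same bounds as the replaced
new Random(0).nextInt(pipelines.size()); the sequence is simply no longer tied to a fixed
seed. A self-contained sketch of the API (illustrative only):

    import org.apache.commons.lang3.RandomUtils;

    public class RandomUtilsSketch {
      public static void main(String[] args) {
        int size = 5;
        int destroyNum = RandomUtils.nextInt(0, size); // value in [0, 5)
        byte[] data = RandomUtils.nextBytes(16);       // random test payload
        System.out.println(destroyNum + " " + data.length);
      }
    }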
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
index 698c6b99bd0..cff6f03c1e7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
@@ -17,10 +17,9 @@
*/
package org.apache.hadoop.hdds.scm.pipeline;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -150,10 +149,10 @@ public void testMultiRaft() throws Exception {
private void assertNotSamePeers() {
nodeManager.getAllNodes().forEach((dn) -> {
Collection<DatanodeDetails> peers = nodeManager.getPeerList(dn);
- assertFalse(peers.contains(dn));
+ assertThat(peers).doesNotContain(dn);
List trimList = nodeManager.getAllNodes();
trimList.remove(dn);
- assertTrue(peers.containsAll(trimList));
+ assertThat(peers).containsAll(trimList);
});
}
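
The assertTrue/assertFalse-on-contains checks are replaced throughout this patch with
AssertJ collection assertions, which report the actual collection contents on failure.
A self-contained sketch of the equivalent forms (illustrative only, not part of the patch):

    import static org.assertj.core.api.Assertions.assertThat;

    import java.util.Arrays;
    import java.util.List;

    class AssertJCollectionSketch {
      void example() {
        List<String> peers = Arrays.asList("dn1", "dn2");
        assertThat(peers).contains("dn1");                    // was: assertTrue(peers.contains("dn1"))
        assertThat(peers).doesNotContain("dn3");              // was: assertFalse(peers.contains("dn3"))
        assertThat(peers).containsAll(Arrays.asList("dn1"));  // was: assertTrue(peers.containsAll(...))
        assertThat(peers.size()).isGreaterThanOrEqualTo(2);   // was: assertTrue(peers.size() >= 2)
      }
    }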
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index 2c57ac0a6f4..841d0ef1684 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -17,9 +17,8 @@
*/
package org.apache.hadoop.hdds.scm.pipeline;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -104,7 +103,7 @@ public void testPipelineMap() throws IOException,
// get pipeline details by dnid
Set<PipelineID> pipelines = scm.getScmNodeManager()
.getPipelines(dns.get(0));
- assertTrue(pipelines.contains(ratisContainer.getPipeline().getId()));
+ assertThat(pipelines).contains(ratisContainer.getPipeline().getId());
// Now close the container and it should not show up while fetching
// containers by pipeline
@@ -120,6 +119,6 @@ public void testPipelineMap() throws IOException,
pipelineManager.deletePipeline(ratisContainer.getPipeline().getId());
pipelines = scm.getScmNodeManager()
.getPipelines(dns.get(0));
- assertFalse(pipelines.contains(ratisContainer.getPipeline().getId()));
+ assertThat(pipelines).doesNotContain(ratisContainer.getPipeline().getId());
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index d46356fb8d4..858a4486757 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -58,11 +58,10 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
@@ -149,8 +148,8 @@ public void testPipelineCloseWithClosedContainer() throws IOException,
pipelineManager.deletePipeline(ratisContainer.getPipeline().getId());
for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) {
// Assert that the pipeline has been removed from Node2PipelineMap as well
- assertFalse(scm.getScmNodeManager().getPipelines(dn)
- .contains(ratisContainer.getPipeline().getId()));
+ assertThat(scm.getScmNodeManager().getPipelines(dn))
+ .doesNotContain(ratisContainer.getPipeline().getId());
}
}
@@ -212,7 +211,7 @@ public void testPipelineCloseWithPipelineAction() throws Exception {
}
@Test
- public void testPipelineCloseWithLogFailure()
+ void testPipelineCloseWithLogFailure()
throws IOException, TimeoutException {
EventQueue eventQ = (EventQueue) scm.getEventQueue();
PipelineActionHandler pipelineActionTest =
@@ -230,11 +229,7 @@ public void testPipelineCloseWithLogFailure()
Pipeline openPipeline = containerWithPipeline.getPipeline();
RaftGroupId groupId = RaftGroupId.valueOf(openPipeline.getId().getId());
- try {
- pipelineManager.getPipeline(openPipeline.getId());
- } catch (PipelineNotFoundException e) {
- fail("pipeline should exist");
- }
+ pipelineManager.getPipeline(openPipeline.getId());
DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0);
int index = cluster.getHddsDatanodeIndex(datanodeDetails);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java
similarity index 98%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java
index f1a533bdfdc..b6e85ab942d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm.pipeline;
+package org.apache.hadoop.hdds.scm.pipeline;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
similarity index 97%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
index 29c9392a1d5..25a29410b64 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java
@@ -16,15 +16,13 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm.pipeline;
+package org.apache.hadoop.hdds.scm.pipeline;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineMetrics;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineMetrics.java
similarity index 94%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineMetrics.java
index 85a61e8cb4b..cf41fc60933 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineMetrics.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.ozone.scm.pipeline;
+package org.apache.hadoop.hdds.scm.pipeline;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -24,9 +24,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineMetrics;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -77,7 +74,7 @@ public void testPipelineCreation() {
long numPipelineCreated =
getLongCounter("NumPipelineCreated", metrics);
// Pipelines are created in background when the cluster starts.
- assertTrue(numPipelineCreated > 0);
+ assertThat(numPipelineCreated).isGreaterThan(0);
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index d09e924ca81..40a80103934 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -86,7 +86,7 @@ public void setup(int numDatanodes) throws Exception {
@Test
- public void testScmSafeMode() throws Exception {
+ void testScmSafeMode() throws Exception {
int datanodeCount = 6;
setup(datanodeCount);
waitForRatis3NodePipelines(datanodeCount / 3);
@@ -136,11 +136,7 @@ public void testScmSafeMode() throws Exception {
DatanodeDetails restartedDatanode = pipelineList.get(1).getFirstNode();
// Now restart one datanode from the 2nd pipeline
- try {
- cluster.restartHddsDatanode(restartedDatanode, false);
- } catch (Exception ex) {
- fail("Datanode restart failed");
- }
+ cluster.restartHddsDatanode(restartedDatanode, false);
GenericTestUtils.waitFor(() ->
scmSafeModeManager.getOneReplicaPipelineSafeModeRule()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
index 20fa713deb4..563e0162acc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
@@ -25,10 +25,10 @@
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -45,6 +45,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -56,10 +57,11 @@
import static java.util.Collections.singletonList;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import org.apache.ratis.protocol.exceptions.AlreadyClosedException;
import org.apache.ratis.protocol.exceptions.NotReplicatedException;
@@ -126,21 +128,23 @@ public void init() throws Exception {
ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10));
conf.setFromObject(ratisClientConfig);
- OzoneClientConfig clientConfig = new OzoneClientConfig();
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setChecksumType(ChecksumType.NONE);
conf.setFromObject(clientConfig);
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .applyTo(conf);
+
conf.setQuietMode(false);
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
StorageUnit.MB);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5)
.setTotalPipelineNumLimit(3)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
@@ -178,8 +182,7 @@ public void testReleaseBuffers() throws Exception {
long containerId = container.getContainerInfo().getContainerID();
try (XceiverClientSpi xceiverClient = mgr.acquireClient(pipeline)) {
assertEquals(1, xceiverClient.getRefcount());
- assertTrue(xceiverClient instanceof XceiverClientRatis);
- XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
+ XceiverClientRatis ratisClient = assertInstanceOf(XceiverClientRatis.class, xceiverClient);
CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
List<XceiverClientReply> replies = new ArrayList<>();
@@ -225,17 +228,15 @@ public void testReleaseBuffers() throws Exception {
assertEquals(2, watcher.
getCommitIndexMap().size());
watcher.watchOnFirstIndex();
- assertFalse(watcher.getCommitIndexMap()
- .containsKey(replies.get(0).getLogIndex()));
- assertFalse(watcher.getFutureMap().containsKey((long) chunkSize));
- assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
+ assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(0).getLogIndex());
+ assertThat(watcher.getFutureMap()).doesNotContainKey((long) chunkSize);
+ assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(chunkSize);
watcher.watchOnLastIndex();
- assertFalse(watcher.getCommitIndexMap()
- .containsKey(replies.get(1).getLogIndex()));
- assertFalse(watcher.getFutureMap().containsKey((long) 2 * chunkSize));
+ assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(1).getLogIndex());
+ assertThat(watcher.getFutureMap()).doesNotContainKey((long) 2 * chunkSize);
assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
- assertTrue(watcher.getFutureMap().isEmpty());
- assertTrue(watcher.getCommitIndexMap().isEmpty());
+ assertThat(watcher.getFutureMap()).isEmpty();
+ assertThat(watcher.getCommitIndexMap()).isEmpty();
}
}
}
@@ -252,8 +253,7 @@ public void testReleaseBuffersOnException() throws Exception {
long containerId = container.getContainerInfo().getContainerID();
try (XceiverClientSpi xceiverClient = mgr.acquireClient(pipeline)) {
assertEquals(1, xceiverClient.getRefcount());
- assertTrue(xceiverClient instanceof XceiverClientRatis);
- XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
+ XceiverClientRatis ratisClient = assertInstanceOf(XceiverClientRatis.class, xceiverClient);
CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
List<XceiverClientReply> replies = new ArrayList<>();
@@ -298,32 +298,28 @@ public void testReleaseBuffersOnException() throws Exception {
assertEquals(future2, watcher.getFutureMap().get((long) 2 * chunkSize));
assertEquals(2, watcher.getCommitIndexMap().size());
watcher.watchOnFirstIndex();
- assertFalse(watcher.getCommitIndexMap()
- .containsKey(replies.get(0).getLogIndex()));
- assertFalse(watcher.getFutureMap().containsKey((long) chunkSize));
- assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
+ assertThat(watcher.getCommitIndexMap()).doesNotContainKey(replies.get(0).getLogIndex());
+ assertThat(watcher.getFutureMap()).doesNotContainKey((long) chunkSize);
+ assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(chunkSize);
cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
- try {
- // just watch for a higher index so as to ensure, it does an actual
- // call to Ratis. Otherwise, it may just return in case the
- // commitInfoMap is updated to the latest index in putBlock response.
- watcher.watchForCommit(replies.get(1).getLogIndex() + 100);
- fail("Expected exception not thrown");
- } catch (IOException ioe) {
- // with retry count set to noRetry and a lower watch request
- // timeout, watch request will eventually
- // fail with TimeoutIOException from ratis client or the client
- // can itself get AlreadyClosedException from the Ratis Server
- // and the write may fail with RaftRetryFailureException
- Throwable t = HddsClientUtils.checkForException(ioe);
- assertTrue(
- t instanceof RaftRetryFailureException ||
- t instanceof TimeoutIOException ||
- t instanceof AlreadyClosedException ||
- t instanceof NotReplicatedException,
- "Unexpected exception: " + t.getClass());
- }
+ // just watch for a higher index so as to ensure, it does an actual
+ // call to Ratis. Otherwise, it may just return in case the
+ // commitInfoMap is updated to the latest index in putBlock response.
+ IOException ioe =
+ assertThrows(IOException.class, () -> watcher.watchForCommit(replies.get(1).getLogIndex() + 100));
+ Throwable t = HddsClientUtils.checkForException(ioe);
+ // with retry count set to noRetry and a lower watch request
+ // timeout, watch request will eventually
+ // fail with TimeoutIOException from ratis client or the client
+ // can itself get AlreadyClosedException from the Ratis Server
+ // and the write may fail with RaftRetryFailureException
+ assertTrue(
+ t instanceof RaftRetryFailureException ||
+ t instanceof TimeoutIOException ||
+ t instanceof AlreadyClosedException ||
+ t instanceof NotReplicatedException,
+ "Unexpected exception: " + t.getClass());
if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1)
.getLogIndex()) {
assertEquals(chunkSize, watcher.getTotalAckDataLength());
@@ -331,8 +327,8 @@ public void testReleaseBuffersOnException() throws Exception {
assertEquals(1, watcher.getFutureMap().size());
} else {
assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
- assertTrue(watcher.getFutureMap().isEmpty());
- assertTrue(watcher.getCommitIndexMap().isEmpty());
+ assertThat(watcher.getFutureMap()).isEmpty();
+ assertThat(watcher.getCommitIndexMap()).isEmpty();
}
}
}
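
Note: where the old code wrapped a call in try/catch only to call fail(), the call is now
made directly (an unexpected exception fails the test on its own); where an exception is
required, assertThrows is used, and its return value lets the cause be unwrapped with
HddsClientUtils.checkForException as above. A minimal illustrative sketch:

    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.io.IOException;

    class AssertThrowsSketch {
      void example() {
        // fails the test unless an IOException is thrown by the lambda
        IOException ioe = assertThrows(IOException.class, () -> {
          throw new IOException("simulated watch timeout");
        });
        System.out.println(ioe.getMessage());
      }
    }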
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index 8ab74422516..3c980f94c59 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -112,10 +112,11 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE;
import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This class tests container commands on EC containers.
@@ -129,8 +130,6 @@ public class TestContainerCommandsEC {
private static ObjectStore store;
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
- private static final String SCM_ID = UUID.randomUUID().toString();
- private static final String CLUSTER_ID = UUID.randomUUID().toString();
private static final int EC_DATA = 3;
private static final int EC_PARITY = 2;
private static final EcCodec EC_CODEC = EcCodec.RS;
@@ -423,17 +422,17 @@ public void testListBlock() throws Exception {
ListBlockResponseProto response = ContainerProtocolCalls
.listBlock(clients.get(i), containerID, null, Integer.MAX_VALUE,
containerToken);
- assertTrue(
- minNumExpectedBlocks <= response.getBlockDataList().stream().filter(
+ assertThat(minNumExpectedBlocks)
+ .withFailMessage("blocks count should be same or more than min expected" +
+ " blocks count on DN " + i)
+ .isLessThanOrEqualTo(response.getBlockDataList().stream().filter(
k -> k.getChunksCount() > 0 && k.getChunks(0).getLen() > 0)
- .collect(Collectors.toList()).size(),
- "blocks count should be same or more than min expected" +
- " blocks count on DN " + i);
- assertTrue(
- minNumExpectedChunks <= response.getBlockDataList().stream()
- .mapToInt(BlockData::getChunksCount).sum(),
- "chunks count should be same or more than min expected" +
- " chunks count on DN " + i);
+ .collect(Collectors.toList()).size());
+ assertThat(minNumExpectedChunks)
+ .withFailMessage("chunks count should be same or more than min expected" +
+ " chunks count on DN " + i)
+ .isLessThanOrEqualTo(response.getBlockDataList().stream()
+ .mapToInt(BlockData::getChunksCount).sum());
}
}
@@ -797,7 +796,7 @@ private void createKeyAndWriteData(String keyString, OzoneBucket bucket,
try (OzoneOutputStream out = bucket.createKey(keyString, 4096,
new ECReplicationConfig(3, 2, EcCodec.RS, EC_CHUNK_SIZE),
new HashMap<>())) {
- assertTrue(out.getOutputStream() instanceof KeyOutputStream);
+ assertInstanceOf(KeyOutputStream.class, out.getOutputStream());
for (int i = 0; i < numChunks; i++) {
out.write(inputChunks[i]);
}
@@ -925,7 +924,6 @@ public static void startCluster(OzoneConfiguration conf) throws Exception {
secretKeyClient = new SecretKeyTestClient();
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(NUM_DN)
- .setScmId(SCM_ID).setClusterId(CLUSTER_ID)
.setCertificateClient(certClient)
.setSecretKeyClient(secretKeyClient)
.build();
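
assertInstanceOf, used in createKeyAndWriteData above, both verifies the runtime type and
returns the value already cast, replacing the assertTrue(x instanceof T) plus explicit
cast pair. A small self-contained sketch (illustrative only):

    import static org.junit.jupiter.api.Assertions.assertInstanceOf;

    class AssertInstanceOfSketch {
      void example() {
        Object stream = new StringBuilder("out");
        // fails the test if stream is not a StringBuilder; otherwise returns it cast
        StringBuilder sb = assertInstanceOf(StringBuilder.class, stream);
        sb.append('!');
      }
    }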
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index caf9cadb165..57e807b7c75 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@ -115,7 +115,7 @@ public class TestHDDSUpgrade {
private StorageContainerManager scm;
private ContainerManager scmContainerManager;
private PipelineManager scmPipelineManager;
- private final int numContainersCreated = 1;
+ private static final int NUM_CONTAINERS_CREATED = 1;
private HDDSLayoutVersionManager scmVersionManager;
private AtomicBoolean testPassed = new AtomicBoolean(true);
private static
@@ -316,7 +316,7 @@ public void testFinalizationFromInitialVersionToLatestVersion()
// Verify Post-Upgrade conditions on the SCM.
TestHddsUpgradeUtils.testPostUpgradeConditionsSCM(
cluster.getStorageContainerManagersList(),
- numContainersCreated, NUM_DATA_NODES);
+ NUM_CONTAINERS_CREATED, NUM_DATA_NODES);
// All datanodes on the SCM should have moved to HEALTHY-READONLY state.
TestHddsUpgradeUtils.testDataNodesStateOnSCM(
@@ -327,7 +327,7 @@ public void testFinalizationFromInitialVersionToLatestVersion()
// In the happy path case, no containers should have been quasi closed as
// a result of the upgrade.
TestHddsUpgradeUtils.testPostUpgradeConditionsDataNodes(
- cluster.getHddsDatanodes(), numContainersCreated, CLOSED);
+ cluster.getHddsDatanodes(), NUM_CONTAINERS_CREATED, CLOSED);
// Test that we can use a pipeline after upgrade.
// Will fail with exception if there are no pipelines.
@@ -871,7 +871,7 @@ public void testFinalizationWithFailureInjectionHelper(
// Verify Post-Upgrade conditions on the SCM.
// With failure injection
TestHddsUpgradeUtils.testPostUpgradeConditionsSCM(
- cluster.getStorageContainerManagersList(), numContainersCreated,
+ cluster.getStorageContainerManagersList(), NUM_CONTAINERS_CREATED,
NUM_DATA_NODES);
// All datanodes on the SCM should have moved to HEALTHY-READONLY state.
@@ -898,7 +898,7 @@ public void testFinalizationWithFailureInjectionHelper(
// Verify the SCM has driven all the DataNodes through Layout Upgrade.
TestHddsUpgradeUtils.testPostUpgradeConditionsDataNodes(
- cluster.getHddsDatanodes(), numContainersCreated);
+ cluster.getHddsDatanodes(), NUM_CONTAINERS_CREATED);
// Verify that new pipeline can be created with upgraded datanodes.
try {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
index 6fc964fd0ab..cd0fd9d4bbe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
@@ -49,6 +49,7 @@
import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.ALREADY_FINALIZED;
import static org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.FINALIZATION_DONE;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -117,7 +118,7 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm,
HDDSLayoutVersionManager scmVersionManager = scm.getLayoutVersionManager();
assertEquals(scmVersionManager.getSoftwareLayoutVersion(),
scmVersionManager.getMetadataLayoutVersion());
- assertTrue(scmVersionManager.getMetadataLayoutVersion() >= 1);
+ assertThat(scmVersionManager.getMetadataLayoutVersion()).isGreaterThanOrEqualTo(1);
// SCM should not return from finalization until there is at least one
// pipeline to use.
@@ -147,7 +148,7 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm,
(ciState == HddsProtos.LifeCycleState.QUASI_CLOSED));
countContainers++;
}
- assertTrue(countContainers >= numContainers);
+ assertThat(countContainers).isGreaterThanOrEqualTo(numContainers);
}
/*
@@ -173,7 +174,7 @@ public static void testPreUpgradeConditionsDataNodes(
countContainers++;
}
}
- assertTrue(countContainers >= 1);
+ assertThat(countContainers).isGreaterThanOrEqualTo(1);
}
/*
@@ -217,7 +218,7 @@ public static void testPostUpgradeConditionsDataNodes(
dsm.getLayoutVersionManager();
assertEquals(dnVersionManager.getSoftwareLayoutVersion(),
dnVersionManager.getMetadataLayoutVersion());
- assertTrue(dnVersionManager.getMetadataLayoutVersion() >= 1);
+ assertThat(dnVersionManager.getMetadataLayoutVersion()).isGreaterThanOrEqualTo(1);
// Also verify that all the existing containers are closed.
for (Container<?> container :
@@ -228,7 +229,7 @@ public static void testPostUpgradeConditionsDataNodes(
countContainers++;
}
}
- assertTrue(countContainers >= numContainers);
+ assertThat(countContainers).isGreaterThanOrEqualTo(numContainers);
}
public static void testDataNodesStateOnSCM(List<StorageContainerManager> scms,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java
index f9f6871f546..d2ae30efceb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java
@@ -37,7 +37,6 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
@@ -56,6 +55,12 @@
import java.util.stream.Stream;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests upgrade finalization failure scenarios and corner cases specific to SCM
@@ -167,7 +172,7 @@ public void testFinalizationWithLeaderChange(
// Make sure the original SCM leader is not the leader anymore.
StorageContainerManager newLeaderScm = cluster.getActiveSCM();
- Assertions.assertNotEquals(newLeaderScm.getSCMNodeId(),
+ assertNotEquals(newLeaderScm.getSCMNodeId(),
oldLeaderScm.getSCMNodeId());
// Resume finalization from the new leader.
@@ -288,8 +293,8 @@ public void testSnapshotFinalization() throws Exception {
inactiveScm, 0, NUM_DATANODES);
// Use log to verify a snapshot was installed.
- Assertions.assertTrue(logCapture.getOutput().contains("New SCM snapshot " +
- "received with metadata layout version"));
+ assertThat(logCapture.getOutput()).contains("New SCM snapshot " +
+ "received with metadata layout version");
}
private void waitForScmsToFinalize(Collection<StorageContainerManager> scms)
@@ -319,35 +324,31 @@ private void checkMidFinalizationConditions(
for (StorageContainerManager scm: scms) {
switch (haltingPoint) {
case BEFORE_PRE_FINALIZE_UPGRADE:
- Assertions.assertFalse(
- scm.getPipelineManager().isPipelineCreationFrozen());
- Assertions.assertEquals(
+ assertFalse(scm.getPipelineManager().isPipelineCreationFrozen());
+ assertEquals(
scm.getScmContext().getFinalizationCheckpoint(),
FinalizationCheckpoint.FINALIZATION_REQUIRED);
break;
case AFTER_PRE_FINALIZE_UPGRADE:
- Assertions.assertTrue(
- scm.getPipelineManager().isPipelineCreationFrozen());
- Assertions.assertEquals(
+ assertTrue(scm.getPipelineManager().isPipelineCreationFrozen());
+ assertEquals(
scm.getScmContext().getFinalizationCheckpoint(),
FinalizationCheckpoint.FINALIZATION_STARTED);
break;
case AFTER_COMPLETE_FINALIZATION:
- Assertions.assertFalse(
- scm.getPipelineManager().isPipelineCreationFrozen());
- Assertions.assertEquals(
+ assertFalse(scm.getPipelineManager().isPipelineCreationFrozen());
+ assertEquals(
scm.getScmContext().getFinalizationCheckpoint(),
FinalizationCheckpoint.MLV_EQUALS_SLV);
break;
case AFTER_POST_FINALIZE_UPGRADE:
- Assertions.assertFalse(
- scm.getPipelineManager().isPipelineCreationFrozen());
- Assertions.assertEquals(
+ assertFalse(scm.getPipelineManager().isPipelineCreationFrozen());
+ assertEquals(
scm.getScmContext().getFinalizationCheckpoint(),
FinalizationCheckpoint.FINALIZATION_COMPLETE);
break;
default:
- Assertions.fail("Unknown halting point in test: " + haltingPoint);
+ fail("Unknown halting point in test: " + haltingPoint);
}
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
index 87fbe23ac76..4197ac8a816 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
@@ -27,10 +27,10 @@
import org.junit.jupiter.api.Test;
import java.io.IOException;
-import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -49,12 +49,9 @@ public class TestRocksObjectLeakDetector {
static void setUp() throws IOException, InterruptedException,
TimeoutException {
OzoneConfiguration conf = new OzoneConfiguration();
- String clusterId = UUID.randomUUID().toString();
- String scmId = UUID.randomUUID().toString();
+ conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL");
String omServiceId = "omServiceId1";
cluster = MiniOzoneCluster.newBuilder(conf)
- .setClusterId(clusterId)
- .setScmId(scmId)
.setOMServiceId(omServiceId)
.setNumOfOzoneManagers(1)
.build();
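
With the builder change later in this patch, scmId and omId default to freshly generated
UUIDs, so tests no longer pass cluster/SCM ids explicitly. A sketch of the simplified
setup used here (fragment; assumes the MiniOzoneCluster builder methods shown in this diff):

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL");
    // clusterId, scmId and omId are generated by the builder
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setOMServiceId("omServiceId1")
        .setNumOfOzoneManagers(1)
        .build();
    cluster.waitForClusterToBeReady();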
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java
new file mode 100644
index 00000000000..d436a65dab2
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
+
+/**
+ * Helper for tests that want to set client stream properties.
+ */
+public final class ClientConfigForTesting {
+
+ private int chunkSize = 1024 * 1024;
+ private Long blockSize;
+ private Integer streamBufferSize;
+ private Long streamBufferFlushSize;
+ private Long dataStreamBufferFlushSize;
+ private Long dataStreamWindowSize;
+ private Long streamBufferMaxSize;
+ private Integer dataStreamMinPacketSize;
+ private final StorageUnit unit;
+
+ /**
+ * @param unit Defines the unit in which size properties will be passed to the builder.
+ * All sizes are converted to and stored internally in {@link StorageUnit#BYTES}.
+ */
+ public static ClientConfigForTesting newBuilder(StorageUnit unit) {
+ return new ClientConfigForTesting(unit);
+ }
+
+ private ClientConfigForTesting(StorageUnit unit) {
+ this.unit = unit;
+ }
+
+ public ClientConfigForTesting setChunkSize(int size) {
+ chunkSize = (int) toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setBlockSize(long size) {
+ blockSize = toBytes(size);
+ return this;
+ }
+
+ @SuppressWarnings("unused") // kept for completeness
+ public ClientConfigForTesting setStreamBufferSize(int size) {
+ streamBufferSize = (int) toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setStreamBufferFlushSize(long size) {
+ streamBufferFlushSize = toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setStreamBufferMaxSize(long size) {
+ streamBufferMaxSize = toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setDataStreamMinPacketSize(int size) {
+ dataStreamMinPacketSize = (int) toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setDataStreamBufferFlushSize(long size) {
+ dataStreamBufferFlushSize = toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setDataStreamWindowSize(long size) {
+ dataStreamWindowSize = toBytes(size);
+ return this;
+ }
+
+ public void applyTo(MutableConfigurationSource conf) {
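+ // Any size not set explicitly is derived from chunkSize:
+ // flush = chunk, max = 2 * flush, block = 2 * max, and the data-stream sizes are scaled from chunk.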
+ if (streamBufferSize == null) {
+ streamBufferSize = chunkSize;
+ }
+ if (streamBufferFlushSize == null) {
+ streamBufferFlushSize = (long) chunkSize;
+ }
+ if (streamBufferMaxSize == null) {
+ streamBufferMaxSize = 2 * streamBufferFlushSize;
+ }
+ if (dataStreamBufferFlushSize == null) {
+ dataStreamBufferFlushSize = 4L * chunkSize;
+ }
+ if (dataStreamMinPacketSize == null) {
+ dataStreamMinPacketSize = chunkSize / 4;
+ }
+ if (dataStreamWindowSize == null) {
+ dataStreamWindowSize = 8L * chunkSize;
+ }
+ if (blockSize == null) {
+ blockSize = 2 * streamBufferMaxSize;
+ }
+
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+ clientConfig.setStreamBufferSize(streamBufferSize);
+ clientConfig.setStreamBufferMaxSize(streamBufferMaxSize);
+ clientConfig.setStreamBufferFlushSize(streamBufferFlushSize);
+ clientConfig.setDataStreamBufferFlushSize(dataStreamBufferFlushSize);
+ clientConfig.setDataStreamMinPacketSize(dataStreamMinPacketSize);
+ clientConfig.setStreamWindowSize(dataStreamWindowSize);
+
+ conf.setFromObject(clientConfig);
+ conf.setStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, chunkSize, StorageUnit.BYTES);
+ conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, blockSize, StorageUnit.BYTES);
+ }
+
+ private long toBytes(long value) {
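+ // Converts a size from the unit given to the builder into a whole number of bytes.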
+ return Math.round(unit.toBytes(value));
+ }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 458a0ca891f..e864cae00b3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -20,11 +20,9 @@
import java.io.IOException;
import java.util.List;
import java.util.Optional;
-import java.util.OptionalInt;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -319,23 +317,12 @@ abstract class Builder {
protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET;
protected SCMConfigurator scmConfigurator;
- protected Optional<Boolean> enableTrace = Optional.of(false);
protected Optional<Integer> hbInterval = Optional.empty();
protected Optional<Integer> hbProcessorInterval = Optional.empty();
- protected Optional<String> scmId = Optional.empty();
- protected Optional<String> omId = Optional.empty();
+ protected String scmId = UUID.randomUUID().toString();
+ protected String omId = UUID.randomUUID().toString();
- protected Boolean enableContainerDatastream = true;
protected Optional<String> datanodeReservedSpace = Optional.empty();
- protected Optional<Integer> chunkSize = Optional.empty();
- protected OptionalInt streamBufferSize = OptionalInt.empty();
- protected Optional<Long> streamBufferFlushSize = Optional.empty();
- protected Optional<Long> dataStreamBufferFlushSize = Optional.empty();
- protected Optional<Long> datastreamWindowSize = Optional.empty();
- protected Optional<Long> streamBufferMaxSize = Optional.empty();
- protected OptionalInt dataStreamMinPacketSize = OptionalInt.empty();
- protected Optional<Long> blockSize = Optional.empty();
- protected Optional<StorageUnit> streamBufferSizeUnit = Optional.empty();
protected boolean includeRecon = false;
@@ -343,9 +330,6 @@ abstract class Builder {
protected Optional<Integer> scmLayoutVersion = Optional.empty();
protected Optional<Integer> dnLayoutVersion = Optional.empty();
- // Use relative smaller number of handlers for testing
- protected int numOfOmHandlers = 20;
- protected int numOfScmHandlers = 20;
protected int numOfDatanodes = 3;
protected int numDataVolumes = 1;
protected boolean startDataNodes = true;
@@ -375,14 +359,11 @@ public Builder setSCMConfigurator(SCMConfigurator configurator) {
* Sets the cluster Id.
*
* @param id cluster Id
- *
- * @return MiniOzoneCluster.Builder
*/
- public Builder setClusterId(String id) {
+ void setClusterId(String id) {
clusterId = id;
path = GenericTestUtils.getTempPath(
MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId);
- return this;
}
/**
@@ -418,30 +399,6 @@ public Builder setSecretKeyClient(SecretKeyClient client) {
return this;
}
- /**
- * Sets the SCM id.
- *
- * @param id SCM Id
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setScmId(String id) {
- scmId = Optional.of(id);
- return this;
- }
-
- /**
- * Sets the OM id.
- *
- * @param id OM Id
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setOmId(String id) {
- omId = Optional.of(id);
- return this;
- }
-
/**
* Sets the number of HddsDatanodes to be started as part of
* MiniOzoneCluster.
@@ -503,18 +460,6 @@ public Builder setHbProcessorInterval(int val) {
return this;
}
- /**
- * When set to true, enables trace level logging.
- *
- * @param trace true or false
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setTrace(Boolean trace) {
- enableTrace = Optional.of(trace);
- return this;
- }
-
/**
* Sets the reserved space
* {@link org.apache.hadoop.hdds.scm.ScmConfigKeys}
@@ -533,66 +478,6 @@ public Builder setDatanodeReservedSpace(String reservedSpace) {
return this;
}
- /**
- * Sets the chunk size.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setChunkSize(int size) {
- chunkSize = Optional.of(size);
- return this;
- }
-
- public Builder setStreamBufferSize(int size) {
- streamBufferSize = OptionalInt.of(size);
- return this;
- }
-
- /**
- * Sets the flush size for stream buffer.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setStreamBufferFlushSize(long size) {
- streamBufferFlushSize = Optional.of(size);
- return this;
- }
-
- /**
- * Sets the max size for stream buffer.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setStreamBufferMaxSize(long size) {
- streamBufferMaxSize = Optional.of(size);
- return this;
- }
-
- public Builder setDataStreamBufferFlushize(long size) {
- dataStreamBufferFlushSize = Optional.of(size);
- return this;
- }
-
- public Builder setDataStreamMinPacketSize(int size) {
- dataStreamMinPacketSize = OptionalInt.of(size);
- return this;
- }
-
- public Builder setDataStreamStreamWindowSize(long size) {
- datastreamWindowSize = Optional.of(size);
- return this;
- }
-
- /**
- * Sets the block size for stream buffer.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setBlockSize(long size) {
- blockSize = Optional.of(size);
- return this;
- }
-
public Builder setNumOfOzoneManagers(int numOMs) {
this.numOfOMs = numOMs;
return this;
@@ -603,11 +488,6 @@ public Builder setNumOfActiveOMs(int numActiveOMs) {
return this;
}
- public Builder setStreamBufferSizeUnit(StorageUnit unit) {
- this.streamBufferSizeUnit = Optional.of(unit);
- return this;
- }
-
public Builder setOMServiceId(String serviceId) {
this.omServiceId = serviceId;
return this;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 24a3ff84f1c..400ae3ee2cc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -26,8 +26,6 @@
import java.util.Collection;
import java.util.Collections;
import java.util.List;
-import java.util.Optional;
-import java.util.OptionalInt;
import java.util.Set;
import java.util.UUID;
import java.util.Iterator;
@@ -36,7 +34,6 @@
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -45,7 +42,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
@@ -108,7 +104,6 @@
import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
/**
* MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
@@ -656,58 +651,7 @@ protected void initializeConfiguration() throws IOException {
Files.createDirectories(metaDir);
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
// conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- if (!chunkSize.isPresent()) {
- //set it to 1MB by default in tests
- chunkSize = Optional.of(1);
- }
- if (!streamBufferSize.isPresent()) {
- streamBufferSize = OptionalInt.of(chunkSize.get());
- }
- if (!streamBufferFlushSize.isPresent()) {
- streamBufferFlushSize = Optional.of((long) chunkSize.get());
- }
- if (!streamBufferMaxSize.isPresent()) {
- streamBufferMaxSize = Optional.of(2 * streamBufferFlushSize.get());
- }
- if (!dataStreamBufferFlushSize.isPresent()) {
- dataStreamBufferFlushSize = Optional.of((long) 4 * chunkSize.get());
- }
- if (!dataStreamMinPacketSize.isPresent()) {
- dataStreamMinPacketSize = OptionalInt.of(chunkSize.get() / 4);
- }
- if (!datastreamWindowSize.isPresent()) {
- datastreamWindowSize = Optional.of((long) 8 * chunkSize.get());
- }
- if (!blockSize.isPresent()) {
- blockSize = Optional.of(2 * streamBufferMaxSize.get());
- }
- if (!streamBufferSizeUnit.isPresent()) {
- streamBufferSizeUnit = Optional.of(StorageUnit.MB);
- }
-
- OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
- clientConfig.setStreamBufferSize(
- (int) Math.round(
- streamBufferSizeUnit.get().toBytes(streamBufferSize.getAsInt())));
- clientConfig.setStreamBufferMaxSize(Math.round(
- streamBufferSizeUnit.get().toBytes(streamBufferMaxSize.get())));
- clientConfig.setStreamBufferFlushSize(Math.round(
- streamBufferSizeUnit.get().toBytes(streamBufferFlushSize.get())));
- clientConfig.setDataStreamBufferFlushSize(Math.round(
- streamBufferSizeUnit.get().toBytes(dataStreamBufferFlushSize.get())));
- clientConfig.setDataStreamMinPacketSize((int) Math.round(
- streamBufferSizeUnit.get()
- .toBytes(dataStreamMinPacketSize.getAsInt())));
- clientConfig.setStreamWindowSize(Math.round(
- streamBufferSizeUnit.get().toBytes(datastreamWindowSize.get())));
- conf.setFromObject(clientConfig);
-
- conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
- chunkSize.get(), streamBufferSizeUnit.get());
-
- conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, blockSize.get(),
- streamBufferSizeUnit.get());
// MiniOzoneCluster should have global pipeline upper limit.
conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT,
pipelineNumLimit >= DEFAULT_PIPELINE_LIMIT ?
@@ -722,7 +666,6 @@ protected void initializeConfiguration() throws IOException {
// pipeline.
conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE,
numOfDatanodes >= 3 ? 3 : 1);
- configureTrace();
}
void removeConfiguration() {
@@ -764,10 +707,7 @@ protected void initializeScmStorage(SCMStorageConfig scmStore)
return;
}
scmStore.setClusterId(clusterId);
- if (!scmId.isPresent()) {
- scmId = Optional.of(UUID.randomUUID().toString());
- }
- scmStore.setScmId(scmId.get());
+ scmStore.setScmId(scmId);
scmStore.initialize();
//TODO: HDDS-6897
//Disabling Ratis only for MiniOzoneClusterImpl.
@@ -777,7 +717,7 @@ protected void initializeScmStorage(SCMStorageConfig scmStore)
&& SCMHAUtils.isSCMHAEnabled(conf)) {
scmStore.setSCMHAFlag(true);
scmStore.persistCurrentState();
- SCMRatisServerImpl.initialize(clusterId, scmId.get(),
+ SCMRatisServerImpl.initialize(clusterId, scmId,
SCMHANodeDetails.loadSCMHAConfig(conf, scmStore)
.getLocalNodeDetails(), conf);
}
@@ -788,10 +728,10 @@ void initializeOmStorage(OMStorage omStorage) throws IOException {
return;
}
omStorage.setClusterId(clusterId);
- omStorage.setOmId(omId.orElse(UUID.randomUUID().toString()));
+ omStorage.setOmId(omId);
// Initialize ozone certificate client if security is enabled.
if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
- OzoneManager.initializeSecurity(conf, omStorage, scmId.get());
+ OzoneManager.initializeSecurity(conf, omStorage, scmId);
}
omStorage.initialize();
}
@@ -838,7 +778,6 @@ protected String getSCMAddresses(List<StorageContainerManager> scms) {
protected List<HddsDatanodeService> createHddsDatanodes(
List<StorageContainerManager> scms, ReconServer reconServer)
throws IOException {
- configureHddsDatanodes();
String scmAddress = getSCMAddresses(scms);
String[] args = new String[] {};
conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress);
@@ -909,7 +848,6 @@ protected void configureSCM() {
localhostWithFreePort());
conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY,
localhostWithFreePort());
- conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
"3s");
configureSCMheartbeat();
@@ -944,12 +882,6 @@ private void configureOM() {
conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY,
localhostWithFreePort());
conf.setInt(OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, getFreePort());
- conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers);
- }
-
- private void configureHddsDatanodes() {
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
- enableContainerDatastream);
}
protected void configureDatanodePorts(ConfigurationTarget conf) {
@@ -967,15 +899,6 @@ protected void configureDatanodePorts(ConfigurationTarget conf) {
conf.setFromObject(new ReplicationConfig().setPort(getFreePort()));
}
- private void configureTrace() {
- if (enableTrace.isPresent()) {
- conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY,
- enableTrace.get());
- GenericTestUtils.setRootLogLevel(Level.TRACE);
- }
- GenericTestUtils.setRootLogLevel(Level.INFO);
- }
-
protected void configureRecon() throws IOException {
ConfigurationProvider.resetConfiguration();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
index d79f28ba158..797a7515f20 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java
@@ -170,18 +170,14 @@ public StorageContainerManager getScmLeader() {
.findFirst().orElse(null);
}
- private OzoneManager getOMLeader(boolean waitForLeaderElection)
+ public OzoneManager waitForLeaderOM()
throws TimeoutException, InterruptedException {
- if (waitForLeaderElection) {
- final OzoneManager[] om = new OzoneManager[1];
- GenericTestUtils.waitFor(() -> {
- om[0] = getOMLeader();
- return om[0] != null;
- }, 200, waitForClusterToBeReadyTimeout);
- return om[0];
- } else {
- return getOMLeader();
- }
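+ // Poll every 200 ms until some OM reports itself as leader, bounded by waitForClusterToBeReadyTimeout.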
+ final OzoneManager[] om = new OzoneManager[1];
+ GenericTestUtils.waitFor(() -> {
+ om[0] = getOMLeader();
+ return om[0] != null;
+ }, 200, waitForClusterToBeReadyTimeout);
+ return om[0];
}
/**
@@ -431,7 +427,6 @@ public MiniOzoneCluster build() throws IOException {
protected void initOMRatisConf() {
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);
- conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers);
// If the test changes the following config values we will respect them,
// otherwise we will set lower timeout values.
@@ -731,7 +726,7 @@ public void bootstrapOzoneManager(String omNodeId,
int retryCount = 0;
OzoneManager om = null;
- OzoneManager omLeader = getOMLeader(true);
+ OzoneManager omLeader = waitForLeaderOM();
long leaderSnapshotIndex = omLeader.getRatisSnapshotIndex();
while (true) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index c4b027074ff..884e435d25e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -35,8 +35,10 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.LambdaTestUtils.VoidCallable;
import org.apache.ratis.util.function.CheckedConsumer;
-import org.junit.jupiter.api.Assertions;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Helper class for Tests.
@@ -92,7 +94,7 @@ public static void closeContainers(
.updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
HddsProtos.LifeCycleEvent.CLOSE);
}
- Assertions.assertFalse(scm.getContainerManager()
+ assertFalse(scm.getContainerManager()
.getContainer(ContainerID.valueOf(blockID.getContainerID()))
.isOpen());
}, omKeyLocationInfoGroups);
@@ -140,14 +142,10 @@ public static void performOperationOnKeyContainers(
public static void expectOmException(
OMException.ResultCodes code,
- VoidCallable eval)
- throws Exception {
- try {
- eval.call();
- Assertions.fail("OMException is expected");
- } catch (OMException ex) {
- Assertions.assertEquals(code, ex.getResult());
- }
+ VoidCallable eval) {
+
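+ // assertThrows fails the test if no OMException is raised and returns the thrown
+ // exception so its result code can be verified.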
+ OMException ex = assertThrows(OMException.class, () -> eval.call(), "OMException is expected");
+ assertEquals(code, ex.getResult());
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index 3f94387e1c6..5338cb8a0cc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -18,87 +18,31 @@
package org.apache.hadoop.ozone;
-import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-
import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.rpc.RpcType;
-import org.apache.ratis.rpc.SupportedRpcType;
import org.apache.ratis.server.RaftServer;
import org.apache.ratis.statemachine.StateMachine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+
/**
* Helpers for Ratis tests.
*/
public interface RatisTestHelper {
Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class);
- /** For testing Ozone with Ratis. */
- class RatisTestSuite implements Closeable {
- static final RpcType RPC = SupportedRpcType.GRPC;
- static final int NUM_DATANODES = 3;
-
- private final OzoneConfiguration conf;
- private final MiniOzoneCluster cluster;
-
- /**
- * Create a {@link MiniOzoneCluster} for testing by setting.
- * OZONE_ENABLED = true
- * RATIS_ENABLED = true
- */
- public RatisTestSuite()
- throws IOException, TimeoutException, InterruptedException {
- conf = newOzoneConfiguration(RPC);
-
- cluster = newMiniOzoneCluster(NUM_DATANODES, conf);
- }
-
- public OzoneConfiguration getConf() {
- return conf;
- }
-
- public MiniOzoneCluster getCluster() {
- return cluster;
- }
-
- public ClientProtocol newOzoneClient()
- throws IOException {
- return new RpcClient(conf, null);
- }
-
- @Override
- public void close() {
- cluster.shutdown();
- }
-
- public int getDatanodeOzoneRestPort() {
- return cluster.getHddsDatanodes().get(0).getDatanodeDetails()
- .getPort(DatanodeDetails.Port.Name.REST).getValue();
- }
- }
-
- static OzoneConfiguration newOzoneConfiguration(RpcType rpc) {
- final OzoneConfiguration conf = new OzoneConfiguration();
- initRatisConf(rpc, conf);
- return conf;
- }
-
static void initRatisConf(RpcType rpc, OzoneConfiguration conf) {
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name());
@@ -108,17 +52,6 @@ static void initRatisConf(RpcType rpc, OzoneConfiguration conf) {
rpc.name());
}
- static MiniOzoneCluster newMiniOzoneCluster(
- int numDatanodes, OzoneConfiguration conf)
- throws IOException, TimeoutException, InterruptedException {
- final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
- .setHbInterval(1000)
- .setHbProcessorInterval(1000)
- .setNumDatanodes(numDatanodes).build();
- cluster.waitForClusterToBeReady();
- return cluster;
- }
-
static void initXceiverServerRatis(
RpcType rpc, DatanodeDetails dd, Pipeline pipeline) throws IOException {
final RaftPeer p = RatisHelper.toRaftPeer(dd);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
index 9cf4f53a68f..a04c1236186 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
@@ -116,8 +116,6 @@ public final class TestBlockTokens {
private static File testUserKeytab;
private static String testUserPrincipal;
private static String host;
- private static String clusterId;
- private static String scmId;
private static MiniOzoneHAClusterImpl cluster;
private static OzoneClient client;
private static BlockInputStreamFactory blockInputStreamFactory =
@@ -132,8 +130,6 @@ public static void init() throws Exception {
workDir =
GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName());
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
startMiniKdc();
setSecureConfig();
@@ -383,9 +379,7 @@ private static void startCluster()
throws IOException, TimeoutException, InterruptedException {
OzoneManager.setTestSecureOmFlag(true);
MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
.setSCMServiceId("TestSecretKey")
- .setScmId(scmId)
.setNumDatanodes(3)
.setNumOfStorageContainerManagers(3)
.setNumOfOzoneManagers(1);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java
index 9a98a0a1897..a181a6f45e9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java
@@ -47,7 +47,6 @@
import java.time.Instant;
import java.util.Map;
import java.util.Properties;
-import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -71,11 +70,13 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.Assertions;
+
/**
* Integration test class to verify block token CLI commands functionality in a
* secure cluster.
@@ -92,8 +93,6 @@ public final class TestBlockTokensCLI {
private static File ozoneKeytab;
private static File spnegoKeytab;
private static String host;
- private static String clusterId;
- private static String scmId;
private static String omServiceId;
private static String scmServiceId;
private static MiniOzoneHAClusterImpl cluster;
@@ -108,8 +107,6 @@ public static void init() throws Exception {
workDir =
GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName());
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
omServiceId = "om-service-test";
scmServiceId = "scm-service-test";
@@ -264,7 +261,7 @@ public void testRotateKeySCMAdminCommandUtil(String[] args) {
// rotating.
String currentKey =
getScmSecretKeyManager().getCurrentSecretKey().toString();
- Assertions.assertEquals(initialKey, currentKey);
+ assertEquals(initialKey, currentKey);
// Rotate the secret key.
ozoneAdmin.execute(args);
@@ -280,9 +277,9 @@ public void testRotateKeySCMAdminCommandUtil(String[] args) {
// Otherwise, both keys should be the same.
if (isForceFlagPresent(args) ||
shouldRotate(getScmSecretKeyManager().getCurrentSecretKey())) {
- Assertions.assertNotEquals(initialKey, newKey);
+ assertNotEquals(initialKey, newKey);
} else {
- Assertions.assertEquals(initialKey, newKey);
+ assertEquals(initialKey, newKey);
}
}
@@ -322,10 +319,8 @@ private static void startCluster()
throws IOException, TimeoutException, InterruptedException {
OzoneManager.setTestSecureOmFlag(true);
MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
.setSCMServiceId(scmServiceId)
.setOMServiceId(omServiceId)
- .setScmId(scmId)
.setNumDatanodes(3)
.setNumOfStorageContainerManagers(3)
.setNumOfOzoneManagers(3);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index 56e744886b8..5f8f34a2e3c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -40,8 +40,11 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.Assertions;
+
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
@@ -91,15 +94,10 @@ public void testCreate() throws Exception {
*/
@Test
public void testGetPipeline() throws Exception {
- try {
- storageClient.getPipeline(PipelineID.randomId().getProtobuf());
- Assertions.fail("Get Pipeline should fail");
- } catch (Exception e) {
- assertTrue(
- SCMHAUtils.unwrapException(e) instanceof PipelineNotFoundException);
- }
-
- Assertions.assertFalse(storageClient.listPipelines().isEmpty());
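+ // Looking up a random, non-existent pipeline must fail; the unwrapped cause is expected
+ // to be a PipelineNotFoundException.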
+ Exception e =
+ assertThrows(Exception.class, () -> storageClient.getPipeline(PipelineID.randomId().getProtobuf()));
+ assertInstanceOf(PipelineNotFoundException.class, SCMHAUtils.unwrapException(e));
+ assertThat(storageClient.listPipelines()).isNotEmpty();
}
@Test
@@ -154,8 +152,7 @@ public void testDatanodeUsageInfoContainerCount() throws IOException {
dn.getIpAddress(), dn.getUuidString());
assertEquals(1, usageInfoList.size());
- assertTrue(usageInfoList.get(0).getContainerCount() >= 0 &&
- usageInfoList.get(0).getContainerCount() <= 1);
+ assertThat(usageInfoList.get(0).getContainerCount()).isGreaterThanOrEqualTo(0).isLessThanOrEqualTo(1);
totalContainerCount[(int)usageInfoList.get(0).getContainerCount()]++;
}
assertEquals(2, totalContainerCount[0]);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
index 6e76a86dd94..c727ecd0a9d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
@@ -41,8 +41,6 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.Assertions;
-import java.io.IOException;
import java.util.concurrent.TimeUnit;
import static java.nio.charset.StandardCharsets.UTF_8;
@@ -80,43 +78,39 @@ public static void shutdown() {
}
@Test
- public void testContainerStateMachineIdempotency() throws Exception {
+ void testContainerStateMachineIdempotency() throws Exception {
ContainerWithPipeline container = storageContainerLocationClient
.allocateContainer(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
long containerID = container.getContainerInfo().getContainerID();
Pipeline pipeline = container.getPipeline();
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
- try {
- //create the container
- ContainerProtocolCalls.createContainer(client, containerID, null);
- // call create Container again
- BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
- byte[] data =
- RandomStringUtils.random(RandomUtils.nextInt(0, 1024))
- .getBytes(UTF_8);
- ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
- ContainerTestHelper
- .getWriteChunkRequest(container.getPipeline(), blockID,
- data.length);
- client.sendCommand(writeChunkRequest);
+ //create the container
+ ContainerProtocolCalls.createContainer(client, containerID, null);
+ // call create Container again
+ BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+ byte[] data =
+ RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
+ ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+ ContainerTestHelper
+ .getWriteChunkRequest(container.getPipeline(), blockID,
+ data.length);
+ client.sendCommand(writeChunkRequest);
- //Make the write chunk request again without requesting for overWrite
- client.sendCommand(writeChunkRequest);
- // Now, explicitly make a putKey request for the block.
- ContainerProtos.ContainerCommandRequestProto putKeyRequest =
- ContainerTestHelper
- .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
- client.sendCommand(putKeyRequest).getPutBlock();
- // send the putBlock again
- client.sendCommand(putKeyRequest);
+ //Make the write chunk request again without requesting for overWrite
+ client.sendCommand(writeChunkRequest);
+ // Now, explicitly make a putKey request for the block.
+ ContainerProtos.ContainerCommandRequestProto putKeyRequest =
+ ContainerTestHelper
+ .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
+ client.sendCommand(putKeyRequest).getPutBlock();
+ // send the putBlock again
+ client.sendCommand(putKeyRequest);
+
+ // close container call
+ ContainerProtocolCalls.closeContainer(client, containerID, null);
+ ContainerProtocolCalls.closeContainer(client, containerID, null);
- // close container call
- ContainerProtocolCalls.closeContainer(client, containerID, null);
- ContainerProtocolCalls.closeContainer(client, containerID, null);
- } catch (IOException ioe) {
- Assertions.fail("Container operation failed" + ioe);
- }
xceiverClientManager.releaseClient(client, false);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java
index c70d4da4baa..e49a378a15c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.ozone;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
@@ -67,15 +67,12 @@ public void testCpuMetrics() throws IOException {
String metricsResponseBodyContent = metricsResponse.body().string();
// then
- assertTrue(metricsResponseBodyContent
- .contains("jvm_metrics_cpu_available_processors"),
- metricsResponseBodyContent);
- assertTrue(metricsResponseBodyContent
- .contains("jvm_metrics_cpu_system_load"),
- metricsResponseBodyContent);
- assertTrue(metricsResponseBodyContent
- .contains("jvm_metrics_cpu_jvm_load"),
- metricsResponseBodyContent);
+ assertThat(metricsResponseBodyContent)
+ .contains("jvm_metrics_cpu_available_processors");
+ assertThat(metricsResponseBodyContent)
+ .contains("jvm_metrics_cpu_system_load");
+ assertThat(metricsResponseBodyContent)
+ .contains("jvm_metrics_cpu_jvm_load");
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java
index da806ac2a3e..a82a1a8be70 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java
@@ -86,11 +86,11 @@
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertFalse;
+
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.slf4j.event.Level.INFO;
import org.junit.jupiter.api.AfterEach;
@@ -323,8 +323,8 @@ public void testDelegationToken(boolean useIp) throws Exception {
RandomStringUtils.randomAscii(5));
// Assert if auth was successful via Kerberos
- assertFalse(logs.getOutput().contains(
- "Auth successful for " + username + " (auth:KERBEROS)"));
+ assertThat(logs.getOutput()).doesNotContain(
+ "Auth successful for " + username + " (auth:KERBEROS)");
// Case 1: Test successful delegation token.
Token<OzoneTokenIdentifier> token = omClient
@@ -332,7 +332,7 @@ public void testDelegationToken(boolean useIp) throws Exception {
// Case 2: Test successful token renewal.
long renewalTime = omClient.renewDelegationToken(token);
- assertTrue(renewalTime > 0);
+ assertThat(renewalTime).isGreaterThan(0);
// Check if token is of right kind and renewer is running om instance
assertNotNull(token);
@@ -358,13 +358,12 @@ public void testDelegationToken(boolean useIp) throws Exception {
});
// Case 3: Test Client can authenticate using token.
- assertFalse(logs.getOutput().contains(
- "Auth successful for " + username + " (auth:TOKEN)"));
+ assertThat(logs.getOutput()).doesNotContain(
+ "Auth successful for " + username + " (auth:TOKEN)");
OzoneTestUtils.expectOmException(VOLUME_NOT_FOUND,
() -> omClient.deleteVolume("vol1"));
- assertTrue(logs.getOutput().contains("Auth successful for "
- + username + " (auth:TOKEN)"),
- "Log file doesn't contain successful auth for user " + username);
+ assertThat(logs.getOutput())
+ .contains("Auth successful for " + username + " (auth:TOKEN)");
// Case 4: Test failure of token renewal.
// Call to renewDelegationToken will fail but it will confirm that
@@ -374,8 +373,8 @@ public void testDelegationToken(boolean useIp) throws Exception {
OMException ex = assertThrows(OMException.class,
() -> omClient.renewDelegationToken(token));
assertEquals(INVALID_AUTH_METHOD, ex.getResult());
- assertTrue(logs.getOutput().contains(
- "Auth successful for " + username + " (auth:TOKEN)"));
+ assertThat(logs.getOutput()).contains(
+ "Auth successful for " + username + " (auth:TOKEN)");
omLogs.clearOutput();
//testUser.setAuthenticationMethod(AuthMethod.KERBEROS);
omClient.close();
@@ -391,7 +390,7 @@ public void testDelegationToken(boolean useIp) throws Exception {
// Wait for client to timeout
Thread.sleep(CLIENT_TIMEOUT);
- assertFalse(logs.getOutput().contains("Auth failed for"));
+ assertThat(logs.getOutput()).doesNotContain("Auth failed for");
// Case 6: Test failure of token cancellation.
// Get Om client, this time authentication using Token will fail as
@@ -402,8 +401,8 @@ public void testDelegationToken(boolean useIp) throws Exception {
ex = assertThrows(OMException.class,
() -> omClient.cancelDelegationToken(token));
assertEquals(TOKEN_ERROR_OTHER, ex.getResult());
- assertTrue(ex.getMessage().contains("Cancel delegation token failed"));
- assertTrue(logs.getOutput().contains("Auth failed for"));
+ assertThat(ex.getMessage()).contains("Cancel delegation token failed");
+ assertThat(logs.getOutput()).contains("Auth failed for");
} finally {
om.stop();
om.join();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index b8cf7906c0a..74d52c4a945 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -109,8 +109,8 @@ public void testStartMultipleDatanodes() throws Exception {
}
@Test
- public void testContainerRandomPort() throws IOException {
- OzoneConfiguration ozoneConf = SCMTestUtils.getConf();
+ void testContainerRandomPort(@TempDir File tempDir) throws IOException {
+ OzoneConfiguration ozoneConf = SCMTestUtils.getConf(tempDir);
// Each instance of SM will create an ozone container
// that bounds to a random port.
@@ -233,7 +233,6 @@ public void testDNstartAfterSCM() throws Exception {
EndpointStateMachine.EndPointStates.GETVERSION,
endpoint.getState());
}
- Thread.sleep(1000);
}
// DN should successfully register with the SCM after SCM is restarted.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
index a3e0be5a85d..0c51ba41311 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java
@@ -22,19 +22,19 @@
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.ozone.test.GenericTestUtils;
-import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.Assertions;
/**
* This class tests MiniOzoneHAClusterImpl.
@@ -44,28 +44,22 @@ public class TestMiniOzoneOMHACluster {
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
private String omServiceId;
private int numOfOMs = 3;
/**
* Create a MiniOzoneHAClusterImpl for testing.
*
- * @throws IOException
+ * @throws Exception
*/
@BeforeEach
public void init() throws Exception {
conf = new OzoneConfiguration();
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
omServiceId = "omServiceId1";
conf.setBoolean(OZONE_ACL_ENABLED, true);
conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS,
OZONE_ADMINISTRATORS_WILDCARD);
cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
- .setClusterId(clusterId)
- .setScmId(scmId)
.setOMServiceId(omServiceId)
.setNumOfOzoneManagers(numOfOMs)
.build();
@@ -91,9 +85,8 @@ public void testGetOMLeader() throws InterruptedException, TimeoutException {
ozoneManager.set(om);
return om != null;
}, 100, 120000);
- Assertions.assertNotNull(ozoneManager, "Timed out waiting OM leader election to finish: "
+ assertNotNull(ozoneManager, "Timed out waiting for OM leader election to finish: "
+ "no leader or more than one leader.");
- Assertions.assertTrue(ozoneManager.get().isLeaderReady(),
- "Should have gotten the leader!");
+ assertTrue(ozoneManager.get().isLeaderReady(), "Should have gotten the leader!");
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
index cec90067dac..852f351ee25 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.DefaultConfigManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
@@ -31,7 +32,6 @@
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -42,9 +42,7 @@
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.ByteArrayInputStream;
-import java.security.SecureRandom;
import java.util.ArrayList;
-import java.util.UUID;
import java.util.List;
import java.util.Base64;
import java.util.concurrent.TimeoutException;
@@ -53,6 +51,7 @@
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
@@ -64,8 +63,6 @@ public class TestMultipartObjectGet {
public static final Logger LOG = LoggerFactory.getLogger(
TestMultipartObjectGet.class);
private static OzoneConfiguration conf;
- private static String clusterId;
- private static String scmId;
private static String omServiceId;
private static String scmServiceId;
private static final String BUCKET = OzoneConsts.BUCKET;
@@ -80,8 +77,6 @@ public class TestMultipartObjectGet {
@BeforeAll
public static void init() throws Exception {
conf = new OzoneConfiguration();
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
omServiceId = "om-service-test";
scmServiceId = "scm-service-test";
@@ -89,13 +84,13 @@ public static void init() throws Exception {
client = cluster.newClient();
client.getObjectStore().createS3Bucket(BUCKET);
- headers = Mockito.mock(HttpHeaders.class);
+ headers = mock(HttpHeaders.class);
when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
"STANDARD");
- context = Mockito.mock(ContainerRequestContext.class);
- Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class));
- Mockito.when(context.getUriInfo().getQueryParameters())
+ context = mock(ContainerRequestContext.class);
+ when(context.getUriInfo()).thenReturn(mock(UriInfo.class));
+ when(context.getUriInfo().getQueryParameters())
.thenReturn(new MultivaluedHashMap<>());
REST.setHeaders(headers);
@@ -109,10 +104,8 @@ private static void startCluster()
throws IOException, TimeoutException, InterruptedException {
OzoneManager.setTestSecureOmFlag(true);
MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
.setSCMServiceId(scmServiceId)
.setOMServiceId(omServiceId)
- .setScmId(scmId)
.setNumDatanodes(3)
.setNumOfStorageContainerManagers(3)
.setNumOfOzoneManagers(3);
@@ -217,8 +210,7 @@ public void testMultipart() throws Exception {
private static String generateRandomContent(int sizeInMB) {
int bytesToGenerate = sizeInMB * 1024 * 1024;
- byte[] randomBytes = new byte[bytesToGenerate];
- new SecureRandom().nextBytes(randomBytes);
+ byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate);
return Base64.getEncoder().encodeToString(randomBytes);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 6bc92418a2a..1be5b64ac87 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -180,8 +180,8 @@
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.slf4j.event.Level.INFO;
@@ -624,15 +624,11 @@ void testAccessControlExceptionOnClient() throws Exception {
new OzoneManagerProtocolClientSideTranslatorPB(
OmTransportFactory.create(conf, ugi, null),
ClientId.randomId().toString());
- try {
- secureClient.createVolume(
- new OmVolumeArgs.Builder().setVolume("vol1")
- .setOwnerName("owner1")
- .setAdminName("admin")
- .build());
- } catch (IOException ex) {
- fail("Secure client should be able to create volume.");
- }
+ secureClient.createVolume(
+ new OmVolumeArgs.Builder().setVolume("vol1")
+ .setOwnerName("owner1")
+ .setAdminName("admin")
+ .build());
ugi = UserGroupInformation.createUserForTesting(
"testuser1", new String[] {"test"});
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
index 5e3c3ab5a75..f2a079ca80c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
@@ -16,7 +16,8 @@
*/
package org.apache.hadoop.ozone.client;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.security.AccessControlException;
@@ -25,27 +26,20 @@
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
-import java.util.UUID;
/**
* Test implementation for OzoneClientFactory.
*/
public class TestOzoneClientFactory {
- private static String scmId = UUID.randomUUID().toString();
- private static String clusterId = UUID.randomUUID().toString();
-
@Test
public void testRemoteException() {
OzoneConfiguration conf = new OzoneConfiguration();
-
- try {
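+ // The remote call inside the lambda is expected to fail; the assertion after the
+ // lambda verifies that the failure is an AccessControlException.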
+ Exception e = assertThrows(Exception.class, () -> {
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.setTotalPipelineNumLimit(10)
- .setScmId(scmId)
- .setClusterId(clusterId)
.build();
String omPort = cluster.getOzoneManager().getRpcPort();
@@ -59,17 +53,14 @@ public void testRemoteException() {
public Void run() throws IOException {
conf.set("ozone.security.enabled", "true");
try (OzoneClient ozoneClient =
- OzoneClientFactory.getRpcClient("localhost",
- Integer.parseInt(omPort), conf)) {
+ OzoneClientFactory.getRpcClient("localhost", Integer.parseInt(omPort), conf)) {
ozoneClient.getObjectStore().listVolumes("/");
}
return null;
}
});
- fail("Should throw exception here");
- } catch (IOException | InterruptedException e) {
- assert e instanceof AccessControlException;
- }
+ });
+ assertInstanceOf(AccessControlException.class, e);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
index 3478489edd6..0b0b2586c9e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
@@ -16,13 +16,13 @@
*/
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -31,6 +31,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.BucketArgs;
@@ -120,11 +121,16 @@ protected static void init(boolean zeroCopyEnabled) throws Exception {
TimeUnit.SECONDS);
conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED,
zeroCopyEnabled);
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
- .setTotalPipelineNumLimit(10).setBlockSize(blockSize)
- .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
+ .setTotalPipelineNumLimit(10).build();
cluster.waitForClusterToBeReady();
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
index ce89e679df4..e7c8be170ca 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
@@ -17,10 +17,10 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -32,6 +32,7 @@
import org.apache.hadoop.hdds.scm.protocolPB.
StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -100,14 +101,16 @@ private void startCluster(OzoneConfiguration conf) throws Exception {
raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
conf.setFromObject(raftClientConfig);
- conf.setQuietMode(false);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ conf.setQuietMode(false);
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(3)
.build();
cluster.waitForClusterToBeReady();
// the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index d3caf623873..8bb791bb103 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -17,9 +17,9 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -27,6 +27,7 @@
import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -92,17 +93,19 @@ public static void init() throws Exception {
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
StorageUnit.MB);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .setDataStreamBufferFlushSize(maxFlushSize)
.setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
index 74686d363c8..1e9cefbaa48 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
@@ -24,12 +24,12 @@
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
@@ -38,6 +38,7 @@
import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
import org.apache.hadoop.hdds.scm.storage.BufferPool;
import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
@@ -105,14 +106,16 @@ static MiniOzoneCluster createCluster() throws IOException,
ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30));
conf.setFromObject(ratisClientConfig);
- MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(BLOCK_SIZE)
.setChunkSize(CHUNK_SIZE)
.setStreamBufferFlushSize(FLUSH_SIZE)
.setStreamBufferMaxSize(MAX_FLUSH_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index f2a5748bffd..9609dea0481 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -89,7 +89,7 @@ public static void init() throws Exception {
chunkSize = (int) OzoneConsts.MB;
blockSize = 4 * chunkSize;
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setChecksumType(ChecksumType.NONE);
conf.setFromObject(config);
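Rather than instantiating a fresh OzoneClientConfig, the tests now read it back from the configuration, so values already applied to conf (for example by ClientConfigForTesting) are not silently reset when the object is written back. A sketch of the round-trip; the rationale is inferred rather than stated in the patch:

    OzoneClientConfig config = conf.getObject(OzoneClientConfig.class); // picks up values already set on conf
    config.setChecksumType(ChecksumType.NONE);                          // override only the field under test
    conf.setFromObject(config);                                         // merged object written back
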
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 5a2d67960fa..2c11177e5ea 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -95,6 +95,7 @@
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -376,7 +377,7 @@ public void testUnhealthyContainer() throws Exception {
int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
// restart the hdds datanode and see if the container is listed in the
// in the missing container set and not in the regular set
- cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
+ cluster.restartHddsDatanode(dn.getDatanodeDetails(), true);
// make sure the container state is still marked unhealthy after restart
keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
.readContainerFile(containerFile);
@@ -459,10 +460,7 @@ public void testApplyTransactionFailure() throws Exception {
// a pipeline close action
try {
- xceiverClient.sendCommand(request.build());
- fail("Expected exception not thrown");
- } catch (IOException e) {
- // Exception should be thrown
+ assertThrows(IOException.class, () -> xceiverClient.sendCommand(request.build()));
} finally {
xceiverClientManager.releaseClient(xceiverClient, false);
}
@@ -494,7 +492,7 @@ public void testApplyTransactionFailure() throws Exception {
@Test
@Flaky("HDDS-6115")
- public void testApplyTransactionIdempotencyWithClosedContainer()
+ void testApplyTransactionIdempotencyWithClosedContainer()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -543,11 +541,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
request.setContainerID(containerID);
request.setCloseContainer(
ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
- try {
- xceiverClient.sendCommand(request.build());
- } catch (IOException e) {
- fail("Exception should not be thrown");
- }
+ xceiverClient.sendCommand(request.build());
assertSame(
TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
.getDatanodeStateMachine()
@@ -557,8 +551,6 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
assertTrue(stateMachine.isStateMachineHealthy());
try {
stateMachine.takeSnapshot();
- } catch (IOException ioe) {
- fail("Exception should not be thrown");
} finally {
xceiverClientManager.releaseClient(xceiverClient, false);
}
@@ -585,7 +577,7 @@ public void testApplyTransactionIdempotencyWithClosedContainer()
// not be marked unhealthy and pipeline should not fail if container gets
// closed here.
@Test
- public void testWriteStateMachineDataIdempotencyWithClosedContainer()
+ void testWriteStateMachineDataIdempotencyWithClosedContainer()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -699,11 +691,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
.getContainerState(),
ContainerProtos.ContainerDataProto.State.CLOSED);
assertTrue(stateMachine.isStateMachineHealthy());
- try {
- stateMachine.takeSnapshot();
- } catch (IOException ioe) {
- fail("Exception should not be thrown");
- }
+ stateMachine.takeSnapshot();
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
assertNotEquals(snapshot.getPath(), latestSnapshot.getPath());
@@ -715,43 +703,37 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer()
}
@Test
- public void testContainerStateMachineSingleFailureRetry()
+ void testContainerStateMachineSingleFailureRetry()
throws Exception {
- OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis1", 1024,
- ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
- ReplicationFactor.THREE), new HashMap<>());
+ try (OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis1", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>())) {
- key.write("ratis".getBytes(UTF_8));
- key.flush();
- key.write("ratis".getBytes(UTF_8));
- key.write("ratis".getBytes(UTF_8));
-
- KeyOutputStream groupOutputStream = (KeyOutputStream) key.
- getOutputStream();
-      List<OmKeyLocationInfo> locationInfoList =

- groupOutputStream.getLocationInfoList();
- assertEquals(1, locationInfoList.size());
+ key.write("ratis".getBytes(UTF_8));
+ key.flush();
+ key.write("ratis".getBytes(UTF_8));
+ key.write("ratis".getBytes(UTF_8));
- OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
+ KeyOutputStream groupOutputStream = (KeyOutputStream) key.
+ getOutputStream();
+      List<OmKeyLocationInfo> locationInfoList =
+ groupOutputStream.getLocationInfoList();
+ assertEquals(1, locationInfoList.size());
- induceFollowerFailure(omKeyLocationInfo, 2);
+ OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
- try {
+ induceFollowerFailure(omKeyLocationInfo, 2);
key.flush();
key.write("ratis".getBytes(UTF_8));
key.flush();
- key.close();
- } catch (Exception ioe) {
- // Should not fail..
- fail("Exception " + ioe.getMessage());
}
+
validateData("ratis1", 2, "ratisratisratisratis");
}
@Test
- public void testContainerStateMachineDualFailureRetry()
+ void testContainerStateMachineDualFailureRetry()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -774,15 +756,10 @@ public void testContainerStateMachineDualFailureRetry()
induceFollowerFailure(omKeyLocationInfo, 1);
- try {
- key.flush();
- key.write("ratis".getBytes(UTF_8));
- key.flush();
- key.close();
- } catch (Exception ioe) {
- // Should not fail..
- fail("Exception " + ioe.getMessage());
- }
+ key.flush();
+ key.write("ratis".getBytes(UTF_8));
+ key.flush();
+ key.close();
validateData("ratis1", 2, "ratisratisratisratis");
}
@@ -819,31 +796,24 @@ private void induceFollowerFailure(OmKeyLocationInfo omKeyLocationInfo,
}
}
- private void validateData(String key, int locationCount, String payload) {
+ private void validateData(String key, int locationCount, String payload) throws Exception {
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(key)
.build();
- OmKeyInfo keyInfo = null;
- try {
- keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs);
-
- assertEquals(locationCount,
- keyInfo.getLatestVersionLocations().getLocationListCount());
- byte[] buffer = new byte[1024];
- try (OzoneInputStream o = objectStore.getVolume(volumeName)
- .getBucket(bucketName).readKey(key)) {
- o.read(buffer, 0, 1024);
- }
- int end = ArrayUtils.indexOf(buffer, (byte) 0);
- String response = new String(buffer, 0,
- end,
- StandardCharsets.UTF_8);
- assertEquals(payload, response);
- } catch (IOException e) {
- fail("Exception not expected " + e.getMessage());
+ OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs);
+
+ assertEquals(locationCount,
+ keyInfo.getLatestVersionLocations().getLocationListCount());
+ byte[] buffer = new byte[1024];
+ try (OzoneInputStream o = objectStore.getVolume(volumeName)
+ .getBucket(bucketName).readKey(key)) {
+ o.read(buffer, 0, 1024);
}
+ int end = ArrayUtils.indexOf(buffer, (byte) 0);
+ String response = new String(buffer, 0, end, StandardCharsets.UTF_8);
+ assertEquals(payload, response);
}
static FileInfo getSnapshotFileInfo(SimpleStateMachineStorage storage)
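Most of the assertion changes in this file follow one JUnit 5 pattern: replace try/fail/catch with assertThrows where a failure is expected, and leave the call bare where success is expected so any exception fails the test with its own stack trace. A condensed sketch of both directions, reusing names from the hunks above:

    // expected to fail: assertThrows captures the exception for further checks
    IOException e = assertThrows(IOException.class,
        () -> xceiverClient.sendCommand(request.build()));

    // expected to succeed: no wrapper needed, an unexpected IOException fails the test by itself
    stateMachine.takeSnapshot();
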
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index 32fc9ba5c93..23ab89b80c6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -17,16 +17,17 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -108,13 +109,16 @@ public void setup() throws Exception {
conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .applyTo(conf);
+
cluster =
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
.setHbInterval(200)
.setCertificateClient(new CertificateClientTestImpl(conf))
.setSecretKeyClient(new SecretKeyTestClient())
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
index ab2fbeadb61..97a3047bfdb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -17,14 +17,15 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -112,18 +113,20 @@ public void setup() throws Exception {
raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
conf.setFromObject(raftClientConfig);
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setDataStreamMinPacketSize(1024)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
+ .applyTo(conf);
+
conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
conf.setQuietMode(false);
cluster =
MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.setHbInterval(200)
- .setDataStreamMinPacketSize(1024)
- .setBlockSize(BLOCK_SIZE)
- .setChunkSize(CHUNK_SIZE)
- .setStreamBufferFlushSize(FLUSH_SIZE)
- .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
.build();
cluster.waitForClusterToBeReady();
cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index 0363d7aef14..fa50dac64f7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -71,10 +71,9 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeAll;
@@ -201,7 +200,7 @@ public static void shutdown() {
* data is not deleted from any of the nodes which have the closed replica.
*/
@Test
- public void testDeleteKeyWithInAdequateDN() throws Exception {
+ void testDeleteKeyWithInAdequateDN() throws Exception {
String keyName = "ratis";
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -289,14 +288,11 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
deleteKey("ratis");
// make sure the chunk was never deleted on the leader even though
// deleteBlock handler is invoked
- try {
- for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
- keyValueHandler.getChunkManager()
- .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo),
- null);
- }
- } catch (IOException ioe) {
- fail("Exception should not be thrown.");
+
+ for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
+ keyValueHandler.getChunkManager()
+ .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo),
+ null);
}
long numReadStateMachineOps =
stateMachine.getMetrics().getNumReadStateMachineOps();
@@ -319,16 +315,14 @@ public void testDeleteKeyWithInAdequateDN() throws Exception {
.getDispatcher()
.getHandler(ContainerProtos.ContainerType.KeyValueContainer);
// make sure the chunk is now deleted on the all dns
- try {
+ KeyValueHandler finalKeyValueHandler = keyValueHandler;
+ StorageContainerException e = assertThrows(StorageContainerException.class, () -> {
for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
- keyValueHandler.getChunkManager().readChunk(container, blockID,
- ChunkInfo.getFromProtoBuf(chunkInfo), null);
+ finalKeyValueHandler.getChunkManager().readChunk(container, blockID,
+ ChunkInfo.getFromProtoBuf(chunkInfo), null);
}
- fail("Expected exception is not thrown");
- } catch (IOException ioe) {
- StorageContainerException e = assertInstanceOf(StorageContainerException.class, ioe);
- assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, e.getResult());
- }
+ });
+ assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, e.getResult());
}
}
}
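The final alias introduced above exists because a lambda handed to assertThrows may only capture effectively final locals, and keyValueHandler is reassigned earlier in the test. A sketch of the shape, with chunkInfo standing in for the loop over blockData.getChunks():

    KeyValueHandler finalKeyValueHandler = keyValueHandler;   // effectively final copy for the lambda
    StorageContainerException e = assertThrows(StorageContainerException.class,
        () -> finalKeyValueHandler.getChunkManager()
            .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo), null));
    assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, e.getResult());
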
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index c689a692ae7..47891586827 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -85,7 +85,7 @@ public static void init() throws Exception {
chunkSize = (int) OzoneConsts.MB;
blockSize = 4 * chunkSize;
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setChecksumType(ChecksumType.NONE);
conf.setFromObject(config);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index 41438996c27..fadc06bd57b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -17,13 +17,13 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
@@ -35,6 +35,7 @@
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StaticMapping;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -129,14 +130,18 @@ private void init() throws Exception {
StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
Collections.singleton(HddsUtils.getHostName(conf))).get(0),
"/rack1");
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(10)
- .setTotalPipelineNumLimit(15)
- .setChunkSize(chunkSize)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(10)
+ .setTotalPipelineNumLimit(15)
+ .build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
client = OzoneClientFactory.getRpcClient(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index ab09bc24330..919654d82a9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -34,7 +34,6 @@
import java.util.UUID;
import com.google.common.cache.Cache;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
@@ -44,11 +43,13 @@
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.BucketArgs;
@@ -78,6 +79,7 @@
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
+import static org.apache.ozone.test.GenericTestUtils.getTestStartTime;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -107,8 +109,6 @@ class TestOzoneAtRestEncryption {
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
- private static final String SCM_ID = UUID.randomUUID().toString();
- private static final String CLUSTER_ID = UUID.randomUUID().toString();
private static File testDir;
private static OzoneConfiguration conf;
private static final String TEST_KEY = "key1";
@@ -141,13 +141,14 @@ static void init() throws Exception {
conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
CertificateClientTestImpl certificateClientTest =
new CertificateClientTestImpl(conf);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(10)
- .setScmId(SCM_ID)
- .setClusterId(CLUSTER_ID)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(BLOCK_SIZE)
.setChunkSize(CHUNK_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(10)
.setCertificateClient(certificateClientTest)
.setSecretKeyClient(new SecretKeyTestClient())
.build();
@@ -165,7 +166,6 @@ static void init() throws Exception {
TestOzoneRpcClient.setStorageContainerLocationClient(
storageContainerLocationClient);
TestOzoneRpcClient.setStore(store);
- TestOzoneRpcClient.setClusterId(CLUSTER_ID);
// create test key
createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf);
@@ -229,7 +229,7 @@ void testLinkEncryptedBuckets(BucketLayout bucketLayout) throws Exception {
static void createAndVerifyStreamKeyData(OzoneBucket bucket)
throws Exception {
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String keyName = UUID.randomUUID().toString();
String value = "sample value";
try (OzoneDataStreamOutput out = bucket.createStreamKey(keyName,
@@ -242,7 +242,7 @@ static void createAndVerifyStreamKeyData(OzoneBucket bucket)
}
static void createAndVerifyKeyData(OzoneBucket bucket) throws Exception {
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String keyName = UUID.randomUUID().toString();
String value = "sample value";
try (OzoneOutputStream out = bucket.createKey(keyName,
@@ -318,7 +318,7 @@ void testKeyWithEncryptionAndGdpr(BucketLayout bucketLayout)
//Step 1
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String value = "sample value";
store.createVolume(volumeName);
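getTestStartTime() comes from org.apache.ozone.test.GenericTestUtils (imported in the hunk above). The switch away from Instant.now() presumably normalizes the test baseline to the same precision as Ozone's stored creation times; that rationale is an inference, the patch itself only swaps the call:

    Instant testStartTime = getTestStartTime();                        // replaces Instant.now()
    // ... create volume/bucket/key ...
    assertFalse(bucket.getCreationTime().isBefore(testStartTime));     // illustrative assertion
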
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 4867be49066..a89e6176996 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -58,7 +58,6 @@
import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
-import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
@@ -83,13 +82,11 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This test verifies all the S3 multipart client apis - prefix layout.
@@ -101,7 +98,6 @@ public class TestOzoneClientMultipartUploadWithFSO {
private static MiniOzoneCluster cluster = null;
private static OzoneClient ozClient = null;
- private static String scmId = UUID.randomUUID().toString();
private String volumeName;
private String bucketName;
private String keyName;
@@ -140,7 +136,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception {
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5)
.setTotalPipelineNumLimit(10)
- .setScmId(scmId)
.build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
@@ -490,15 +485,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception {
String part1 = new String(data, UTF_8);
sb.append(part1);
assertEquals(sb.toString(), new String(fileContent, UTF_8));
-
- try {
- ozoneOutputStream.close();
- fail("testCommitPartAfterCompleteUpload failed");
- } catch (IOException ex) {
- assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
- ((OMException) ex).getResult());
- }
+ OzoneOutputStream finalOzoneOutputStream = ozoneOutputStream;
+ OMException ex = assertThrows(OMException.class, () -> finalOzoneOutputStream.close());
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ex.getResult());
}
@Test
@@ -523,15 +512,8 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
// Abort before completing part upload.
bucket.abortMultipartUpload(keyName, uploadID);
-
- try {
- ozoneOutputStream.close();
- fail("testAbortUploadFailWithInProgressPartUpload failed");
- } catch (IOException ex) {
- assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
- ((OMException) ex).getResult());
- }
+ OMException ome = assertThrows(OMException.class, () -> ozoneOutputStream.close());
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
}
@Test
@@ -568,8 +550,7 @@ public void testAbortUploadSuccessWithParts() throws Exception {
bucket.abortMultipartUpload(keyName, uploadID);
String multipartOpenKey =
- getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
- metadataMgr);
+ metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID);
OmKeyInfo omKeyInfo =
metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey);
OmMultipartKeyInfo omMultipartKeyInfo =
@@ -853,8 +834,7 @@ private String verifyUploadedPart(String uploadID, String partName,
ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
BucketLayout bucketLayout = buckInfo.getBucketLayout();
String multipartOpenKey =
- getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
- metadataMgr);
+ metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID);
String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName,
keyName, uploadID);
@@ -881,32 +861,6 @@ private String verifyUploadedPart(String uploadID, String partName,
return multipartKey;
}
- private String getMultipartOpenKey(String multipartUploadID,
- String volName, String buckName, String kName,
- OMMetadataManager omMetadataManager) throws IOException {
-
- String fileName = OzoneFSUtils.getFileName(kName);
- final long volumeId = omMetadataManager.getVolumeId(volName);
- final long bucketId = omMetadataManager.getBucketId(volName,
- buckName);
- long parentID = getParentID(volName, buckName, kName,
- omMetadataManager);
-
- String multipartKey = omMetadataManager.getMultipartKey(volumeId, bucketId,
- parentID, fileName, multipartUploadID);
-
- return multipartKey;
- }
-
- private long getParentID(String volName, String buckName,
- String kName, OMMetadataManager omMetadataManager) throws IOException {
- final long volumeId = omMetadataManager.getVolumeId(volName);
- final long bucketId = omMetadataManager.getBucketId(volName,
- buckName);
- return OMFileRequest.getParentID(volumeId, bucketId,
- kName, omMetadataManager);
- }
-
private String initiateMultipartUpload(OzoneBucket oBucket, String kName,
ReplicationType replicationType, ReplicationFactor replicationFactor)
throws IOException {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index 925cfd9d954..c3e8a8d461b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -20,10 +20,10 @@
import java.io.OutputStream;
import java.util.UUID;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -35,6 +35,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -88,7 +89,7 @@ public void init() throws Exception {
maxFlushSize = 2 * flushSize;
blockSize = 2 * maxFlushSize;
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setChecksumType(ChecksumType.NONE);
config.setMaxRetryCount(3);
conf.setFromObject(config);
@@ -98,14 +99,17 @@ public void init() throws Exception {
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
conf.setQuietMode(false);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 5d6b601ad9a..cd99382f300 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -22,10 +22,10 @@
import java.util.List;
import java.util.UUID;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -38,6 +38,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -53,7 +54,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assumptions;
@@ -108,14 +109,16 @@ public void init() throws Exception {
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
conf.setQuietMode(false);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
@@ -188,72 +191,63 @@ public void testGroupMismatchExceptionHandling() throws Exception {
}
@Test
- public void testMaxRetriesByOzoneClient() throws Exception {
+ void testMaxRetriesByOzoneClient() throws Exception {
String keyName = getKeyName();
- OzoneOutputStream key = createKey(
- keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
- KeyOutputStream keyOutputStream =
- assertInstanceOf(KeyOutputStream.class, key.getOutputStream());
-    List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
- assertEquals((MAX_RETRIES + 1),
- keyOutputStream.getStreamEntries().size());
- int dataLength = maxFlushSize + 50;
- // write data more than 1 chunk
- byte[] data1 =
- ContainerTestHelper.getFixedLengthString(keyString, dataLength)
- .getBytes(UTF_8);
- long containerID;
-    List<Long> containerList = new ArrayList<>();
- for (BlockOutputStreamEntry entry : entries) {
- containerID = entry.getBlockID().getContainerID();
- ContainerInfo container =
- cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueOf(containerID));
- Pipeline pipeline =
- cluster.getStorageContainerManager().getPipelineManager()
- .getPipeline(container.getPipelineID());
- XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(pipeline);
- Assumptions.assumeFalse(containerList.contains(containerID));
- containerList.add(containerID);
- xceiverClient.sendCommand(ContainerTestHelper
- .getCreateContainerRequest(containerID, pipeline));
- xceiverClientManager.releaseClient(xceiverClient, false);
- }
- key.write(data1);
- OutputStream stream = entries.get(0).getOutputStream();
- BlockOutputStream blockOutputStream = assertInstanceOf(BlockOutputStream.class, stream);
- TestHelper.waitForContainerClose(key, cluster);
- // Ensure that blocks for the key have been allocated to at least N+1
- // containers so that write request will be tried on N+1 different blocks
- // of N+1 different containers and it will finally fail as it will hit
- // the max retry count of N.
- Assumptions.assumeTrue(containerList.size() > MAX_RETRIES,
- containerList.size() + " <= " + MAX_RETRIES);
- try {
+ try (OzoneOutputStream key = createKey(
+ keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize)) {
+ KeyOutputStream keyOutputStream =
+ assertInstanceOf(KeyOutputStream.class, key.getOutputStream());
+      List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
+ assertEquals((MAX_RETRIES + 1),
+ keyOutputStream.getStreamEntries().size());
+ int dataLength = maxFlushSize + 50;
+ // write data more than 1 chunk
+ byte[] data1 =
+ ContainerTestHelper.getFixedLengthString(keyString, dataLength)
+ .getBytes(UTF_8);
+ long containerID;
+      List<Long> containerList = new ArrayList<>();
+ for (BlockOutputStreamEntry entry : entries) {
+ containerID = entry.getBlockID().getContainerID();
+ ContainerInfo container =
+ cluster.getStorageContainerManager().getContainerManager()
+ .getContainer(ContainerID.valueOf(containerID));
+ Pipeline pipeline =
+ cluster.getStorageContainerManager().getPipelineManager()
+ .getPipeline(container.getPipelineID());
+ XceiverClientSpi xceiverClient =
+ xceiverClientManager.acquireClient(pipeline);
+ Assumptions.assumeFalse(containerList.contains(containerID));
+ containerList.add(containerID);
+ xceiverClient.sendCommand(ContainerTestHelper
+ .getCreateContainerRequest(containerID, pipeline));
+ xceiverClientManager.releaseClient(xceiverClient, false);
+ }
key.write(data1);
- // ensure that write is flushed to dn
- key.flush();
- fail("Expected exception not thrown");
- } catch (IOException ioe) {
+ OutputStream stream = entries.get(0).getOutputStream();
+ BlockOutputStream blockOutputStream = assertInstanceOf(BlockOutputStream.class, stream);
+ TestHelper.waitForContainerClose(key, cluster);
+ // Ensure that blocks for the key have been allocated to at least N+1
+ // containers so that write request will be tried on N+1 different blocks
+ // of N+1 different containers and it will finally fail as it will hit
+ // the max retry count of N.
+ Assumptions.assumeTrue(containerList.size() > MAX_RETRIES,
+ containerList.size() + " <= " + MAX_RETRIES);
+ IOException ioe = assertThrows(IOException.class, () -> {
+ key.write(data1);
+ // ensure that write is flushed to dn
+ key.flush();
+ });
assertInstanceOf(ContainerNotOpenException.class,
HddsClientUtils.checkForException(blockOutputStream.getIoException()));
assertThat(ioe.getMessage()).contains(
- "Retry request failed. " +
- "retries get failed due to exceeded maximum " +
- "allowed retries number: " + MAX_RETRIES);
- }
- try {
- key.flush();
- fail("Expected exception not thrown");
- } catch (IOException ioe) {
+ "Retry request failed. " +
+ "retries get failed due to exceeded maximum " +
+ "allowed retries number: " + MAX_RETRIES);
+
+ ioe = assertThrows(IOException.class, () -> key.flush());
assertThat(ioe.getMessage()).contains("Stream is closed");
}
- try {
- key.close();
- } catch (IOException ioe) {
- fail("Expected should not be thrown");
- }
}
private OzoneOutputStream createKey(String keyName, ReplicationType type,
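The rewritten testMaxRetriesByOzoneClient also shows the resource pattern used across these test changes: the OzoneOutputStream is opened in try-with-resources, expected failures are asserted with assertThrows, and close() is exercised by the try block instead of a trailing try/catch with fail(). A condensed sketch (messages copied from the hunk above, sequencing abbreviated):

    try (OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, size)) {
      key.write(data1);
      IOException ioe = assertThrows(IOException.class, () -> {
        key.write(data1);
        key.flush();                      // retried until the retry limit is exceeded
      });
      assertThat(ioe.getMessage()).contains("allowed retries number: " + MAX_RETRIES);

      ioe = assertThrows(IOException.class, key::flush);
      assertThat(ioe.getMessage()).contains("Stream is closed");
    }                                     // close() runs here; an unexpected failure surfaces directly
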
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 4e0c9bbf5b1..a87d05321e2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -38,7 +38,6 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec;
@@ -49,6 +48,7 @@
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -62,6 +62,7 @@
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OmUtils;
@@ -84,7 +85,6 @@
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -129,6 +129,7 @@
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
+import static org.apache.hadoop.ozone.OmUtils.LOG;
import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
@@ -144,11 +145,11 @@
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
+import static org.apache.ozone.test.GenericTestUtils.getTestStartTime;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
@@ -193,10 +194,6 @@ public abstract class TestOzoneRpcClientAbstract {
private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP,
remoteGroupName, READ, ACCESS);
- private static String scmId = UUID.randomUUID().toString();
- private static String clusterId;
-
-
/**
* Create a MiniOzoneCluster for testing.
* @param conf Configurations to start the cluster.
@@ -205,14 +202,15 @@ public abstract class TestOzoneRpcClientAbstract {
static void startCluster(OzoneConfiguration conf) throws Exception {
// Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop
// for testZReadKeyWithUnhealthyContainerReplica.
- clusterId = UUID.randomUUID().toString();
conf.set("ozone.scm.stale.node.interval", "10s");
+
+ ClientConfigForTesting.newBuilder(StorageUnit.MB)
+ .setDataStreamMinPacketSize(1)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(14)
.setTotalPipelineNumLimit(10)
- .setScmId(scmId)
- .setClusterId(clusterId)
- .setDataStreamMinPacketSize(1) // 1MB
.build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
@@ -266,10 +264,6 @@ public static ObjectStore getStore() {
return TestOzoneRpcClientAbstract.store;
}
- public static void setClusterId(String clusterId) {
- TestOzoneRpcClientAbstract.clusterId = clusterId;
- }
-
public static OzoneClient getClient() {
return TestOzoneRpcClientAbstract.ozClient;
}
@@ -583,7 +577,7 @@ public void testCreateBucketWithMetadata()
@Test
public void testCreateBucket()
throws IOException {
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
store.createVolume(volumeName);
@@ -598,7 +592,7 @@ public void testCreateBucket()
@Test
public void testCreateS3Bucket()
throws IOException {
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String bucketName = UUID.randomUUID().toString();
store.createS3Bucket(bucketName);
OzoneBucket bucket = store.getS3Bucket(bucketName);
@@ -610,7 +604,7 @@ public void testCreateS3Bucket()
@Test
public void testDeleteS3Bucket()
throws Exception {
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String bucketName = UUID.randomUUID().toString();
store.createS3Bucket(bucketName);
OzoneBucket bucket = store.getS3Bucket(bucketName);
@@ -1005,7 +999,7 @@ public void testPutKeyWithReplicationConfig(String replicationValue,
public void testPutKey() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String value = "sample value";
store.createVolume(volumeName);
@@ -1545,7 +1539,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException {
public void testPutKeyRatisOneNode() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String value = "sample value";
store.createVolume(volumeName);
@@ -1580,7 +1574,7 @@ public void testPutKeyRatisOneNode() throws IOException {
public void testPutKeyRatisThreeNodes() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
String value = "sample value";
store.createVolume(volumeName);
@@ -1617,7 +1611,7 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException,
InterruptedException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
@@ -1911,7 +1905,7 @@ public void testReadKeyWithCorruptedData() throws IOException {
// Make this executed at last, for it has some side effect to other UTs
@Test
@Flaky("HDDS-6151")
- public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
+ void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@@ -1994,16 +1988,12 @@ public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
}, 1000, 10000);
// Try reading keyName2
- try {
- GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG);
- try (OzoneInputStream is = bucket.readKey(keyName2)) {
- byte[] content = new byte[100];
- is.read(content);
- String retValue = new String(content, UTF_8);
- assertEquals(value, retValue.trim());
- }
- } catch (IOException e) {
- fail("Reading unhealthy replica should succeed.");
+ GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG);
+ try (OzoneInputStream is = bucket.readKey(keyName2)) {
+ byte[] content = new byte[100];
+ is.read(content);
+ String retValue = new String(content, UTF_8);
+ assertEquals(value, retValue.trim());
}
}
@@ -2012,7 +2002,7 @@ public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
* @throws IOException
*/
@Test
- public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
+ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@@ -2061,8 +2051,6 @@ public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
byte[] b = new byte[data.length];
is.read(b);
assertArrayEquals(b, data);
- } catch (OzoneChecksumException e) {
- fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(1), key);
// Try reading the key. Read will fail on the first node and will eventually
@@ -2071,8 +2059,6 @@ public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
byte[] b = new byte[data.length];
is.read(b);
assertArrayEquals(b, data);
- } catch (OzoneChecksumException e) {
- fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(2), key);
// Try reading the key. Read will fail here as all the replicas are corrupt
@@ -2114,7 +2100,7 @@ private void corruptData(Container container, OzoneKey key)
String containreBaseDir =
container.getContainerData().getVolume().getHddsRootDir().getPath();
File chunksLocationPath = KeyValueContainerLocationUtil
- .getChunksLocationPath(containreBaseDir, clusterId, containerID);
+ .getChunksLocationPath(containreBaseDir, cluster.getClusterId(), containerID);
byte[] corruptData = "corrupted data".getBytes(UTF_8);
// Corrupt the contents of chunk files
for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) {
@@ -2808,13 +2794,10 @@ public void testMultipartUploadWithACL() throws Exception {
String keyName2 = UUID.randomUUID().toString();
OzoneBucket bucket2 = client.getObjectStore().getVolume(volumeName)
.getBucket(bucketName);
- try {
- initiateMultipartUpload(bucket2, keyName2, anyReplication());
- fail("User without permission should fail");
- } catch (Exception e) {
- OMException ome = assertInstanceOf(OMException.class, e);
- assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult());
- }
+ OMException ome =
+ assertThrows(OMException.class, () -> initiateMultipartUpload(bucket2, keyName2, anyReplication()),
+ "User without permission should fail");
+ assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult());
// Add create permission for user, and try multi-upload init again
OzoneAcl acl7 = new OzoneAcl(USER, userName, ACLType.CREATE, DEFAULT);
@@ -2843,12 +2826,12 @@ public void testMultipartUploadWithACL() throws Exception {
completeMultipartUpload(bucket2, keyName2, uploadId, partsMap);
// User without permission cannot read multi-uploaded object
- try (OzoneInputStream ignored = bucket2.readKey(keyName)) {
- fail("User without permission should fail");
- } catch (Exception e) {
- OMException ome = assertInstanceOf(OMException.class, e);
- assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () -> {
+ try (OzoneInputStream ignored = bucket2.readKey(keyName)) {
+ LOG.error("User without permission should fail");
+ }
+ }, "User without permission should fail");
+ assertEquals(ResultCodes.PERMISSION_DENIED, ex.getResult());
}
}
@@ -3052,14 +3035,8 @@ void testAbortUploadFailWithInProgressPartUpload() throws Exception {
// Abort before completing part upload.
bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID());
-
- try {
- ozoneOutputStream.close();
- fail("testAbortUploadFailWithInProgressPartUpload failed");
- } catch (IOException ex) {
- OMException ome = assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
- }
+ OMException ome = assertThrows(OMException.class, () -> ozoneOutputStream.close());
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
}
@Test
@@ -3114,14 +3091,8 @@ void testCommitPartAfterCompleteUpload() throws Exception {
String part1 = new String(data, UTF_8);
sb.append(part1);
assertEquals(sb.toString(), new String(fileContent, UTF_8));
-
- try {
- ozoneOutputStream.close();
- fail("testCommitPartAfterCompleteUpload failed");
- } catch (IOException ex) {
- OMException ome = assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
- }
+ OMException ex = assertThrows(OMException.class, ozoneOutputStream::close);
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ex.getResult());
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
index a830c8c739d..e373b06d950 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
@@ -51,7 +51,6 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.UUID;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
@@ -94,8 +93,6 @@ public class TestOzoneRpcClientForAclAuditLog {
private static ObjectStore store = null;
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
- private static String scmId = UUID.randomUUID().toString();
-
/**
* Create a MiniOzoneCluster for testing.
@@ -127,7 +124,6 @@ public static void init() throws Exception {
private static void startCluster(OzoneConfiguration conf) throws Exception {
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
- .setScmId(scmId)
.build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
index 253193c92e6..ffd80f359ff 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
@@ -48,7 +48,6 @@
import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
@@ -135,8 +134,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read key should succeed");
}
// read file with topology aware read enabled
@@ -144,8 +141,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read file should succeed");
}
// read key with topology aware read disabled
@@ -159,8 +154,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read key should succeed");
}
// read file with topology aware read disabled
@@ -168,8 +161,6 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read file should succeed");
}
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index 3301320c005..15af5a2d8e0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -95,11 +95,12 @@
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
-import static org.junit.Assert.assertThrows;
+import static org.apache.ozone.test.GenericTestUtils.getTestStartTime;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
@@ -114,8 +115,6 @@ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient {
private static StorageContainerLocationProtocolClientSideTranslatorPB
storageContainerLocationClient;
- private static final String SCM_ID = UUID.randomUUID().toString();
- private static final String CLUSTER_ID = UUID.randomUUID().toString();
private static File testDir;
private static OzoneConfiguration conf;
@@ -149,8 +148,6 @@ public static void init() throws Exception {
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(14)
- .setScmId(SCM_ID)
- .setClusterId(CLUSTER_ID)
.setCertificateClient(certificateClientTest)
.setSecretKeyClient(new SecretKeyTestClient())
.build();
@@ -167,7 +164,6 @@ public static void init() throws Exception {
TestOzoneRpcClient.setStorageContainerLocationClient(
storageContainerLocationClient);
TestOzoneRpcClient.setStore(store);
- TestOzoneRpcClient.setClusterId(CLUSTER_ID);
}
/**
@@ -187,7 +183,7 @@ private void testPutKeySuccessWithBlockTokenWithBucketLayout(
BucketLayout bucketLayout) throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
- Instant testStartTime = Instant.now();
+ Instant testStartTime = getTestStartTime();
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
String value = "sample value";
@@ -426,7 +422,7 @@ public void testS3Auth() throws Exception {
// Add secret to S3Secret table.
s3SecretManager.storeSecret(accessKey,
- new S3SecretValue(accessKey, secret));
+ S3SecretValue.of(accessKey, secret));
OMRequest writeRequest = OMRequest.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
@@ -475,7 +471,7 @@ public void testS3Auth() throws Exception {
// Override secret to S3Secret store with some dummy value
s3SecretManager
- .storeSecret(accessKey, new S3SecretValue(accessKey, "dummy"));
+ .storeSecret(accessKey, S3SecretValue.of(accessKey, "dummy"));
// Write request with invalid credentials.
omResponse = cluster.getOzoneManager().getOmServerProtocol()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index d03c57bf4e4..f8e9b552e3e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -22,16 +22,16 @@
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
@@ -48,6 +48,7 @@
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -69,7 +70,7 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
import org.junit.jupiter.api.AfterEach;
@@ -140,13 +141,16 @@ public void init() throws Exception {
conf.setFromObject(raftClientConfig);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(9)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(9)
.build();
cluster.waitForClusterToBeReady();
cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, 60000);
@@ -275,25 +279,20 @@ public void testWatchForCommitForRetryfailure() throws Exception {
cluster.getStorageContainerManager()
.getPipelineManager().closePipeline(pipeline, false);
// again write data with more than max buffer limit. This wi
- try {
- // just watch for a log index which in not updated in the commitInfo Map
- // as well as there is no logIndex generate in Ratis.
- // The basic idea here is just to test if its throws an exception.
- xceiverClient
- .watchForCommit(index + new Random().nextInt(100) + 10);
- fail("expected exception not thrown");
- } catch (Exception e) {
- assertInstanceOf(ExecutionException.class, e);
- // since the timeout value is quite long, the watch request will either
- // fail with NotReplicated exceptio, RetryFailureException or
- // RuntimeException
- assertFalse(HddsClientUtils
- .checkForException(e) instanceof TimeoutException);
- // client should not attempt to watch with
- // MAJORITY_COMMITTED replication level, except the grpc IO issue
- if (!logCapturer.getOutput().contains("Connection refused")) {
- assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED");
- }
+ // just watch for a log index which is not updated in the commitInfo Map
+ // and for which no log index has been generated in Ratis.
+ // The basic idea here is just to test whether it throws an exception.
+ ExecutionException e = assertThrows(ExecutionException.class,
+ () -> xceiverClient.watchForCommit(index + RandomUtils.nextInt(0, 100) + 10));
+ // since the timeout value is quite long, the watch request will either
+ // fail with NotReplicatedException, RetryFailureException or
+ // RuntimeException
+ assertFalse(HddsClientUtils
+ .checkForException(e) instanceof TimeoutException);
+ // client should not attempt to watch with
+ // MAJORITY_COMMITTED replication level, except when there is a gRPC IO issue
+ if (!logCapturer.getOutput().contains("Connection refused")) {
+ assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED");
}
clientManager.releaseClient(xceiverClient, false);
}
@@ -368,17 +367,13 @@ public void testWatchForCommitForGroupMismatchException() throws Exception {
List pipelineList = new ArrayList<>();
pipelineList.add(pipeline);
TestHelper.waitForPipelineClose(pipelineList, cluster);
- try {
- // just watch for a log index which in not updated in the commitInfo Map
- // as well as there is no logIndex generate in Ratis.
- // The basic idea here is just to test if its throws an exception.
- xceiverClient
- .watchForCommit(reply.getLogIndex() +
- new Random().nextInt(100) + 10);
- fail("Expected exception not thrown");
- } catch (Exception e) {
- assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e));
- }
+ // just watch for a log index which is not updated in the commitInfo Map
+ // and for which no log index has been generated in Ratis.
+ // The basic idea here is just to test whether it throws an exception.
+ Exception e =
+ assertThrows(Exception.class,
+ () -> xceiverClient.watchForCommit(reply.getLogIndex() + RandomUtils.nextInt(0, 100) + 10));
+ assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e));
clientManager.releaseClient(xceiverClient, false);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
index 22ad4f036cf..9f5d04c56f9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
@@ -20,11 +20,12 @@
import java.util.UUID;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
@@ -45,7 +46,7 @@ protected static MiniOzoneCluster newCluster(
ContainerLayoutVersion containerLayout) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setBytesPerChecksum(BYTES_PER_CHECKSUM);
conf.setFromObject(config);
@@ -63,14 +64,16 @@ protected static MiniOzoneCluster newCluster(
repConf.setInterval(Duration.ofSeconds(1));
conf.setFromObject(repConf);
- return MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(5)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(BLOCK_SIZE)
.setChunkSize(CHUNK_SIZE)
.setStreamBufferFlushSize(FLUSH_SIZE)
.setStreamBufferMaxSize(MAX_FLUSH_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ return MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(5)
.build();
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 28d19d0be87..810a5725492 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -28,7 +28,7 @@
import static org.apache.hadoop.ozone.container.TestHelper.waitForContainerClose;
import static org.apache.hadoop.ozone.container.TestHelper.waitForReplicaCount;
import static org.apache.ozone.test.GenericTestUtils.setLogLevel;
-import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.io.IOException;
@@ -108,7 +108,7 @@ void testContainerReplication(
createTestData(client);
List keyLocations = lookupKey(cluster);
- assertFalse(keyLocations.isEmpty());
+ assertThat(keyLocations).isNotEmpty();
OmKeyLocationInfo keyLocation = keyLocations.get(0);
long containerID = keyLocation.getContainerID();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
index b50f2ac8d68..e045b48bda9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
@@ -16,11 +16,11 @@
*/
package org.apache.hadoop.ozone.container;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@@ -34,6 +34,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -52,7 +53,6 @@
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -72,6 +72,8 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doAnswer;
/**
* Tests the EC recovery and over replication processing.
@@ -138,11 +140,17 @@ public static void init() throws Exception {
TimeUnit.MILLISECONDS);
conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1,
TimeUnit.SECONDS);
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
- .setTotalPipelineNumLimit(10).setBlockSize(blockSize)
- .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
+ .setTotalPipelineNumLimit(10)
+ .build();
cluster.waitForClusterToBeReady();
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
@@ -308,7 +316,7 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception {
.mockFieldReflection(handler,
"coordinator");
- Mockito.doAnswer(invocation -> {
+ doAnswer(invocation -> {
GenericTestUtils.waitFor(() ->
dn.getDatanodeStateMachine()
.getContainer()
@@ -320,8 +328,8 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception {
reconstructedDN.set(dn);
invocation.callRealMethod();
return null;
- }).when(coordinator).reconstructECBlockGroup(Mockito.any(), Mockito.any(),
- Mockito.any(), Mockito.any());
+ }).when(coordinator).reconstructECBlockGroup(any(), any(),
+ any(), any());
}
// Shutting down DN triggers close pipeline and close container.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index 665e07630d9..d6bb591979f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -65,7 +65,9 @@
import org.slf4j.LoggerFactory;
import static java.util.stream.Collectors.toList;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -205,7 +207,7 @@ public static void waitForContainerClose(OzoneOutputStream outputStream,
containerIdList.add(id);
}
}
- assertFalse(containerIdList.isEmpty());
+ assertThat(containerIdList).isNotEmpty();
waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
}
@@ -223,7 +225,7 @@ public static void waitForContainerClose(OzoneDataStreamOutput outputStream,
containerIdList.add(id);
}
}
- assertFalse(containerIdList.isEmpty());
+ assertThat(containerIdList).isNotEmpty();
waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
}
@@ -241,7 +243,7 @@ public static void waitForPipelineClose(OzoneOutputStream outputStream,
containerIdList.add(id);
}
}
- assertFalse(containerIdList.isEmpty());
+ assertThat(containerIdList).isNotEmpty();
waitForPipelineClose(cluster, waitForContainerCreation,
containerIdList.toArray(new Long[0]));
}
@@ -296,7 +298,7 @@ public static void waitForPipelineClose(List pipelineList,
XceiverServerSpi server =
cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn))
.getDatanodeStateMachine().getContainer().getWriteChannel();
- assertTrue(server instanceof XceiverServerRatis);
+ assertInstanceOf(XceiverServerRatis.class, server);
GenericTestUtils.waitFor(() -> !server.isExist(pipelineId),
100, 30_000);
}
@@ -313,7 +315,7 @@ public static void createPipelineOnDatanode(Pipeline pipeline,
cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn))
.getDatanodeStateMachine().getContainer()
.getWriteChannel();
- assertTrue(server instanceof XceiverServerRatis);
+ assertInstanceOf(XceiverServerRatis.class, server);
try {
server.addGroup(pipeline.getId().getProtobuf(), Collections.
unmodifiableList(pipeline.getNodes()));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 5dec1799b40..d5564ac2315 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -72,7 +72,6 @@
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
@@ -95,14 +94,20 @@
import static java.lang.Math.max;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds
- .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.ozone
- .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests for Block deletion.
@@ -246,23 +251,23 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
}
}, 1000, 10000);
// No containers with deleted blocks
- Assertions.assertTrue(containerIdsWithDeletedBlocks.isEmpty());
+ assertThat(containerIdsWithDeletedBlocks).isEmpty();
// Delete transactionIds for the containers should be 0.
// NOTE: this test assumes that all the containers are KeyValueContainers. If
// other container types are going to be added, this test should be checked.
matchContainerTransactionIds();
- Assertions.assertEquals(0L,
+ assertEquals(0L,
metrics.getNumBlockDeletionTransactionCreated());
writeClient.deleteKey(keyArgs);
Thread.sleep(5000);
// The blocks should not be deleted in the DN as the container is open
- Throwable e = Assertions.assertThrows(AssertionError.class,
+ Throwable e = assertThrows(AssertionError.class,
() -> verifyBlocksDeleted(omKeyLocationInfoGroupList));
- Assertions.assertTrue(
+ assertTrue(
e.getMessage().startsWith("expected: but was:"));
- Assertions.assertEquals(0L, metrics.getNumBlockDeletionTransactionSent());
+ assertEquals(0L, metrics.getNumBlockDeletionTransactionSent());
// close the containers which hold the blocks for the key
OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm);
@@ -291,7 +296,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
}, 2000, 30000);
// Few containers with deleted blocks
- Assertions.assertFalse(containerIdsWithDeletedBlocks.isEmpty());
+ assertThat(containerIdsWithDeletedBlocks).isNotEmpty();
// Containers in the DN and SCM should have same delete transactionIds
matchContainerTransactionIds();
@@ -312,13 +317,13 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
cluster.restartHddsDatanode(0, true);
matchContainerTransactionIds();
- Assertions.assertEquals(metrics.getNumBlockDeletionTransactionCreated(),
+ assertEquals(metrics.getNumBlockDeletionTransactionCreated(),
metrics.getNumBlockDeletionTransactionCompleted());
- Assertions.assertTrue(metrics.getNumBlockDeletionCommandSent() >=
- metrics.getNumBlockDeletionCommandSuccess() +
+ assertThat(metrics.getNumBlockDeletionCommandSent())
+ .isGreaterThanOrEqualTo(metrics.getNumBlockDeletionCommandSuccess() +
metrics.getBNumBlockDeletionCommandFailure());
- Assertions.assertTrue(metrics.getNumBlockDeletionTransactionSent() >=
- metrics.getNumBlockDeletionTransactionFailure() +
+ assertThat(metrics.getNumBlockDeletionTransactionSent())
+ .isGreaterThanOrEqualTo(metrics.getNumBlockDeletionTransactionFailure() +
metrics.getNumBlockDeletionTransactionSuccess());
LOG.info(metrics.toString());
@@ -326,8 +331,8 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
for (int i = 5; i >= 0; i--) {
if (logCapturer.getOutput().contains("1(" + i + ")")) {
for (int j = 0; j <= i; j++) {
- Assertions.assertTrue(logCapturer.getOutput()
- .contains("1(" + i + ")"));
+ assertThat(logCapturer.getOutput())
+ .contains("1(" + i + ")");
}
break;
}
@@ -367,8 +372,8 @@ public void testContainerStatisticsAfterDelete() throws Exception {
final int valueSize = value.getBytes(UTF_8).length;
final int keyCount = 1;
containerInfos.stream().forEach(container -> {
- Assertions.assertEquals(valueSize, container.getUsedBytes());
- Assertions.assertEquals(keyCount, container.getNumberOfKeys());
+ assertEquals(valueSize, container.getUsedBytes());
+ assertEquals(keyCount, container.getNumberOfKeys());
});
OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm);
@@ -389,7 +394,7 @@ public void testContainerStatisticsAfterDelete() throws Exception {
containerMap.values().forEach(container -> {
KeyValueContainerData containerData =
(KeyValueContainerData)container.getContainerData();
- Assertions.assertEquals(0, containerData.getNumPendingDeletionBlocks());
+ assertEquals(0, containerData.getNumPendingDeletionBlocks());
});
});
@@ -398,7 +403,7 @@ public void testContainerStatisticsAfterDelete() throws Exception {
((EventQueue)scm.getEventQueue()).processAll(1000);
containerInfos = scm.getContainerManager().getContainers();
containerInfos.stream().forEach(container ->
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETING,
+ assertEquals(HddsProtos.LifeCycleState.DELETING,
container.getState()));
LogCapturer logCapturer = LogCapturer.captureLogs(
legacyEnabled ? LegacyReplicationManager.LOG : ReplicationManager.LOG);
@@ -422,14 +427,14 @@ public void testContainerStatisticsAfterDelete() throws Exception {
List infos = scm.getContainerManager().getContainers();
try {
infos.stream().forEach(container -> {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
} catch (IOException e) {
- Assertions.fail(
+ fail(
"Container from SCM DB should be marked as DELETED");
}
});
@@ -477,8 +482,8 @@ public void testContainerStateAfterDNRestart() throws Exception {
final int keyCount = 1;
List containerIdList = new ArrayList<>();
containerInfos.stream().forEach(container -> {
- Assertions.assertEquals(valueSize, container.getUsedBytes());
- Assertions.assertEquals(keyCount, container.getNumberOfKeys());
+ assertEquals(valueSize, container.getUsedBytes());
+ assertEquals(keyCount, container.getNumberOfKeys());
containerIdList.add(container.getContainerID());
});
@@ -499,14 +504,14 @@ public void testContainerStateAfterDNRestart() throws Exception {
ContainerID containerId = ContainerID.valueOf(
containerInfos.get(0).getContainerID());
// Before restart container state is non-empty
- Assertions.assertFalse(getContainerFromDN(
+ assertFalse(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
// Restart DataNode
cluster.restartHddsDatanode(0, true);
// After restart also container state remains non-empty.
- Assertions.assertFalse(getContainerFromDN(
+ assertFalse(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
@@ -526,14 +531,14 @@ public void testContainerStateAfterDNRestart() throws Exception {
100, 10 * 1000);
// Container state should be empty now as key got deleted
- Assertions.assertTrue(getContainerFromDN(
+ assertTrue(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
// Restart DataNode
cluster.restartHddsDatanode(0, true);
// Container state should be empty even after restart
- Assertions.assertTrue(getContainerFromDN(
+ assertTrue(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
@@ -543,14 +548,14 @@ public void testContainerStateAfterDNRestart() throws Exception {
List infos = scm.getContainerManager().getContainers();
try {
infos.stream().forEach(container -> {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
} catch (IOException e) {
- Assertions.fail(
+ fail(
"Container from SCM DB should be marked as DELETED");
}
});
@@ -607,8 +612,8 @@ public void testContainerDeleteWithInvalidKeyCount()
final int keyCount = 1;
List containerIdList = new ArrayList<>();
containerInfos.stream().forEach(container -> {
- Assertions.assertEquals(valueSize, container.getUsedBytes());
- Assertions.assertEquals(keyCount, container.getNumberOfKeys());
+ assertEquals(valueSize, container.getUsedBytes());
+ assertEquals(keyCount, container.getNumberOfKeys());
containerIdList.add(container.getContainerID());
});
@@ -635,7 +640,7 @@ public void testContainerDeleteWithInvalidKeyCount()
= scm.getContainerManager().getContainerReplicas(containerId);
// Ensure for all replica isEmpty are false in SCM
- Assertions.assertTrue(scm.getContainerManager().getContainerReplicas(
+ assertTrue(scm.getContainerManager().getContainerReplicas(
containerId).stream().
allMatch(replica -> !replica.isEmpty()));
@@ -680,14 +685,14 @@ public void testContainerDeleteWithInvalidKeyCount()
List infos = scm.getContainerManager().getContainers();
try {
infos.stream().forEach(container -> {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
} catch (IOException e) {
- Assertions.fail(
+ fail(
"Container from SCM DB should be marked as DELETED");
}
});
@@ -702,7 +707,7 @@ public void testContainerDeleteWithInvalidKeyCount()
private void verifyTransactionsCommitted() throws IOException {
scm.getScmBlockManager().getDeletedBlockLog();
for (long txnID = 1; txnID <= maxTransactionId; txnID++) {
- Assertions.assertNull(
+ assertNull(
scm.getScmMetadataStore().getDeletedBlocksTXTable().get(txnID));
}
}
@@ -716,15 +721,15 @@ private void matchContainerTransactionIds() throws IOException {
for (ContainerData containerData : containerDataList) {
long containerId = containerData.getContainerID();
if (containerIdsWithDeletedBlocks.contains(containerId)) {
- Assertions.assertTrue(
- scm.getContainerInfo(containerId).getDeleteTransactionId() > 0);
+ assertThat(scm.getContainerInfo(containerId).getDeleteTransactionId())
+ .isGreaterThan(0);
maxTransactionId = max(maxTransactionId,
scm.getContainerInfo(containerId).getDeleteTransactionId());
} else {
- Assertions.assertEquals(
+ assertEquals(
scm.getContainerInfo(containerId).getDeleteTransactionId(), 0);
}
- Assertions.assertEquals(
+ assertEquals(
((KeyValueContainerData) dnContainerSet.getContainer(containerId)
.getContainerData()).getDeleteTransactionId(),
scm.getContainerInfo(containerId).getDeleteTransactionId());
@@ -741,7 +746,7 @@ private void verifyBlocksCreated(
KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet
.getContainer(blockID.getContainerID()).getContainerData();
try (DBHandle db = BlockUtils.getDB(cData, conf)) {
- Assertions.assertNotNull(db.getStore().getBlockDataTable()
+ assertNotNull(db.getStore().getBlockDataTable()
.get(cData.getBlockKey(blockID.getLocalID())));
}
}, omKeyLocationInfoGroups);
@@ -763,11 +768,11 @@ private void verifyBlocksDeleted(
String blockKey = cData.getBlockKey(blockID.getLocalID());
BlockData blockData = blockDataTable.get(blockKey);
- Assertions.assertNull(blockData);
+ assertNull(blockData);
String deletingKey = cData.getDeletingBlockKey(
blockID.getLocalID());
- Assertions.assertNull(blockDataTable.get(deletingKey));
+ assertNull(blockDataTable.get(deletingKey));
}
containerIdsWithDeletedBlocks.add(blockID.getContainerID());
}, omKeyLocationInfoGroups);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index c62f943ee87..cd25ee25c8f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -58,6 +58,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -159,8 +160,7 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
.waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
500, 5 * 1000);
// Make sure the closeContainerCommandHandler is Invoked
- assertTrue(
- closeContainerHandler.getInvocationCount() > lastInvocationCount);
+ assertThat(closeContainerHandler.getInvocationCount()).isGreaterThan(lastInvocationCount);
}
@Test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index ec47c76d94d..7cb3c7797fa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -45,9 +45,10 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -117,7 +118,7 @@ public void test() throws Exception {
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
- Assertions.assertFalse(isContainerClosed(cluster, containerId.getId()));
+ assertFalse(isContainerClosed(cluster, containerId.getId()));
DatanodeDetails datanodeDetails =
cluster.getHddsDatanodes().get(0).getDatanodeDetails();
@@ -135,7 +136,7 @@ public void test() throws Exception {
5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(cluster, containerId.getId()));
+ assertTrue(isContainerClosed(cluster, containerId.getId()));
}
private static Boolean isContainerClosed(MiniOzoneCluster cluster,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 332683658b1..00654d943f7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -57,7 +57,6 @@
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -77,6 +76,10 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests DeleteContainerCommand Handler.
@@ -165,8 +168,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -189,8 +191,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
@@ -217,8 +218,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
lingeringBlock.createNewFile();
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// Set container blockCount to 0 to mock that it is empty as per RocksDB
getContainerfromDN(hddsDatanodeService, containerId.getId())
@@ -243,10 +243,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
contains("Files still part of the container on delete"),
500,
5 * 2000);
- Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
- Assertions.assertTrue(beforeDeleteFailedCount <
- metrics.getContainerDeleteFailedNonEmpty());
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
+ assertThat(beforeDeleteFailedCount).isLessThan(metrics.getContainerDeleteFailedNonEmpty());
// Send the delete command. It should pass with force flag.
// Deleting a non-empty container should pass on the DN when the force flag
// is true
@@ -260,10 +258,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
- Assertions.assertTrue(beforeForceCount <
- metrics.getContainerForceDelete());
+ assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId()));
+ assertThat(beforeForceCount).isLessThan(metrics.getContainerForceDelete());
kv.setCheckChunksFilePath(false);
}
@@ -297,8 +293,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -313,8 +308,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
@@ -341,8 +335,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
lingeringBlock.createNewFile();
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// send delete container to the datanode
SCMCommand<?> command = new DeleteContainerCommand(containerId.getId(),
@@ -357,8 +350,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId()));
}
@Test
@@ -384,8 +376,7 @@ public void testDeleteNonEmptyContainerBlockTable()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -418,11 +409,11 @@ public void testDeleteNonEmptyContainerBlockTable()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
+ assertTrue(isContainerClosed(hddsDatanodeService,
containerId.getId()));
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
+ assertFalse(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
long containerDeleteFailedNonEmptyBlockDB =
@@ -446,10 +437,10 @@ public void testDeleteNonEmptyContainerBlockTable()
contains("the container is not empty with blockCount"),
500,
5 * 2000);
- Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService,
+ assertFalse(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
- Assertions.assertTrue(containerDeleteFailedNonEmptyBlockDB <
- metrics.getContainerDeleteFailedNonEmpty());
+ assertThat(containerDeleteFailedNonEmptyBlockDB)
+ .isLessThan(metrics.getContainerDeleteFailedNonEmpty());
// Now empty the container Dir and try with a non-empty block table
Container containerToDelete = getContainerfromDN(
@@ -470,8 +461,7 @@ public void testDeleteNonEmptyContainerBlockTable()
cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command);
Thread.sleep(5000);
- Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// Send the delete command. It should pass with force flag.
long beforeForceCount = metrics.getContainerForceDelete();
command = new DeleteContainerCommand(containerId.getId(), true);
@@ -483,10 +473,9 @@ public void testDeleteNonEmptyContainerBlockTable()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
+ assertTrue(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
- Assertions.assertTrue(beforeForceCount <
- metrics.getContainerForceDelete());
+ assertThat(beforeForceCount).isLessThan(metrics.getContainerForceDelete());
}
@Test
@@ -507,8 +496,7 @@ public void testContainerDeleteWithInvalidBlockCount()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
NodeManager nodeManager =
@@ -525,12 +513,10 @@ public void testContainerDeleteWithInvalidBlockCount()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// Clear block table
clearBlocksTable(getContainerfromDN(hddsDatanodeService,
@@ -561,8 +547,7 @@ public void testContainerDeleteWithInvalidBlockCount()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId()));
}
@@ -612,8 +597,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -630,12 +614,10 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// send delete container to the datanode
SCMCommand<?> command = new DeleteContainerCommand(containerId.getId(),
@@ -656,8 +638,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
ContainerMetrics metrics =
hddsDatanodeService
.getDatanodeStateMachine().getContainer().getMetrics();
- Assertions.assertEquals(1,
- metrics.getContainerDeleteFailedNonEmpty());
+ assertEquals(1, metrics.getContainerDeleteFailedNonEmpty());
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
@@ -678,7 +659,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
+ assertTrue(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
}
@@ -723,7 +704,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer()
}
}
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
+ assertFalse(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
@@ -738,7 +719,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer()
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
+ assertTrue(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java
index ef65977017f..e60b1581b32 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+import jakarta.annotation.Nonnull;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -47,7 +48,6 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.ozone.test.GenericTestUtils;
-import org.jetbrains.annotations.NotNull;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
@@ -273,7 +273,7 @@ private void testRejectPutAndWriteChunkAfterFinalizeBlock(ContainerID containerI
}
}
- @NotNull
+ @Nonnull
private ContainerProtos.ContainerCommandRequestProto getFinalizeBlockRequest(
List omKeyLocationInfoGroupList,
ContainerInfo container) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
index c47f0993099..23382b2abe6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
@@ -30,7 +30,6 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -43,6 +42,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test the behaviour of the datanode and scm when communicating
@@ -101,7 +101,7 @@ public void test() throws Exception {
//a new key is created, but the datanode default REFRESH_PERIOD is 1 hour,
//still the cache is updated, so the scm will eventually get the new
//used space from the datanode through node report.
- Assertions.assertTrue(cluster.getStorageContainerManager()
+ assertTrue(cluster.getStorageContainerManager()
.getScmNodeManager().getUsageInfo(datanodeDetails)
.getScmNodeStat().getScmUsed().isEqual(currentScmUsed));
@@ -116,7 +116,7 @@ public void test() throws Exception {
//after waiting for several node report , this usage info
//in SCM should be updated as we have updated the DN's cached usage info.
- Assertions.assertTrue(cluster.getStorageContainerManager()
+ assertTrue(cluster.getStorageContainerManager()
.getScmNodeManager().getUsageInfo(datanodeDetails)
.getScmNodeStat().getScmUsed().isGreater(currentScmUsed));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index cd6dfb171c0..8c35d5011a5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -53,8 +53,8 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.util.ExitUtils;
@@ -66,7 +66,6 @@
import org.apache.ratis.util.function.CheckedBiFunction;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Assertions;
/**
* This class tests the metrics of ContainerStateMachine.
@@ -142,7 +141,7 @@ static void runContainerStateMachineMetrics(
pipeline, blockID, 1024);
ContainerCommandResponseProto response =
client.sendCommand(writeChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
metric = getMetrics(CSMMetrics.SOURCE_NAME +
@@ -160,7 +159,7 @@ static void runContainerStateMachineMetrics(
ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
.getWriteChunk());
response = client.sendCommand(readChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
metric = getMetrics(CSMMetrics.SOURCE_NAME +
@@ -169,10 +168,10 @@ static void runContainerStateMachineMetrics(
assertCounter("NumApplyTransactionOps", 1L, metric);
applyTransactionLatency = getDoubleGauge(
"ApplyTransactionNsAvgTime", metric);
- assertTrue(applyTransactionLatency > 0.0);
+ assertThat(applyTransactionLatency).isGreaterThan(0.0);
writeStateMachineLatency = getDoubleGauge(
"WriteStateMachineDataNsAvgTime", metric);
- assertTrue(writeStateMachineLatency > 0.0);
+ assertThat(writeStateMachineLatency).isGreaterThan(0.0);
} finally {
if (client != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 0b83c650fe0..d4900bb4878 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -55,7 +55,8 @@
import static org.apache.ozone.test.MetricsAsserts.assertCounter;
import static org.apache.ozone.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.ozone.test.MetricsAsserts.getMetrics;
-import org.junit.jupiter.api.Assertions;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -121,7 +122,7 @@ public void testContainerMetrics() throws Exception {
pipeline, blockID, 1024);
ContainerCommandResponseProto response =
client.sendCommand(writeChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
//Read Chunk
@@ -129,7 +130,7 @@ public void testContainerMetrics() throws Exception {
ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
.getWriteChunk());
response = client.sendCommand(readChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
MetricsRecordBuilder containerMetrics = getMetrics(
"StorageContainerMetrics");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
index 06e1f933749..a1d436b3360 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
@@ -17,14 +17,12 @@
package org.apache.hadoop.ozone.container.metrics;
-import org.apache.commons.text.WordUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -32,12 +30,13 @@
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.UUID;
+import static org.apache.commons.text.WordUtils.capitalize;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.COMMAND_DISPATCHER_QUEUE_PREFIX;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.STATE_CONTEXT_COMMAND_QUEUE_PREFIX;
import static org.apache.ozone.test.MetricsAsserts.getLongGauge;
import static org.apache.ozone.test.MetricsAsserts.getMetrics;
+import static org.assertj.core.api.Assertions.assertThat;
/**
* Test for queue metrics of datanodes.
@@ -47,8 +46,6 @@ public class TestDatanodeQueueMetrics {
private MiniOzoneHAClusterImpl cluster = null;
private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
private String omServiceId;
private static int numOfOMs = 3;
private String scmServiceId;
@@ -68,13 +65,9 @@ public class TestDatanodeQueueMetrics {
public void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
- clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
omServiceId = "om-service-test1";
scmServiceId = "scm-service-test1";
cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)
- .setClusterId(clusterId)
- .setScmId(scmId)
.setOMServiceId(omServiceId)
.setSCMServiceId(scmServiceId)
.setNumOfStorageContainerManagers(numOfSCMs)
@@ -89,14 +82,12 @@ public void init() throws Exception {
@Test
public void testQueueMetrics() {
-
for (SCMCommandProto.Type type: SCMCommandProto.Type.values()) {
- Assertions.assertTrue(
- getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX +
- WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0);
- Assertions.assertTrue(
- getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX +
- WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0);
+ String typeSize = capitalize(String.valueOf(type)) + "Size";
+ assertThat(getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX + typeSize))
+ .isGreaterThanOrEqualTo(0);
+ assertThat(getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + typeSize))
+ .isGreaterThanOrEqualTo(0);
}
}
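Aside (not part of the patch): the hunks above and below repeatedly swap JUnit's assertTrue for AssertJ's assertThat, so failures report the actual gauge value or log output instead of just "expected true". A minimal, self-contained sketch of the target style, using only the APIs these test files now import; the class name and the local stand-in values are made up for illustration.

import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;

class ExampleMetricsAssertionStyleTest {

  @Test
  void gaugeAndLogAssertions() {
    long queueSize = 0L;                             // stand-in for getGauge(...)
    double latency = 1.5;                            // stand-in for getDoubleGauge(...)
    String logOutput = "trying to re-fetch rootCA";  // stand-in for logs.getOutput()

    // Instead of assertTrue(queueSize >= 0), AssertJ prints the offending value on failure.
    assertThat(queueSize).isGreaterThanOrEqualTo(0);
    assertThat(latency).isGreaterThan(0.0);

    // Instead of assertTrue(output.contains(...)), the failure message shows the full string.
    assertThat(logOutput).contains("re-fetch rootCA");
  }
}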
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 3a94f3410df..7a64ddc5d5e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -48,7 +48,6 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
@@ -91,7 +90,7 @@ public void testCreateOzoneContainer(
}
@Test
- public void testOzoneContainerStart(
+ void testOzoneContainerStart(
@TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
OzoneConfiguration conf = newOzoneConfiguration();
OzoneContainer container = null;
@@ -110,18 +109,12 @@ public void testOzoneContainerStart(
String clusterId = UUID.randomUUID().toString();
container.start(clusterId);
- try {
- container.start(clusterId);
- } catch (Exception e) {
- fail();
- }
+
+ container.start(clusterId);
+
+ container.stop();
container.stop();
- try {
- container.stop();
- } catch (Exception e) {
- fail();
- }
} finally {
if (container != null) {
@@ -199,7 +192,7 @@ public static void runTestOzoneContainerViaDataNode(
response = client.sendCommand(request);
int chunksCount = putBlockRequest.getPutBlock().getBlockData().
getChunksCount();
- ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
+ ContainerTestHelper.verifyGetBlock(response, chunksCount);
// Delete Block and Delete Chunk are handled by BlockDeletingService
// ContainerCommandRequestProto DeleteBlock and DeleteChunk requests
@@ -367,7 +360,7 @@ public void testCloseContainer(
response = client.sendCommand(request);
int chunksCount = putBlockRequest.getPutBlock().getBlockData()
.getChunksCount();
- ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
+ ContainerTestHelper.verifyGetBlock(response, chunksCount);
} finally {
if (client != null) {
client.close();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index 841f344fc34..73910ef00ff 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -81,12 +80,13 @@
import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
import static org.apache.ozone.test.GenericTestUtils.LogCapturer.captureLogs;
import static org.apache.ozone.test.GenericTestUtils.waitFor;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.slf4j.LoggerFactory.getLogger;
@@ -257,7 +257,7 @@ public void testLongLivingClientWithCertRenews() throws Exception {
while (e.getCause() != null) {
e = e.getCause();
}
- assertTrue((e instanceof CertificateExpiredException));
+ assertInstanceOf(CertificateExpiredException.class, e);
} finally {
clientManager.releaseClient(client, true);
}
@@ -288,10 +288,12 @@ private void assertClientTrustManagerLoading(
}
private void assertClientTrustManagerFailedAndRetried(LogCapturer logs) {
- assertTrue(logs.getOutput().contains("trying to re-fetch rootCA"),
- "Check client failed first, and initiates a reload.");
- assertTrue(logs.getOutput().contains("Loading certificates for client."),
- "Check client loaded certificates.");
+ assertThat(logs.getOutput())
+ .withFailMessage("Check client failed first, and initiates a reload.")
+ .contains("trying to re-fetch rootCA");
+ assertThat(logs.getOutput())
+ .withFailMessage("Check client loaded certificates.")
+ .contains("Loading certificates for client.");
logs.clearOutput();
}
@@ -320,8 +322,8 @@ private void assertDownloadContainerFails(long containerId,
sourceDatanodes, tempFolder.resolve("tmp"), NO_COMPRESSION);
downloader.close();
assertNull(file);
- assertTrue(logCapture.getOutput().contains(
- "java.security.cert.CertificateExpiredException"));
+ assertThat(logCapture.getOutput())
+ .contains("java.security.cert.CertificateExpiredException");
}
private void assertDownloadContainerWorks(List containers,
@@ -352,20 +354,15 @@ private Token createContainer(
}
private long createAndCloseContainer(
- XceiverClientSpi client, boolean useToken) {
+ XceiverClientSpi client, boolean useToken) throws IOException {
long id = getTestContainerID();
- try {
- Token
- token = createContainer(client, useToken, id);
-
- ContainerCommandRequestProto request =
- getCloseContainer(client.getPipeline(), id, token);
- ContainerCommandResponseProto response = client.sendCommand(request);
- assertNotNull(response);
- assertSame(response.getResult(), ContainerProtos.Result.SUCCESS);
- } catch (Exception e) {
- Assertions.fail(e);
- }
+ Token token = createContainer(client, useToken, id);
+
+ ContainerCommandRequestProto request =
+ getCloseContainer(client.getPipeline(), id, token);
+ ContainerCommandResponseProto response = client.sendCommand(request);
+ assertNotNull(response);
+ assertSame(response.getResult(), ContainerProtos.Result.SUCCESS);
return id;
}
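Aside (not part of the patch): two other recurring changes in this file are letting checked exceptions propagate out of test methods instead of wrapping calls in try/catch plus fail(), and replacing manual instanceof checks with JUnit 5's assertInstanceOf. A hedged sketch of both, with hypothetical names (ExampleExceptionStyleTest, doWork); only the JUnit 5 API the diff already imports is assumed.

import static org.junit.jupiter.api.Assertions.assertInstanceOf;

import java.io.IOException;
import java.security.cert.CertificateExpiredException;
import org.junit.jupiter.api.Test;

class ExampleExceptionStyleTest {

  // Declaring the checked exception lets JUnit report the real stack trace,
  // replacing try { ... } catch (Exception e) { fail(e); } boilerplate.
  @Test
  void checkedExceptionsPropagate() throws IOException {
    doWork();
  }

  @Test
  void rootCauseType() {
    Exception wrapped = new RuntimeException(new CertificateExpiredException());
    Throwable cause = wrapped;
    while (cause.getCause() != null) {
      cause = cause.getCause();
    }
    // assertInstanceOf checks the type and returns the value already cast.
    CertificateExpiredException expired =
        assertInstanceOf(CertificateExpiredException.class, cause);
    // 'expired' could now be inspected further if the test needed to.
  }

  private void doWork() throws IOException {
    // no-op stand-in for the container create/close calls in the real test
  }
}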
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java
index d3d9ad55c11..08932aa4e37 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerReplication.java
@@ -43,6 +43,7 @@
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
+import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -88,13 +89,8 @@ static void setup() throws Exception {
}
@AfterAll
- static void tearDown() throws IOException {
- if (clientFactory != null) {
- clientFactory.close();
- }
- if (cluster != null) {
- cluster.shutdown();
- }
+ static void tearDown() {
+ IOUtils.closeQuietly(clientFactory, cluster);
}
@ParameterizedTest
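Aside (not part of the patch): tearDown above now delegates the null checks and close/shutdown calls to IOUtils.closeQuietly from org.apache.hadoop.hdds.utils. The real helper may differ, for example in how it logs close failures; the rough, hypothetical equivalent below is shown only to make the intent of that one-liner explicit.

import java.util.Arrays;

final class CloseQuietlySketch {

  private CloseQuietlySketch() {
  }

  // Close each non-null resource, swallowing failures so teardown never fails the run.
  static void closeQuietly(AutoCloseable... resources) {
    Arrays.stream(resources).forEach(resource -> {
      if (resource != null) {
        try {
          resource.close();
        } catch (Exception ignored) {
          // intentionally ignored: best-effort cleanup
        }
      }
    });
  }
}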
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 0451ba5c98e..3c89bb12ee7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -67,7 +67,6 @@
import org.apache.ratis.util.function.CheckedBiConsumer;
import org.apache.ratis.util.function.CheckedBiFunction;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -75,6 +74,7 @@
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
/**
* Test Containers.
@@ -89,6 +89,7 @@ public class TestContainerServer {
public static void setup() {
DefaultMetricsSystem.setMiniClusterMode(true);
CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR);
+ CONF.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, false);
DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
caClient = new DNCertificateClient(new SecurityConfig(CONF), null,
dn, null, null, null);
@@ -170,7 +171,7 @@ static void runTestClientServer(
ContainerTestHelper
.getCreateContainerRequest(
ContainerTestHelper.getTestContainerID(), pipeline);
- Assertions.assertNotNull(request.getTraceID());
+ assertNotNull(request.getTraceID());
client.sendCommand(request);
} finally {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 2880d90db2f..53420c0e220 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -95,23 +95,21 @@
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.ratis.rpc.RpcType;
-
-import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
-
import org.apache.ratis.util.ExitUtils;
import org.apache.ratis.util.function.CheckedBiConsumer;
import org.apache.ratis.util.function.CheckedBiFunction;
+
+import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Test Container servers when security is enabled.
@@ -318,9 +316,9 @@ private static void assertFailsTokenVerification(XceiverClientSpi client,
ContainerCommandResponseProto response = client.sendCommand(request);
assertNotEquals(response.getResult(), ContainerProtos.Result.SUCCESS);
String msg = response.getMessage();
- assertTrue(msg.contains(BLOCK_TOKEN_VERIFICATION_FAILED.name()), msg);
+ assertThat(msg).contains(BLOCK_TOKEN_VERIFICATION_FAILED.name());
} else {
- final Throwable t = Assertions.assertThrows(Throwable.class,
+ final Throwable t = assertThrows(Throwable.class,
() -> client.sendCommand(request));
assertRootCauseMessage(BLOCK_TOKEN_VERIFICATION_FAILED.name(), t);
}
@@ -331,7 +329,7 @@ private static void assertRootCauseMessage(String contained, Throwable t) {
Throwable rootCause = ExceptionUtils.getRootCause(t);
assertNotNull(rootCause);
String msg = rootCause.getMessage();
- assertTrue(msg.contains(contained), msg);
+ assertThat(msg).contains(contained);
}
private static String getToken(ContainerID containerID) throws IOException {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
index 0d65d81c5ed..ec7eb81db33 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java
@@ -36,9 +36,8 @@
import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
-import org.jetbrains.annotations.NotNull;
+import jakarta.annotation.Nonnull;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Named;
import org.junit.jupiter.api.Test;
@@ -62,6 +61,8 @@
import java.util.stream.Stream;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* This class tests `ozone debug ldb` CLI that reads from a RocksDB directory.
@@ -197,8 +198,8 @@ private static Stream scanTestCases() {
@ParameterizedTest
@MethodSource("scanTestCases")
void testLDBScan(
- @NotNull Pair tableAndOption,
- @NotNull Pair expectedExitCodeStderrPair,
+ @Nonnull Pair tableAndOption,
+ @Nonnull Pair expectedExitCodeStderrPair,
List |