+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms;
+
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Deque;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Pipeline choose policy that randomly chooses a pipeline with relatively
+ * lower utilization.
+ *
+ * The algorithm is as follows: pick two random pipelines from the given pool
+ * of pipelines, then pick the one with lower utilization. This gives
+ * pipelines with lower utilization a higher probability of being picked.
+ *
+ * For background on why two pipelines are sampled at random and the one with
+ * lower utilization is kept, see the links to the original papers in
+ * HDFS-11564. The same algorithm is used by SCMContainerPlacementCapacity.
+ */
+public class CapacityPipelineChoosePolicy implements PipelineChoosePolicy {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(CapacityPipelineChoosePolicy.class);
+
+ private NodeManager nodeManager;
+
+ private final PipelineChoosePolicy healthPolicy;
+
+ public CapacityPipelineChoosePolicy() {
+ healthPolicy = new HealthyPipelineChoosePolicy();
+ }
+
+ @Override
+ public PipelineChoosePolicy init(final NodeManager scmNodeManager) {
+ this.nodeManager = scmNodeManager;
+ return this;
+ }
+
+ @Override
+ public Pipeline choosePipeline(List<Pipeline> pipelineList,
+ PipelineRequestInformation pri) {
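+ // Sample two candidate pipelines via the healthy-pipeline policy, then keep
+ // the one with lower utilization (the "power of two choices" scheme).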
+ Pipeline pipeline1 = healthPolicy.choosePipeline(pipelineList, pri);
+ Pipeline pipeline2 = healthPolicy.choosePipeline(pipelineList, pri);
+
+ int result = new CapacityPipelineComparator(this)
+ .compare(pipeline1, pipeline2);
+
+ LOG.debug("Chosen the {} pipeline", result <= 0 ? "first" : "second");
+ return result <= 0 ? pipeline1 : pipeline2;
+ }
+
+ @Override
+ public int choosePipelineIndex(List<Pipeline> pipelineList,
+ PipelineRequestInformation pri) {
+ List<Pipeline> mutableList = new ArrayList<>(pipelineList);
+ Pipeline pipeline = choosePipeline(mutableList, pri);
+ return pipelineList.indexOf(pipeline);
+ }
+
+ /**
+ * Return the SCMNodeMetrics of the datanodes in the pipeline, sorted in
+ * descending order based on SCM used storage.
+ * @param pipeline pipeline
+ * @return sorted SCMNodeMetrics corresponding to the pipeline
+ */
+ private Deque<SCMNodeMetric> getSortedNodeFromPipeline(Pipeline pipeline) {
+ Deque<SCMNodeMetric> sortedNodeStack = new ArrayDeque<>();
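+ // The metrics' natural ordering is ascending by used space; pushing them
+ // onto the stack reverses it, so the most heavily used node is popped first.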
+ pipeline.getNodes().stream()
+ .map(nodeManager::getNodeStat)
+ .filter(Objects::nonNull)
+ .sorted()
+ .forEach(sortedNodeStack::push);
+ return sortedNodeStack;
+ }
+
+ static class CapacityPipelineComparator implements Comparator<Pipeline> {
+ private final CapacityPipelineChoosePolicy policy;
+
+ CapacityPipelineComparator(CapacityPipelineChoosePolicy policy) {
+ this.policy = policy;
+ }
+ @Override
+ public int compare(Pipeline p1, Pipeline p2) {
+ if (p1.getId().equals(p2.getId())) {
+ LOG.debug("Compare the same pipeline {}", p1);
+ return 0;
+ }
+ Deque<SCMNodeMetric> sortedNodes1 = policy.getSortedNodeFromPipeline(p1);
+ Deque<SCMNodeMetric> sortedNodes2 = policy.getSortedNodeFromPipeline(p2);
+
+ // Compare the scmUsed weight of the node in the two sorted node stacks
+ LOG.debug("Compare scmUsed weight in pipelines, first : {}, second : {}",
+ sortedNodes1, sortedNodes2);
+ int result = 0;
+ int count = 0;
+ while (result == 0 &&
+ !sortedNodes1.isEmpty() && !sortedNodes2.isEmpty()) {
+ count++;
+ LOG.debug("Compare {} round", count);
+ result = sortedNodes1.pop().compareTo(sortedNodes2.pop());
+ }
+ return result;
+ }
+ }
+
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
index d040dbe2bcaf..90736a018132 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -48,14 +49,14 @@ public final class PipelineChoosePolicyFactory {
private PipelineChoosePolicyFactory() {
}
- public static PipelineChoosePolicy getPolicy(
+ public static PipelineChoosePolicy getPolicy(final NodeManager nodeManager,
ScmConfig scmConfig, boolean forEC) throws SCMException {
Class<? extends PipelineChoosePolicy> policyClass = null;
String policyName = forEC ? scmConfig.getECPipelineChoosePolicyName() :
scmConfig.getPipelineChoosePolicyName();
try {
policyClass = getClass(policyName, PipelineChoosePolicy.class);
- return createPipelineChoosePolicyFromClass(policyClass);
+ return createPipelineChoosePolicyFromClass(nodeManager, policyClass);
} catch (Exception e) {
Class<? extends PipelineChoosePolicy> defaultPolicy = forEC ?
OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT :
@@ -64,13 +65,14 @@ public static PipelineChoosePolicy getPolicy(
LOG.error("Met an exception while create pipeline choose policy "
+ "for the given class {}. Fallback to the default pipeline "
+ " choose policy {}", policyName, defaultPolicy, e);
- return createPipelineChoosePolicyFromClass(defaultPolicy);
+ return createPipelineChoosePolicyFromClass(nodeManager, defaultPolicy);
}
throw e;
}
}
private static PipelineChoosePolicy createPipelineChoosePolicyFromClass(
+ final NodeManager nodeManager,
Class<? extends PipelineChoosePolicy> policyClass) throws SCMException {
Constructor<? extends PipelineChoosePolicy> constructor;
try {
@@ -86,7 +88,7 @@ private static PipelineChoosePolicy createPipelineChoosePolicyFromClass(
}
try {
- return constructor.newInstance();
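+ // init() hands the policy a NodeManager so capacity-aware policies can
+ // query node storage metrics.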
+ return constructor.newInstance().init(nodeManager);
} catch (Exception e) {
throw new RuntimeException("Failed to instantiate class " +
policyClass.getCanonicalName() + " for " + e.getMessage());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 7738d0e3907e..f402b9309fe4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipRequestProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipResponseProto;
@@ -51,6 +52,9 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainersOnDecomNodeProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto;
@@ -92,6 +96,8 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto;
@@ -120,6 +126,7 @@
import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
+import org.apache.hadoop.util.ProtobufUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -460,6 +467,13 @@ public ScmContainerLocationResponse processRequest(
.setNodeQueryResponse(queryNode(request.getNodeQueryRequest(),
request.getVersion()))
.build();
+ case SingleNodeQuery:
+ return ScmContainerLocationResponse.newBuilder()
+ .setCmdType(request.getCmdType())
+ .setStatus(Status.OK)
+ .setSingleNodeQueryResponse(querySingleNode(request
+ .getSingleNodeQueryRequest()))
+ .build();
case CloseContainer:
return ScmContainerLocationResponse.newBuilder()
.setCmdType(request.getCmdType())
@@ -604,6 +618,12 @@ public ScmContainerLocationResponse processRequest(
.setDecommissionNodesResponse(decommissionNodes(
request.getDecommissionNodesRequest()))
.build();
+ case GetContainersOnDecomNode:
+ return ScmContainerLocationResponse.newBuilder()
+ .setCmdType(request.getCmdType())
+ .setStatus(Status.OK)
+ .setGetContainersOnDecomNodeResponse(getContainersOnDecomNode(request.getGetContainersOnDecomNodeRequest()))
+ .build();
case RecommissionNodes:
return ScmContainerLocationResponse.newBuilder()
.setCmdType(request.getCmdType())
@@ -866,6 +886,16 @@ public NodeQueryResponseProto queryNode(
.build();
}
+ public SingleNodeQueryResponseProto querySingleNode(
+ SingleNodeQueryRequestProto request)
+ throws IOException {
+
+ HddsProtos.Node datanode = impl.queryNode(ProtobufUtils.fromProtobuf(request.getUuid()));
+ return SingleNodeQueryResponseProto.newBuilder()
+ .setDatanode(datanode)
+ .build();
+ }
+
public SCMCloseContainerResponseProto closeContainer(
SCMCloseContainerRequestProto request)
throws IOException {
@@ -1140,6 +1170,22 @@ public DecommissionNodesResponseProto decommissionNodes(
return response.build();
}
+ public GetContainersOnDecomNodeResponseProto getContainersOnDecomNode(GetContainersOnDecomNodeRequestProto request)
+ throws IOException {
+ Map<String, List<ContainerID>> containerMap = impl.getContainersOnDecomNode(
+ DatanodeDetails.getFromProtoBuf(request.getDatanodeDetails()));
+ List<ContainersOnDecomNodeProto> containersProtoList = new ArrayList<>();
+ for (Map.Entry<String, List<ContainerID>> containerList : containerMap.entrySet()) {
+ List<HddsProtos.ContainerID> containerIdsProto = new ArrayList<>();
+ for (ContainerID id : containerList.getValue()) {
+ containerIdsProto.add(id.getProtobuf());
+ }
+ containersProtoList.add(ContainersOnDecomNodeProto.newBuilder().setName(containerList.getKey())
+ .addAllId(containerIdsProto).build());
+ }
+ return GetContainersOnDecomNodeResponseProto.newBuilder().addAllContainersOnDecomNode(containersProtoList).build();
+ }
+
public RecommissionNodesResponseProto recommissionNodes(
RecommissionNodesRequestProto request) throws IOException {
List<DatanodeAdminError> errors =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 3d38fdbe8199..13bef8590b79 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -109,6 +109,7 @@
import java.util.TreeSet;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import java.util.UUID;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService.newReflectiveBlockingService;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
@@ -587,6 +588,15 @@ public void deleteContainer(long containerID) throws IOException {
}
}
+ @Override
+ public Map<String, List<ContainerID>> getContainersOnDecomNode(DatanodeDetails dn) throws IOException {
+ try {
+ return scm.getScmDecommissionManager().getContainersReplicatedOnNode(dn);
+ } catch (NodeNotFoundException e) {
+ throw new IOException("Failed to get containers list. Unable to find required node", e);
+ }
+ }
+
@Override
public List<HddsProtos.Node> queryNode(
HddsProtos.NodeOperationalState opState, HddsProtos.NodeState state,
@@ -613,6 +623,27 @@ public List<HddsProtos.Node> queryNode(
return result;
}
+ @Override
+ public HddsProtos.Node queryNode(UUID uuid)
+ throws IOException {
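+ // Look up the datanode by UUID; the result is null if the node is not
+ // registered with SCM.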
+ HddsProtos.Node result = null;
+ try {
+ DatanodeDetails node = scm.getScmNodeManager().getNodeByUuid(uuid);
+ if (node != null) {
+ NodeStatus ns = scm.getScmNodeManager().getNodeStatus(node);
+ result = HddsProtos.Node.newBuilder()
+ .setNodeID(node.getProtoBufMessage())
+ .addNodeStates(ns.getHealth())
+ .addNodeOperationalStates(ns.getOperationalState())
+ .build();
+ }
+ } catch (NodeNotFoundException e) {
+ throw new IOException(
+ "An unexpected error occurred querying the NodeStatus", e);
+ }
+ return result;
+ }
+
@Override
public List<DatanodeAdminError> decommissionNodes(List<String> nodes)
throws IOException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 1a3ea2515f2d..046be68760c6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -804,9 +804,9 @@ private void initializeSystemManagers(OzoneConfiguration conf,
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
pipelineChoosePolicy = PipelineChoosePolicyFactory
- .getPolicy(scmConfig, false);
+ .getPolicy(scmNodeManager, scmConfig, false);
ecPipelineChoosePolicy = PipelineChoosePolicyFactory
- .getPolicy(scmConfig, true);
+ .getPolicy(scmNodeManager, scmConfig, true);
if (configurator.getWritableContainerFactory() != null) {
writableContainerFactory = configurator.getWritableContainerFactory();
} else {
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
index 214a2ad7868a..fdd8de15b6a9 100644
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
@@ -48,6 +48,10 @@
<th>Node Status</th>
<th>HostName</th>
+ <th>Used Space Percent</th>
+ <th>Capacity</th>
<th>Operational State</th>
element.key === "USEDSPACEPERCENT").value,
+ capacity: value && value.find((element) => element.key === "CAPACITY").value,
comstate: value && value.find((element) => element.key === "COMSTATE").value,
lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value,
port: portSpec.port,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index be57aa8ea6a3..9292ffa865c7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.block;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -70,7 +71,6 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
@@ -218,9 +218,8 @@ private Map<Long, List<Long>> generateData(int dataSize) throws IOException {
private Map<Long, List<Long>> generateData(int dataSize,
HddsProtos.LifeCycleState state) throws IOException {
Map<Long, List<Long>> blockMap = new HashMap<>();
- Random random = new Random(1);
- int continerIDBase = random.nextInt(100);
- int localIDBase = random.nextInt(1000);
+ int continerIDBase = RandomUtils.nextInt(0, 100);
+ int localIDBase = RandomUtils.nextInt(0, 1000);
for (int i = 0; i < dataSize; i++) {
long containerID = continerIDBase + i;
updateContainerMetadata(containerID, state);
@@ -692,13 +691,12 @@ public void testInadequateReplicaCommit() throws Exception {
@Test
public void testRandomOperateTransactions() throws Exception {
mockContainerHealthResult(true);
- Random random = new Random();
int added = 0, committed = 0;
List<DeletedBlocksTransaction> blocks = new ArrayList<>();
List<Long> txIDs;
// Randomly add/get/commit/increase transactions.
for (int i = 0; i < 100; i++) {
- int state = random.nextInt(4);
+ int state = RandomUtils.nextInt(0, 4);
if (state == 0) {
addTransactions(generateData(10), true);
added += 10;
@@ -803,8 +801,7 @@ public void testDeletedBlockTransactions()
// add two transactions for same container
containerID = blocks.get(0).getContainerID();
Map<Long, List<Long>> deletedBlocksMap = new HashMap<>();
- Random random = new Random();
- long localId = random.nextLong();
+ long localId = RandomUtils.nextLong();
deletedBlocksMap.put(containerID, new LinkedList<>(
Collections.singletonList(localId)));
addTransactions(deletedBlocksMap, true);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 39e19135efa2..3ed6ac89d6fb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -19,9 +19,11 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.Random;
import java.util.stream.IntStream;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.StringUtils;
+
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -48,8 +50,6 @@
import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
-import org.apache.commons.lang3.StringUtils;
-
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
@@ -625,7 +625,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) {
for (int i = 0; i < 10; i++) {
// Set a random DN to in_service and ensure it is always picked
- int index = new Random().nextInt(dnInfos.size());
+ int index = RandomUtils.nextInt(0, dnInfos.size());
dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
try {
List datanodeDetails =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
index ca86cb689fb0..3f724ba44a86 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java
@@ -39,11 +39,9 @@
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.ratis.protocol.exceptions.NotLeaderException;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
import java.io.IOException;
import java.util.ArrayList;
@@ -605,11 +603,11 @@ public void testUnderReplicationWithVulnerableReplicasOnUniqueOrigins() throws I
DECOMMISSIONING, State.UNHEALTHY, sequenceID);
replicas.add(unhealthyReplica);
UnderReplicatedHealthResult result = getUnderReplicatedHealthResult();
- Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true);
+ when(result.hasVulnerableUnhealthy()).thenReturn(true);
final Set<Pair<DatanodeDetails, SCMCommand<?>>> commands = testProcessing(replicas, Collections.emptyList(),
result, 2, 1);
- Assertions.assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey());
+ assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey());
}
/**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index fe1cdcc06957..c67008c097ba 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -56,7 +56,6 @@
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
import java.io.IOException;
import java.time.Instant;
@@ -530,7 +529,7 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit
ContainerReplicaProto.State.UNHEALTHY);
replicas.add(unhealthy);
storeContainerAndReplicas(container, replicas);
- Mockito.when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
+ when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
.thenAnswer(invocation -> {
DatanodeDetails dn = invocation.getArgument(0);
if (dn.equals(unhealthy.getDatanodeDetails())) {
@@ -550,9 +549,9 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit
assertEquals(0, repQueue.overReplicatedQueueSize());
// next, this test sets up some mocks to test if RatisUnderReplicationHandler will handle this container correctly
- Mockito.when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(),
+ when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(),
anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails()));
- Mockito.when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any()))
+ when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any()))
.thenAnswer(invocation -> {
Map<SCMCommandProto.Type, Integer> map = new HashMap<>();
map.put(SCMCommandProto.Type.replicateContainerCommand, 0);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
index 8fa4c974e1ba..28eccd5211c3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import java.util.Collections;
import java.util.HashSet;
@@ -190,7 +189,7 @@ public void testReturnsTrueForQuasiClosedContainerWithVulnerableReplicaWhenAllRe
ContainerReplica unhealthy =
createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId);
replicas.add(unhealthy);
- Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class)))
+ when(replicationManager.getNodeStatus(any(DatanodeDetails.class)))
.thenAnswer(invocation -> {
DatanodeDetails dn = invocation.getArgument(0);
if (dn.equals(unhealthy.getDatanodeDetails())) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index 523d4226cb43..f4002a7da1ed 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -41,7 +41,6 @@
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import java.io.IOException;
import java.util.Collections;
@@ -406,7 +405,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN
replicas.add(unhealthy);
nodeManager.setContainers(dn1, ImmutableSet.of(containerID));
- Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID)))
+ when(repManager.getContainerReplicaCount(eq(containerID)))
.thenReturn(new RatisContainerReplicaCount(container, replicas,
Collections.emptyList(), 2, false));
DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, true);
@@ -430,7 +429,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
.build();
replicas.add(copyOfUnhealthyOnNewNode);
- Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID)))
+ when(repManager.getContainerReplicaCount(eq(containerID)))
.thenReturn(new RatisContainerReplicaCount(container, replicas,
Collections.emptyList(), 2, false));
DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, false);
@@ -837,6 +836,50 @@ public void testCancelledNodesMovedToInService()
nodeManager.getNodeStatus(dn1).getOperationalState());
}
+ @Test
+ public void testContainersReplicatedOnDecomDnAPI()
+ throws NodeNotFoundException, ContainerNotFoundException {
+ conf.setBoolean("hdds.scm.replication.enable.legacy", false);
+
+ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+
+ Set<ContainerID> containers = new HashSet<>();
+ containers.add(ContainerID.valueOf(1));
+ containers.add(ContainerID.valueOf(2));
+ nodeManager.setContainers(dn1, containers);
+ DatanodeAdminMonitorTestUtil
+ .mockGetContainerReplicaCount(repManager,
+ true,
+ HddsProtos.LifeCycleState.CLOSED,
+ DECOMMISSIONING,
+ IN_SERVICE,
+ IN_SERVICE);
+
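+ // First pass: both containers should be reported as UnderReplicated and
+ // none as UnClosed.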
+ monitor.startMonitoring(dn1);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ assertEquals(2, monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size());
+ assertEquals(0, monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size());
+
+ DatanodeAdminMonitorTestUtil
+ .mockGetContainerReplicaCount(repManager,
+ true,
+ HddsProtos.LifeCycleState.OPEN,
+ IN_SERVICE);
+
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ assertEquals(0, monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size());
+ assertEquals(2, monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size());
+ }
+
/**
* Generate a set of ContainerID, starting from an ID of zero up to the given
* count minus 1.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 85a70b646739..930774a54bf3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -86,6 +86,7 @@
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
import static java.util.Collections.emptyList;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -123,6 +124,8 @@
import static org.mockito.Mockito.eq;
import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
import org.junit.jupiter.params.provider.ValueSource;
import org.mockito.ArgumentCaptor;
import org.slf4j.Logger;
@@ -1572,6 +1575,49 @@ public void testScmStatsFromNodeReport()
}
}
+ private List<StorageReportProto> generateStorageReportProto(
+ int volumeCount, UUID dnId, long capacity, long used, long remaining) {
+ List<StorageReportProto> reports = new ArrayList<>(volumeCount);
+ boolean failed = true;
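+ // Alternate the failed flag so successive volume reports toggle between
+ // failed and healthy.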
+ for (int x = 0; x < volumeCount; x++) {
+ String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+ reports.add(HddsTestUtils
+ .createStorageReport(dnId, storagePath, capacity,
+ used, remaining, null, failed));
+ failed = !failed;
+ }
+ return reports;
+ }
+
+ private static Stream<Arguments> calculateStoragePercentageScenarios() {
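+ // Each scenario: per-volume capacity, used, remaining, volume count,
+ // expected capacity string, expected scmUsed percent, non-scmUsed percent.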
+ return Stream.of(
+ Arguments.of(600, 65, 500, 1, "600.0B", "10.83", "5.83"),
+ Arguments.of(10000, 1000, 8800, 12, "117.2KB", "10.00", "2.00"),
+ Arguments.of(100000000, 1000, 899999, 12, "1.1GB", "0.00", "99.10"),
+ Arguments.of(10000, 1000, 0, 0, "0.0B", "N/A", "N/A"),
+ Arguments.of(0, 0, 0, 0, "0.0B", "N/A", "N/A"),
+ Arguments.of(1010, 547, 400, 5, "4.9KB", "54.16", "6.24")
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource("calculateStoragePercentageScenarios")
+ public void testCalculateStoragePercentage(long perCapacity,
+ long used, long remaining, int volumeCount, String totalCapacity,
+ String scmUsedPerc, String nonScmUsedPerc) {
+ DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+ UUID dnId = dn.getUuid();
+ List<StorageReportProto> reports = volumeCount > 0 ?
+ generateStorageReportProto(volumeCount, dnId, perCapacity,
+ used, remaining) : null;
+ String capacityResult = SCMNodeManager.calculateStorageCapacity(reports);
+ assertEquals(totalCapacity, capacityResult);
+ String[] storagePercentage = SCMNodeManager.calculateStoragePercentage(
+ reports);
+ assertEquals(scmUsedPerc, storagePercentage[0]);
+ assertEquals(nonScmUsedPerc, storagePercentage[1]);
+ }
+
/**
* Test multiple nodes sending initial heartbeat with their node report
* with multiple volumes.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java
index 54d2ffed8284..4f86450d03e7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java
@@ -34,7 +34,11 @@
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
+import org.apache.hadoop.hdds.scm.net.NodeSchema;
+import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
import org.apache.hadoop.hdds.scm.pipeline.WritableECContainerProvider.WritableECContainerProviderConfig;
+import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.CapacityPipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.HealthyPipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy;
import org.apache.hadoop.hdds.utils.db.DBStore;
@@ -54,8 +58,13 @@
import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import static org.apache.hadoop.hdds.conf.StorageUnit.BYTES;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -84,7 +93,7 @@ public class TestWritableECContainerProvider {
private OzoneConfiguration conf;
private DBStore dbStore;
private SCMHAManager scmhaManager;
- private MockNodeManager nodeManager;
+ private static MockNodeManager nodeManager;
private WritableContainerProvider provider;
private ECReplicationConfig repConfig;
@@ -93,8 +102,20 @@ public class TestWritableECContainerProvider {
public static Collection<PipelineChoosePolicy> policies() {
Collection<PipelineChoosePolicy> policies = new ArrayList<>();
+ // init nodeManager
+ NodeSchemaManager.getInstance().init(new NodeSchema[]
+ {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, true);
+ NetworkTopologyImpl cluster =
+ new NetworkTopologyImpl(NodeSchemaManager.getInstance());
+ int count = 10;
+ List<DatanodeDetails> datanodes = IntStream.range(0, count)
+ .mapToObj(i -> MockDatanodeDetails.randomDatanodeDetails())
+ .collect(Collectors.toList());
+ nodeManager = new MockNodeManager(cluster, datanodes, false, count);
+
policies.add(new RandomPipelineChoosePolicy());
policies.add(new HealthyPipelineChoosePolicy());
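+ // The capacity policy reads node storage stats, so it is initialized with
+ // the NodeManager built above.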
+ policies.add(new CapacityPipelineChoosePolicy().init(nodeManager));
return policies;
}
@@ -110,7 +131,6 @@ void setup(@TempDir File testDir) throws IOException {
dbStore = DBStoreBuilder.createDBStore(
conf, new SCMDBDefinition());
scmhaManager = SCMHAManagerStub.getInstance(true);
- nodeManager = new MockNodeManager(true, 10);
pipelineManager =
new MockPipelineManager(dbStore, scmhaManager, nodeManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java
new file mode 100644
index 000000000000..421d2396bfaf
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.test;
-
-import org.junit.rules.TestRule;
-import org.junit.runner.Description;
-import org.junit.runners.model.Statement;
-
-import java.util.Objects;
-
-/**
- * Disables the delegate rule if the given system property matches a specific
- * value.
- */
-public class DisableOnProperty implements TestRule {
-
- private final TestRule delegate;
- private final boolean enabled;
-
- public DisableOnProperty(TestRule delegate, String key, String value) {
- this.delegate = delegate;
- enabled = !Objects.equals(value, System.getProperty(key, ""));
- }
-
- @Override
- public Statement apply(Statement base, Description description) {
- return enabled ? delegate.apply(base, description) : base;
- }
-}
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java
deleted file mode 100644
index 22840bd7a304..000000000000
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ozone.test;
-
-import org.junit.rules.Timeout;
-
-/**
- * Disables the given JUnit4 timeout rule if JUnit5-specific timeout-mode is set
- * to "disabled".
- */
-public class JUnit5AwareTimeout extends DisableOnProperty {
-
- public JUnit5AwareTimeout(Timeout delegate) {
- super(delegate, "junit.jupiter.execution.timeout.mode", "disabled");
- }
-
-}
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
index 28d3b936ecab..fb74a22c6266 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java
@@ -47,6 +47,8 @@
/**
* Helpers for metrics source tests.
+ *
+ * Copied from Hadoop and migrated to AssertJ.
*/
public final class MetricsAsserts {
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 665b56d3ab08..5122f1d4a450 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 7aa91cec73c8..d07e696e7ef0 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -59,6 +59,7 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.UUID;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT;
@@ -215,6 +216,11 @@ public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
}
}
+ @Override
+ public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException {
+ return storageContainerLocationClient.getContainersOnDecomNode(dn);
+ }
+
@Override
public List queryNode(
HddsProtos.NodeOperationalState opState,
@@ -225,6 +231,11 @@ public List<HddsProtos.Node> queryNode(
queryScope, poolName, ClientVersion.CURRENT_VERSION);
}
+ @Override
+ public HddsProtos.Node queryNode(UUID uuid) throws IOException {
+ return storageContainerLocationClient.queryNode(uuid);
+ }
+
@Override
public List decommissionNodes(List hosts)
throws IOException {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java
index 8cb2114f57db..dde4ef505477 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java
@@ -17,10 +17,12 @@
*/
package org.apache.hadoop.hdds.scm.cli.datanode;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.cli.OzoneAdmin;
import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.kohsuke.MetaInfServices;
import picocli.CommandLine;
import picocli.CommandLine.Model.CommandSpec;
@@ -46,6 +48,9 @@
@MetaInfServices(SubcommandWithParent.class)
public class DatanodeCommands implements Callable<Void>, SubcommandWithParent {
+ @CommandLine.ParentCommand
+ private OzoneAdmin parent;
+
@Spec
private CommandSpec spec;
@@ -55,6 +60,14 @@ public Void call() throws Exception {
return null;
}
+ public OzoneAdmin getParent() {
+ return parent;
+ }
+
+ @VisibleForTesting
+ public void setParent(OzoneConfiguration conf) {
+ parent = new OzoneAdmin(conf);
+ }
@Override
public Class<?> getParentType() {
return OzoneAdmin.class;
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
index bbf1d8407605..be7ff94b2262 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
@@ -17,20 +17,49 @@
*/
package org.apache.hadoop.hdds.scm.cli.datanode;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
+import com.google.gson.Gson;
+import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.hdds.server.http.HttpConfig;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import picocli.CommandLine;
+import javax.net.ssl.HttpsURLConnection;
import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import static java.net.HttpURLConnection.HTTP_CREATED;
+import static java.net.HttpURLConnection.HTTP_OK;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.server.http.HttpConfig.getHttpPolicy;
+import static org.apache.hadoop.http.HttpServer2.HTTPS_SCHEME;
+import static org.apache.hadoop.http.HttpServer2.HTTP_SCHEME;
/**
* Handler to print decommissioning nodes status.
@@ -43,6 +72,9 @@
public class DecommissionStatusSubCommand extends ScmSubcommand {
+ @CommandLine.ParentCommand
+ private StatusSubCommand parent;
+
@CommandLine.Option(names = { "--id" },
description = "Show info by datanode UUID",
defaultValue = "")
@@ -79,10 +111,21 @@ public void execute(ScmClient scmClient) throws IOException {
decommissioningNodes.size() + " node(s)");
}
+ Map<String, Object> counts = getCounts();
+ int numDecomNodes;
+ Double num = (Double) counts.get("DecommissioningMaintenanceNodesTotal");
+ if (num == null) {
+ numDecomNodes = -1;
+ } else {
+ numDecomNodes = num.intValue();
+ }
for (HddsProtos.Node node : decommissioningNodes) {
DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
node.getNodeID());
printDetails(datanode);
+ printCounts(datanode, counts, numDecomNodes);
+ Map<String, List<ContainerID>> containers = scmClient.getContainersOnDecomNode(datanode);
+ System.out.println(containers);
}
}
private void printDetails(DatanodeDetails datanode) {
@@ -90,4 +133,96 @@ private void printDetails(DatanodeDetails datanode) {
" (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress()
+ "/" + datanode.getHostName() + ")");
}
+
+ private void printCounts(DatanodeDetails datanode, Map<String, Object> counts, int numDecomNodes) {
+ try {
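+ // NodeDecommissionMetrics publishes per-node values keyed by an index
+ // suffix; match this datanode by its tag.datanode.N hostname entry.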
+ for (int i = 1; i <= numDecomNodes; i++) {
+ if (datanode.getHostName().equals(counts.get("tag.datanode." + i))) {
+ int pipelines = ((Double)counts.get("PipelinesWaitingToCloseDN." + i)).intValue();
+ int underReplicated = ((Double)counts.get("UnderReplicatedDN." + i)).intValue();
+ int unclosed = ((Double)counts.get("UnclosedContainersDN." + i)).intValue();
+ long startTime = ((Double)counts.get("StartTimeDN." + i)).longValue();
+ System.out.print("Decommission started at : ");
+ Date date = new Date(startTime);
+ DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z");
+ System.out.println(formatter.format(date));
+ System.out.println("No. of Pipelines: " + pipelines);
+ System.out.println("No. of UnderReplicated containers: " + underReplicated);
+ System.out.println("No. of Unclosed Containers: " + unclosed);
+ return;
+ }
+ }
+ System.err.println("Error getting pipeline and container counts for " + datanode.getHostName());
+ } catch (NullPointerException ex) {
+ System.err.println("Error getting pipeline and container counts for " + datanode.getHostName());
+ }
+ }
+
+ private Map<String, Object> getCounts() {
+ Map<String, Object> finalResult = new HashMap<>();
+ try {
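+ // Fetch the NodeDecommissionMetrics bean from the SCM /jmx endpoint over
+ // HTTP or HTTPS, depending on the configured web policy.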
+ StringBuffer url = new StringBuffer();
+ final OzoneConfiguration ozoneConf = parent
+ .getParent()
+ .getParent()
+ .getOzoneConf();
+ final String protocol;
+ final URLConnectionFactory connectionFactory = URLConnectionFactory.newDefaultURLConnectionFactory(ozoneConf);
+ final HttpConfig.Policy webPolicy = getHttpPolicy(ozoneConf);
+ String host;
+ InputStream inputStream;
+ int errorCode;
+
+ if (webPolicy.isHttpsEnabled()) {
+ protocol = HTTPS_SCHEME;
+ host = ozoneConf.get(OZONE_SCM_HTTPS_ADDRESS_KEY,
+ OZONE_SCM_HTTP_BIND_HOST_DEFAULT + OZONE_SCM_HTTPS_BIND_PORT_DEFAULT);
+ url.append(protocol).append("://").append(host).append("/jmx")
+ .append("?qry=Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
+
+ HttpsURLConnection httpsURLConnection = (HttpsURLConnection) connectionFactory
+ .openConnection(new URL(url.toString()));
+ httpsURLConnection.connect();
+ errorCode = httpsURLConnection.getResponseCode();
+ inputStream = httpsURLConnection.getInputStream();
+ } else {
+ protocol = HTTP_SCHEME;
+ host = ozoneConf.get(OZONE_SCM_HTTP_ADDRESS_KEY,
+ OZONE_SCM_HTTP_BIND_HOST_DEFAULT + OZONE_SCM_HTTP_BIND_PORT_DEFAULT);
+ url.append(protocol + "://" + host).append("/jmx")
+ .append("?qry=Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
+
+ HttpURLConnection httpURLConnection = (HttpURLConnection) connectionFactory
+ .openConnection(new URL(url.toString()));
+ httpURLConnection.connect();
+ errorCode = httpURLConnection.getResponseCode();
+ inputStream = httpURLConnection.getInputStream();
+ }
+
+ if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
+ String response = IOUtils.toString(inputStream, StandardCharsets.UTF_8);
+ HashMap<String, List<Map<String, Object>>> result = new Gson().fromJson(response, HashMap.class);
+ finalResult = result.get("beans").get(0);
+ return finalResult;
+ } else {
+ throw new IOException("Unable to retrieve pipeline and container counts.");
+ }
+ } catch (MalformedURLException ex) {
+ System.err.println("Unable to retrieve pipeline and container counts.");
+ return finalResult;
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ public StatusSubCommand getParent() {
+ return parent;
+ }
+
+ @VisibleForTesting
+ public void setParent(OzoneConfiguration conf) {
+ parent = new StatusSubCommand();
+ parent.setParent(conf);
+ }
+
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
index 23ff9176df9f..e7d3a4443831 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Scanner;
/**
* Decommission one or more datanodes.
@@ -41,12 +42,26 @@ public class DecommissionSubCommand extends ScmSubcommand {
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
- @CommandLine.Parameters(description = "List of fully qualified host names")
- private List<String> hosts = new ArrayList<>();
+ @CommandLine.Parameters(description = "One or more host names separated by spaces. " +
+ "To read from stdin, specify '-' and supply the host names " +
+ "separated by newlines.",
+ paramLabel = "")
+ private List<String> parameters = new ArrayList<>();
@Override
public void execute(ScmClient scmClient) throws IOException {
- if (hosts.size() > 0) {
+ if (parameters.size() > 0) {
+ List<String> hosts;
+ // Whether to read from stdin
+ if (parameters.get(0).equals("-")) {
+ hosts = new ArrayList<>();
+ Scanner scanner = new Scanner(System.in, "UTF-8");
+ while (scanner.hasNextLine()) {
+ hosts.add(scanner.nextLine().trim());
+ }
+ } else {
+ hosts = parameters;
+ }
List<DatanodeAdminError> errors = scmClient.decommissionNodes(hosts);
System.out.println("Started decommissioning datanode(s):\n" +
String.join("\n", hosts));
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index db12ee2aacb1..325e362d4f4e 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -29,6 +29,7 @@
import java.io.IOException;
import java.util.List;
+import java.util.UUID;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -82,6 +83,15 @@ public class ListInfoSubcommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
pipelines = scmClient.listPipelines();
+ if (!Strings.isNullOrEmpty(uuid)) {
+ HddsProtos.Node node = scmClient.queryNode(UUID.fromString(uuid));
+ DatanodeWithAttributes dwa = new DatanodeWithAttributes(DatanodeDetails
+ .getFromProtoBuf(node.getNodeID()),
+ node.getNodeOperationalStates(0),
+ node.getNodeStates(0));
+ printDatanodeInfo(dwa);
+ return;
+ }
Stream<DatanodeWithAttributes> allNodes = getAllNodes(scmClient).stream();
if (!Strings.isNullOrEmpty(ipaddress)) {
allNodes = allNodes.filter(p -> p.getDatanodeDetails().getIpAddress()
@@ -91,10 +101,6 @@ public void execute(ScmClient scmClient) throws IOException {
allNodes = allNodes.filter(p -> p.getDatanodeDetails().getHostName()
.compareToIgnoreCase(hostname) == 0);
}
- if (!Strings.isNullOrEmpty(uuid)) {
- allNodes = allNodes.filter(p ->
- p.getDatanodeDetails().getUuidString().equals(uuid));
- }
if (!Strings.isNullOrEmpty(nodeOperationalState)) {
allNodes = allNodes.filter(p -> p.getOpState().toString()
.compareToIgnoreCase(nodeOperationalState) == 0);
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
index a64c400f66f1..82d263b416fb 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Scanner;
/**
* Place one or more datanodes into Maintenance Mode.
@@ -41,8 +42,11 @@ public class MaintenanceSubCommand extends ScmSubcommand {
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
- @CommandLine.Parameters(description = "List of fully qualified host names")
- private List<String> hosts = new ArrayList<>();
+ @CommandLine.Parameters(description = "One or more host names separated by spaces. " +
+ "To read from stdin, specify '-' and supply the host names " +
+ "separated by newlines.",
+ paramLabel = "")
+ private List<String> parameters = new ArrayList<>();
@CommandLine.Option(names = {"--end"},
description = "Automatically end maintenance after the given hours. " +
@@ -51,7 +55,18 @@ public class MaintenanceSubCommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
- if (hosts.size() > 0) {
+ if (parameters.size() > 0) {
+ List<String> hosts;
+ // Whether to read from stdin
+ if (parameters.get(0).equals("-")) {
+ hosts = new ArrayList<>();
+ Scanner scanner = new Scanner(System.in, "UTF-8");
+ while (scanner.hasNextLine()) {
+ hosts.add(scanner.nextLine().trim());
+ }
+ } else {
+ hosts = parameters;
+ }
List<DatanodeAdminError> errors =
scmClient.startMaintenanceNodes(hosts, endInHours);
System.out.println("Entering maintenance mode on datanode(s):\n" +
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
index 61f7826cf647..e21d61ed3d7f 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Scanner;
/**
* Recommission one or more datanodes.
@@ -42,12 +43,26 @@ public class RecommissionSubCommand extends ScmSubcommand {
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
- @CommandLine.Parameters(description = "List of fully qualified host names")
- private List<String> hosts = new ArrayList<>();
+ @CommandLine.Parameters(description = "One or more host names separated by spaces. " +
+ "To read from stdin, specify '-' and supply the host names " +
+ "separated by newlines.",
+ paramLabel = "")
+ private List<String> parameters = new ArrayList<>();
@Override
public void execute(ScmClient scmClient) throws IOException {
- if (hosts.size() > 0) {
+ if (parameters.size() > 0) {
+ List<String> hosts;
+ // Whether to read from stdin
+ if (parameters.get(0).equals("-")) {
+ hosts = new ArrayList<>();
+ Scanner scanner = new Scanner(System.in, "UTF-8");
+ while (scanner.hasNextLine()) {
+ hosts.add(scanner.nextLine().trim());
+ }
+ } else {
+ hosts = parameters;
+ }
List<DatanodeAdminError> errors = scmClient.recommissionNodes(hosts);
System.out.println("Started recommissioning datanode(s):\n" +
String.join("\n", hosts));
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java
index 9edcd3425a0d..9f5892f04bca 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java
@@ -1,4 +1,3 @@
-package org.apache.hadoop.hdds.scm.cli.datanode;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -16,10 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+package org.apache.hadoop.hdds.scm.cli.datanode;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.kohsuke.MetaInfServices;
import picocli.CommandLine;
import picocli.CommandLine.Command;
@@ -40,6 +42,8 @@
@MetaInfServices(SubcommandWithParent.class)
public class StatusSubCommand implements Callable<Void>, SubcommandWithParent {
+ @CommandLine.ParentCommand
+ private DatanodeCommands parent;
@CommandLine.Spec
private CommandLine.Model.CommandSpec spec;
@@ -49,6 +53,16 @@ public Void call() throws Exception {
return null;
}
+ public DatanodeCommands getParent() {
+ return parent;
+ }
+
+ @VisibleForTesting
+ public void setParent(OzoneConfiguration conf) {
+ parent = new DatanodeCommands();
+ parent.setParent(conf);
+ }
+
@Override
public Class<?> getParentType() {
return DatanodeCommands.class;
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
index 902ee5e7a8d1..28dc60db8fc0 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
@@ -17,8 +17,19 @@
*/
package org.apache.hadoop.hdds.scm.cli.datanode;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -27,14 +38,18 @@
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
+import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import picocli.CommandLine;
+import java.net.HttpURLConnection;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -55,12 +70,47 @@ public class TestDecommissionStatusSubCommand {
private final PrintStream originalErr = System.err;
private DecommissionStatusSubCommand cmd;
private List<HddsProtos.Node> nodes = getNodeDetails(2);
+ private Map<String, List<ContainerID>> containerOnDecom = getContainersOnDecomNodes();
+ private static HttpServer httpServer;
+ private static OzoneConfiguration conf;
+
+ @BeforeAll
+ public static void setupScmHttp() throws Exception {
+ httpServer = HttpServer.create(new InetSocketAddress(15000), 0);
+ httpServer.createContext("/jmx", new HttpHandler() {
+ public void handle(HttpExchange exchange) throws IOException {
+ byte[] response = ("{ \"beans\" : [ { " +
+ "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " +
+ "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 0, " +
+ "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 0, " +
+ "\"ContainersUnderReplicatedTotal\" : 0, \"ContainersUnClosedTotal\" : 0, " +
+ "\"ContainersSufficientlyReplicatedTotal\" : 0 } ]}").getBytes();
+ exchange.sendResponseHeaders(HttpURLConnection.HTTP_OK, response.length);
+ exchange.getResponseBody().write(response);
+ exchange.close();
+ }
+ });
+ httpServer.start();
+ }
+
+ @AfterAll
+ public static void shutdownScmHttp() {
+ if (httpServer != null) {
+ httpServer.stop(0);
+ }
+ }
@BeforeEach
public void setup() throws UnsupportedEncodingException {
cmd = new DecommissionStatusSubCommand();
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
+ conf = new OzoneConfiguration();
+ HttpConfig.Policy policy = HttpConfig.Policy.HTTP_ONLY;
+ conf.set(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY, policy.name());
+ conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "localhost:15000");
+ conf.set(ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY, "localhost");
+ cmd.setParent(conf);
}
@AfterEach
@@ -74,6 +124,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes); // 2 nodes decommissioning
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
cmd.execute(scmClient);
Pattern p = Pattern.compile("Decommission\\sStatus:\\s" +
@@ -85,9 +136,15 @@ public void testSuccessWhenDecommissionStatus() throws IOException {
p = Pattern.compile("Datanode:\\s.*host0\\)");
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
p = Pattern.compile("Datanode:\\s.*host1\\)");
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
}
@Test
@@ -96,6 +153,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException {
// No nodes in decommissioning. No error is printed
when(scmClient.queryNode(any(), any(), any(), any()))
.thenReturn(new ArrayList<>());
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>());
cmd.execute(scmClient);
Pattern p = Pattern.compile("Decommission\\sStatus:\\s" +
@@ -117,6 +175,7 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes); // 2 nodes decommissioning
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
CommandLine c = new CommandLine(cmd);
c.parseArgs("--id", nodes.get(0).getNodeID().getUuid());
@@ -125,11 +184,17 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException {
Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE);
Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
// as uuid of only host0 is passed, host1 should NOT be displayed
p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE);
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertFalse(m.find());
+ p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertFalse(m.find());
}
@Test
@@ -137,6 +202,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID())))
+ .thenReturn(containerOnDecom);
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID())))
+ .thenReturn(new HashMap<>());
CommandLine c = new CommandLine(cmd);
c.parseArgs("--id", nodes.get(1).getNodeID().getUuid());
@@ -161,6 +230,7 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes); // 2 nodes decommissioning
+ when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
CommandLine c = new CommandLine(cmd);
c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress());
@@ -169,11 +239,17 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException {
Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE);
Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertTrue(m.find());
+ p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
// as IpAddress of only host1 is passed, host0 should NOT be displayed
p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE);
m = p.matcher(outContent.toString(DEFAULT_ENCODING));
assertFalse(m.find());
+ p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertFalse(m.find());
}
@Test
@@ -181,6 +257,10 @@ public void testIpOptionDecommissionStatusFail() throws IOException {
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.queryNode(any(), any(), any(), any()))
.thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID())))
+ .thenReturn(containerOnDecom);
+ when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID())))
+ .thenReturn(new HashMap<>());
CommandLine c = new CommandLine(cmd);
c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress());
@@ -200,7 +280,6 @@ public void testIpOptionDecommissionStatusFail() throws IOException {
assertFalse(m.find());
}
-
private List<HddsProtos.Node> getNodeDetails(int n) {
List<HddsProtos.Node> nodesList = new ArrayList<>();
@@ -225,4 +304,19 @@ private List getNodeDetails(int n) {
return nodesList;
}
+ private Map<String, List<ContainerID>> getContainersOnDecomNodes() {
+ Map<String, List<ContainerID>> containerMap = new HashMap<>();
+ List<ContainerID> underReplicated = new ArrayList<>();
+ underReplicated.add(new ContainerID(1L));
+ underReplicated.add(new ContainerID(2L));
+ underReplicated.add(new ContainerID(3L));
+ containerMap.put("UnderReplicated", underReplicated);
+ List<ContainerID> unclosed = new ArrayList<>();
+ unclosed.add(new ContainerID(10L));
+ unclosed.add(new ContainerID(11L));
+ unclosed.add(new ContainerID(12L));
+ containerMap.put("UnClosed", unclosed);
+ return containerMap;
+ }
+
}
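
For reference, the stubbed /jmx endpoint started in setupScmHttp() above can be exercised with a plain HTTP client. The following is a hedged sketch only: the hard-coded localhost:15000 address mirrors the test setup, while the production command resolves the SCM HTTP address from OzoneConfiguration instead.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Sketch: fetch the NodeDecommissionMetrics JSON served by the stub HttpServer.
public class JmxFetchSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:15000/jmx");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      StringBuilder body = new StringBuilder();
      String line;
      while ((line = reader.readLine()) != null) {
        body.append(line);
      }
      System.out.println(body); // JSON bean with the decommission counters
    } finally {
      conn.disconnect();
    }
  }
}
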
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
index 7e5b857d179c..afce23b5fd54 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java
@@ -23,6 +23,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -47,6 +48,7 @@
public class TestDecommissionSubCommand {
private DecommissionSubCommand cmd;
+ private ScmClient scmClient;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
@@ -56,6 +58,7 @@ public class TestDecommissionSubCommand {
@BeforeEach
public void setup() throws UnsupportedEncodingException {
cmd = new DecommissionSubCommand();
+ scmClient = mock(ScmClient.class);
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
}
@@ -66,9 +69,37 @@ public void tearDown() {
System.setErr(originalErr);
}
+ @Test
+ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
+ when(scmClient.decommissionNodes(anyList()))
+ .thenAnswer(invocation -> new ArrayList());
+
+ String input = "host1\nhost2\nhost3\n";
+ System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING)));
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("-");
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Started\\sdecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host1$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host2$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host3$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
@Test
public void testNoErrorsWhenDecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.decommissionNodes(anyList()))
.thenAnswer(invocation -> new ArrayList());
@@ -92,7 +123,6 @@ public void testNoErrorsWhenDecommissioning() throws IOException {
@Test
public void testErrorsReportedWhenDecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.decommissionNodes(anyList()))
.thenAnswer(invocation -> {
ArrayList e = new ArrayList<>();
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
index b6ae0a8ff4fa..1247b783b5cd 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
@@ -32,6 +32,7 @@
import java.util.UUID;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import picocli.CommandLine;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
@@ -101,6 +102,32 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput()
assertTrue(m.find());
}
+ @Test
+ public void testDataNodeByUuidOutput()
+ throws Exception {
+ List<HddsProtos.Node> nodes = getNodeDetails();
+
+ ScmClient scmClient = mock(ScmClient.class);
+ when(scmClient.queryNode(any()))
+ .thenAnswer(invocation -> nodes.get(0));
+ when(scmClient.listPipelines())
+ .thenReturn(new ArrayList<>());
+
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("--id", nodes.get(0).getNodeID().getUuid());
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Operational State:\\s+IN_SERVICE$", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile(nodes.get(0).getNodeID().getUuid().toString(),
+ Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
private List<HddsProtos.Node> getNodeDetails() {
List<HddsProtos.Node> nodes = new ArrayList<>();
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
index d3f7f026ddb9..694ba0e282c6 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java
@@ -23,6 +23,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -48,6 +49,7 @@
public class TestMaintenanceSubCommand {
private MaintenanceSubCommand cmd;
+ private ScmClient scmClient;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
@@ -57,6 +59,7 @@ public class TestMaintenanceSubCommand {
@BeforeEach
public void setup() throws UnsupportedEncodingException {
cmd = new MaintenanceSubCommand();
+ scmClient = mock(ScmClient.class);
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
}
@@ -67,9 +70,37 @@ public void tearDown() {
System.setErr(originalErr);
}
+ @Test
+ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
+ when(scmClient.startMaintenanceNodes(anyList(), anyInt()))
+ .thenAnswer(invocation -> new ArrayList());
+
+ String input = "host1\nhost2\nhost3\n";
+ System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING)));
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("-");
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Entering\\smaintenance\\smode\\son\\sdatanode\\(s\\)", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host1$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host2$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host3$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
@Test
public void testNoErrorsWhenEnteringMaintenance() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.startMaintenanceNodes(anyList(), anyInt()))
.thenAnswer(invocation -> new ArrayList());
@@ -94,7 +125,6 @@ public void testNoErrorsWhenEnteringMaintenance() throws IOException {
@Test
public void testErrorsReportedWhenEnteringMaintenance() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.startMaintenanceNodes(anyList(), anyInt()))
.thenAnswer(invocation -> {
ArrayList e = new ArrayList<>();
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
index 41ce0d90cb78..7f4dbec77344 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java
@@ -23,6 +23,7 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -47,6 +48,7 @@
public class TestRecommissionSubCommand {
private RecommissionSubCommand cmd;
+ private ScmClient scmClient;
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
@@ -56,6 +58,7 @@ public class TestRecommissionSubCommand {
@BeforeEach
public void setup() throws UnsupportedEncodingException {
cmd = new RecommissionSubCommand();
+ scmClient = mock(ScmClient.class);
System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING));
System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING));
}
@@ -66,9 +69,37 @@ public void tearDown() {
System.setErr(originalErr);
}
+ @Test
+ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception {
+ when(scmClient.recommissionNodes(anyList()))
+ .thenAnswer(invocation -> new ArrayList());
+
+ String input = "host1\nhost2\nhost3\n";
+ System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING)));
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("-");
+ cmd.execute(scmClient);
+
+ Pattern p = Pattern.compile(
+ "^Started\\srecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE);
+ Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host1$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host2$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+
+ p = Pattern.compile("^host3$", Pattern.MULTILINE);
+ m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+ assertTrue(m.find());
+ }
+
@Test
public void testNoErrorsWhenRecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.recommissionNodes(anyList()))
.thenAnswer(invocation -> new ArrayList());
@@ -92,7 +123,6 @@ public void testNoErrorsWhenRecommissioning() throws IOException {
@Test
public void testErrorsReportedWhenRecommissioning() throws IOException {
- ScmClient scmClient = mock(ScmClient.class);
when(scmClient.recommissionNodes(anyList()))
.thenAnswer(invocation -> {
ArrayList e = new ArrayList<>();
diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml
index a5598311c4c5..a5a436436183 100644
--- a/hadoop-ozone/client/pom.xml
+++ b/hadoop-ozone/client/pom.xml
@@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
Apache Ozone Clientjar
- false
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 441d9143b598..ca885b3b6b06 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -413,6 +413,12 @@ public void setListCacheSize(int listCacheSize) {
this.listCacheSize = listCacheSize;
}
+ @Deprecated
+ public void setEncryptionKey(String bekName) throws IOException {
+ proxy.setEncryptionKey(volumeName, name, bekName);
+ encryptionKeyName = bekName;
+ }
+
/**
* Creates a new key in the bucket, with default replication type RATIS and
* with replication factor THREE.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 5316f7a99e9f..e455e3040adb 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -997,6 +997,24 @@ void setBucketQuota(String volumeName, String bucketName,
void setReplicationConfig(String volumeName, String bucketName,
ReplicationConfig replicationConfig) throws IOException;
+ /**
+ * Set Bucket Encryption Key (BEK).
+ *
+ * @param volumeName
+ * @param bucketName
+ * @param bekName
+ * @throws IOException
+ * @deprecated This functionality is deprecated as it is not intended for
+ * users to reset bucket encryption under normal circumstances and may be
+ * removed in the future. Users are advised to exercise caution and consider
+ * alternative approaches for managing bucket encryption unless HDDS-7449 or
+ * HDDS-7526 is encountered. As a result, the setter methods for this
+ * functionality have been marked as deprecated.
+ */
+ @Deprecated
+ void setEncryptionKey(String volumeName, String bucketName,
+ String bekName) throws IOException;
+
/**
* Returns OzoneKey that contains the application generated/visible
* metadata for an Ozone Object.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 850ae0d19376..e14ae5828d70 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1213,6 +1213,22 @@ public void setBucketQuota(String volumeName, String bucketName,
}
+ @Deprecated
+ @Override
+ public void setEncryptionKey(String volumeName, String bucketName,
+ String bekName) throws IOException {
+ verifyVolumeName(volumeName);
+ verifyBucketName(bucketName);
+ OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
+ BucketEncryptionKeyInfo bek = new BucketEncryptionKeyInfo.Builder()
+ .setKeyName(bekName).build();
+ builder.setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setBucketEncryptionKey(bek);
+ OmBucketArgs finalArgs = builder.build();
+ ozoneManagerClient.setBucketProperty(finalArgs);
+ }
+
@Override
public void setReplicationConfig(
String volumeName, String bucketName, ReplicationConfig replicationConfig)
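
A minimal usage sketch of the deprecated encryption-key setter added above, as seen from the client API. The volume, bucket and key names are placeholders, and the wrapper class name is illustrative only.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class SetBucketEncryptionKeySketch {
  public static void main(String[] args) throws Exception {
    try (OzoneClient client =
        OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1")
          .getBucket("bucket1");
      // Deprecated API: intended only for recovery scenarios such as
      // HDDS-7449 or HDDS-7526, not for routine key rotation.
      bucket.setEncryptionKey("newBekName");
    }
  }
}
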
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
index 5cf4401bae25..6162f1ae5a41 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.client.checksum;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;
@@ -27,7 +28,6 @@
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
-import java.util.Random;
import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.COMPOSITE_CRC;
import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC;
@@ -40,9 +40,8 @@ public class TestReplicatedBlockChecksumComputer {
@Test
public void testComputeMd5Crc() throws IOException {
final int lenOfBytes = 32;
- byte[] randomChunkChecksum = new byte[lenOfBytes];
- Random r = new Random();
- r.nextBytes(randomChunkChecksum);
+ byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
+
MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum);
byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest();
AbstractBlockChecksumComputer computer =
@@ -56,9 +55,7 @@ public void testComputeMd5Crc() throws IOException {
@Test
public void testComputeCompositeCrc() throws IOException {
final int lenOfBytes = 32;
- byte[] randomChunkChecksum = new byte[lenOfBytes];
- Random r = new Random();
- r.nextBytes(randomChunkChecksum);
+ byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes);
CrcComposer crcComposer =
CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4);
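
The RandomUtils change above is a drop-in replacement for the removed java.util.Random usage. A tiny sketch for reference, assuming commons-lang3 is on the classpath:

import org.apache.commons.lang3.RandomUtils;

// Sketch: RandomUtils.nextBytes(n) returns a freshly filled n-byte array,
// replacing the explicit new Random().nextBytes(buf) pattern.
public class RandomBytesSketch {
  public static void main(String[] args) {
    byte[] randomChunkChecksum = RandomUtils.nextBytes(32);
    System.out.println("generated " + randomChunkChecksum.length + " random bytes");
  }
}
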
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index 813edcb7d714..4af3fb18523d 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
jar
- false
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 5dd7579eb916..58f341b74aca 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -390,18 +390,6 @@ private OMConfigKeys() {
public static final String OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT
= "60s";
- /**
- * Configuration properties for Snapshot Directory Service.
- */
- public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL =
- "ozone.snapshot.directory.service.interval";
- public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT
- = "24h";
- public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT =
- "ozone.snapshot.directory.service.timeout";
- public static final String
- OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT = "300s";
-
public static final String OZONE_PATH_DELETING_LIMIT_PER_TASK =
"ozone.path.deleting.limit.per.task";
// default is 6000 taking account of 32MB buffer size, and assuming
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index f8c752aab271..e382377dff45 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
/**
* A class that encapsulates Bucket Arguments.
@@ -50,6 +51,10 @@ public final class OmBucketArgs extends WithMetadata implements Auditable {
*/
private StorageType storageType;
+ /**
+ * Bucket encryption key info if encryption is enabled.
+ */
+ private BucketEncryptionKeyInfo bekInfo;
private long quotaInBytes = OzoneConsts.QUOTA_RESET;
private long quotaInNamespace = OzoneConsts.QUOTA_RESET;
private boolean quotaInBytesSet = false;
@@ -150,6 +155,10 @@ public DefaultReplicationConfig getDefaultReplicationConfig() {
return defaultReplicationConfig;
}
+ public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() {
+ return bekInfo;
+ }
+
/**
* Sets the Bucket default replication config.
*/
@@ -168,6 +177,12 @@ private void setQuotaInNamespace(long quotaInNamespace) {
this.quotaInNamespace = quotaInNamespace;
}
+ @Deprecated
+ private void setBucketEncryptionKey(
+ BucketEncryptionKeyInfo bucketEncryptionKey) {
+ this.bekInfo = bucketEncryptionKey;
+ }
+
/**
* Returns Bucket Owner Name.
*
@@ -216,6 +231,7 @@ public static class Builder {
private long quotaInBytes;
private boolean quotaInNamespaceSet = false;
private long quotaInNamespace;
+ private BucketEncryptionKeyInfo bekInfo;
private DefaultReplicationConfig defaultReplicationConfig;
private String ownerName;
/**
@@ -241,6 +257,12 @@ public Builder setIsVersionEnabled(Boolean versionFlag) {
return this;
}
+ @Deprecated
+ public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) {
+ this.bekInfo = info;
+ return this;
+ }
+
public Builder addMetadata(Map<String, String> metadataMap) {
this.metadata = metadataMap;
return this;
@@ -291,6 +313,9 @@ public OmBucketArgs build() {
if (quotaInNamespaceSet) {
omBucketArgs.setQuotaInNamespace(quotaInNamespace);
}
+ if (bekInfo != null && bekInfo.getKeyName() != null) {
+ omBucketArgs.setBucketEncryptionKey(bekInfo);
+ }
return omBucketArgs;
}
}
@@ -322,6 +347,11 @@ public BucketArgs getProtobuf() {
if (ownerName != null) {
builder.setOwnerName(ownerName);
}
+
+ if (bekInfo != null && bekInfo.getKeyName() != null) {
+ builder.setBekInfo(OMPBHelper.convert(bekInfo));
+ }
+
return builder.build();
}
@@ -355,6 +385,11 @@ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) {
if (bucketArgs.hasQuotaInNamespace()) {
omBucketArgs.setQuotaInNamespace(bucketArgs.getQuotaInNamespace());
}
+
+ if (bucketArgs.hasBekInfo()) {
+ omBucketArgs.setBucketEncryptionKey(
+ OMPBHelper.convert(bucketArgs.getBekInfo()));
+ }
return omBucketArgs;
}
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index 56103ccb3ab8..8ee9c6ee1f52 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -123,7 +123,6 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) {
private long referencedReplicatedSize;
private long exclusiveSize;
private long exclusiveReplicatedSize;
- private boolean deepCleanedDeletedDir;
/**
* Private constructor, constructed via builder.
@@ -163,8 +162,7 @@ private SnapshotInfo(UUID snapshotId,
long referencedSize,
long referencedReplicatedSize,
long exclusiveSize,
- long exclusiveReplicatedSize,
- boolean deepCleanedDeletedDir) {
+ long exclusiveReplicatedSize) {
this.snapshotId = snapshotId;
this.name = name;
this.volumeName = volumeName;
@@ -183,7 +181,6 @@ private SnapshotInfo(UUID snapshotId,
this.referencedReplicatedSize = referencedReplicatedSize;
this.exclusiveSize = exclusiveSize;
this.exclusiveReplicatedSize = exclusiveReplicatedSize;
- this.deepCleanedDeletedDir = deepCleanedDeletedDir;
}
public void setName(String name) {
@@ -288,7 +285,7 @@ public void setSstFiltered(boolean sstFiltered) {
}
public SnapshotInfo.Builder toBuilder() {
- return new Builder()
+ return new SnapshotInfo.Builder()
.setSnapshotId(snapshotId)
.setName(name)
.setVolumeName(volumeName)
@@ -305,8 +302,7 @@ public SnapshotInfo.Builder toBuilder() {
.setReferencedSize(referencedSize)
.setReferencedReplicatedSize(referencedReplicatedSize)
.setExclusiveSize(exclusiveSize)
- .setExclusiveReplicatedSize(exclusiveReplicatedSize)
- .setDeepCleanedDeletedDir(deepCleanedDeletedDir);
+ .setExclusiveReplicatedSize(exclusiveReplicatedSize);
}
/**
@@ -331,7 +327,6 @@ public static class Builder {
private long referencedReplicatedSize;
private long exclusiveSize;
private long exclusiveReplicatedSize;
- private boolean deepCleanedDeletedDir;
public Builder() {
// default values
@@ -428,11 +423,6 @@ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) {
return this;
}
- public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) {
- this.deepCleanedDeletedDir = deepCleanedDeletedDir;
- return this;
- }
-
public SnapshotInfo build() {
Preconditions.checkNotNull(name);
return new SnapshotInfo(
@@ -453,8 +443,7 @@ public SnapshotInfo build() {
referencedSize,
referencedReplicatedSize,
exclusiveSize,
- exclusiveReplicatedSize,
- deepCleanedDeletedDir
+ exclusiveReplicatedSize
);
}
}
@@ -476,8 +465,7 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() {
.setReferencedSize(referencedSize)
.setReferencedReplicatedSize(referencedReplicatedSize)
.setExclusiveSize(exclusiveSize)
- .setExclusiveReplicatedSize(exclusiveReplicatedSize)
- .setDeepCleanedDeletedDir(deepCleanedDeletedDir);
+ .setExclusiveReplicatedSize(exclusiveReplicatedSize);
if (pathPreviousSnapshotId != null) {
sib.setPathPreviousSnapshotID(toProtobuf(pathPreviousSnapshotId));
@@ -550,11 +538,6 @@ public static SnapshotInfo getFromProtobuf(
snapshotInfoProto.getExclusiveReplicatedSize());
}
- if (snapshotInfoProto.hasDeepCleanedDeletedDir()) {
- osib.setDeepCleanedDeletedDir(
- snapshotInfoProto.getDeepCleanedDeletedDir());
- }
-
osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath())
.setCheckpointDir(snapshotInfoProto.getCheckpointDir())
.setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber());
@@ -639,14 +622,6 @@ public long getExclusiveReplicatedSize() {
return exclusiveReplicatedSize;
}
- public boolean getDeepCleanedDeletedDir() {
- return deepCleanedDeletedDir;
- }
-
- public void setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) {
- this.deepCleanedDeletedDir = deepCleanedDeletedDir;
- }
-
/**
* Generate default name of snapshot, (used if user doesn't provide one).
*/
@@ -680,8 +655,7 @@ public static SnapshotInfo newInstance(String volumeName,
.setSnapshotPath(volumeName + OM_KEY_PREFIX + bucketName)
.setVolumeName(volumeName)
.setBucketName(bucketName)
- .setDeepClean(false)
- .setDeepCleanedDeletedDir(false);
+ .setDeepClean(true);
if (snapshotId != null) {
builder.setCheckpointDir(getCheckpointDirName(snapshotId));
@@ -714,8 +688,7 @@ public boolean equals(Object o) {
referencedSize == that.referencedSize &&
referencedReplicatedSize == that.referencedReplicatedSize &&
exclusiveSize == that.exclusiveSize &&
- exclusiveReplicatedSize == that.exclusiveReplicatedSize &&
- deepCleanedDeletedDir == that.deepCleanedDeletedDir;
+ exclusiveReplicatedSize == that.exclusiveReplicatedSize;
}
@Override
@@ -726,7 +699,7 @@ public int hashCode() {
globalPreviousSnapshotId, snapshotPath, checkpointDir,
deepClean, sstFiltered,
referencedSize, referencedReplicatedSize,
- exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir);
+ exclusiveSize, exclusiveReplicatedSize);
}
/**
@@ -753,7 +726,6 @@ public SnapshotInfo copyObject() {
.setReferencedReplicatedSize(referencedReplicatedSize)
.setExclusiveSize(exclusiveSize)
.setExclusiveReplicatedSize(exclusiveReplicatedSize)
- .setDeepCleanedDeletedDir(deepCleanedDeletedDir)
.build();
}
}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java
index dd9cf34c8e29..6dc3f913d0f8 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java
@@ -66,13 +66,12 @@ private SnapshotInfo createSnapshotInfo() {
.setSnapshotPath(SNAPSHOT_PATH)
.setCheckpointDir(CHECKPOINT_DIR)
.setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER)
- .setDeepClean(false)
+ .setDeepClean(true)
.setSstFiltered(false)
.setReferencedSize(2000L)
.setReferencedReplicatedSize(6000L)
.setExclusiveSize(1000L)
.setExclusiveReplicatedSize(3000L)
- .setDeepCleanedDeletedDir(false)
.build();
}
@@ -90,13 +89,12 @@ private OzoneManagerProtocolProtos.SnapshotInfo createSnapshotInfoProto() {
.setSnapshotPath(SNAPSHOT_PATH)
.setCheckpointDir(CHECKPOINT_DIR)
.setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER)
- .setDeepClean(false)
+ .setDeepClean(true)
.setSstFiltered(false)
.setReferencedSize(2000L)
.setReferencedReplicatedSize(6000L)
.setExclusiveSize(1000L)
.setExclusiveReplicatedSize(3000L)
- .setDeepCleanedDeletedDir(false)
.build();
}
@@ -142,9 +140,6 @@ public void testSnapshotInfoToProto() {
assertEquals(
snapshotInfoEntryExpected.getExclusiveReplicatedSize(),
snapshotInfoEntryActual.getExclusiveReplicatedSize());
- assertEquals(
- snapshotInfoEntryExpected.getDeepCleanedDeletedDir(),
- snapshotInfoEntryActual.getDeepCleanedDeletedDir());
assertEquals(snapshotInfoEntryExpected, snapshotInfoEntryActual);
}
@@ -181,8 +176,6 @@ public void testSnapshotInfoProtoToSnapshotInfo() {
snapshotInfoActual.getExclusiveSize());
assertEquals(snapshotInfoExpected.getExclusiveReplicatedSize(),
snapshotInfoActual.getExclusiveReplicatedSize());
- assertEquals(snapshotInfoExpected.getDeepCleanedDeletedDir(),
- snapshotInfoActual.getDeepCleanedDeletedDir());
assertEquals(snapshotInfoExpected, snapshotInfoActual);
}
diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh
index 417ae35e5e42..768a1f32a38b 100755
--- a/hadoop-ozone/dev-support/checks/junit.sh
+++ b/hadoop-ozone/dev-support/checks/junit.sh
@@ -79,6 +79,12 @@ for i in $(seq 1 ${ITERATIONS}); do
fi
if [[ ${ITERATIONS} -gt 1 ]]; then
+ if ! grep -q "Tests run: [^0]" "${REPORT_DIR}/output.log"; then
+ echo "No tests were run" >> "${REPORT_DIR}/summary.txt"
+ irc=1
+ FAIL_FAST=true
+ fi
+
REPORT_DIR="${original_report_dir}"
echo "Iteration ${i} exit code: ${irc}" | tee -a "${REPORT_FILE}"
fi
diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh
index dc66f923a649..2bfa7733fcbd 100755
--- a/hadoop-ozone/dev-support/checks/native.sh
+++ b/hadoop-ozone/dev-support/checks/native.sh
@@ -19,6 +19,13 @@
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CHECK=native
+zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout)
+if [[ -z "${zlib_version}" ]]; then
+ echo "ERROR zlib.version not defined in pom.xml"
+ exit 1
+fi
+
source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \
+ -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \
-DexcludedGroups="unhealthy" \
"$@"
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
index a267080bb190..9d7ec5d4e604 100755
--- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
@@ -35,8 +35,9 @@ RESULT_DIR="$ALL_RESULT_DIR" create_results_dir
# This is the version of Ozone that should use the runner image to run the
# code that was built. Other versions will pull images from docker hub.
-export OZONE_CURRENT_VERSION=1.4.0
-run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION"
+export OZONE_CURRENT_VERSION=1.5.0
+run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION"
+# run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION"
# run_test ha non-rolling-upgrade 1.2.1 "$OZONE_CURRENT_VERSION"
# run_test om-ha non-rolling-upgrade 1.1.0 "$OZONE_CURRENT_VERSION"
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
index 15d4c7e427da..2057cdd8a993 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
@@ -45,6 +45,13 @@ services:
volumes:
- ../..:/opt/ozone
command: ["sleep","1000000"]
+ old_client_1_4_0:
+ image: apache/ozone:1.4.0
+ env_file:
+ - docker-config
+ volumes:
+ - ../..:/opt/ozone
+ command: ["sleep","1000000"]
new_client:
image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION}
env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
index baa239d56a82..419d397c19ec 100755
--- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
@@ -21,8 +21,8 @@ COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export COMPOSE_DIR
basename=$(basename ${COMPOSE_DIR})
-current_version=1.4.0
-old_versions="1.0.0 1.1.0 1.2.1 1.3.0" # container is needed for each version in clients.yaml
+current_version=1.5.0
+old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml
# shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh
source "${COMPOSE_DIR}/../testlib.sh"
@@ -77,7 +77,7 @@ test_cross_compatibility() {
test_ec_cross_compatibility() {
echo "Running Erasure Coded storage backward compatibility tests."
- local cluster_versions_with_ec="1.3.0"
+ local cluster_versions_with_ec="1.3.0 1.4.0"
local non_ec_client_versions="1.0.0 1.1.0 1.2.1"
for cluster_version in ${cluster_versions_with_ec}; do
diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
index 7d9edcdef448..55ed9ddf5044 100644
--- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
@@ -32,10 +32,12 @@ Get test user principal
[return] ${user}/${instance}@EXAMPLE.COM
Kinit HTTP user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster
${principal} = Get test user principal HTTP
Wait Until Keyword Succeeds 2min 10sec Execute kinit -k -t /etc/security/keytabs/HTTP.keytab ${principal}
Kinit test user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster
[arguments] ${user} ${keytab}
${TEST_USER} = Get test user principal ${user}
Set Suite Variable ${TEST_USER}
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
index c0b2c9f7bfae..840fb963d8d1 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot
@@ -207,3 +207,9 @@ Verify Multipart Upload
${tmp} = Catenate @{files}
Execute cat ${tmp} > /tmp/original${random}
Compare files /tmp/original${random} /tmp/verify${random}
+
+Revoke S3 secrets
+ Execute and Ignore Error ozone s3 revokesecret -y
+ Execute and Ignore Error ozone s3 revokesecret -y -u testuser
+ Execute and Ignore Error ozone s3 revokesecret -y -u testuser2
+
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
index b9f6993f45e2..70dcfa1abede 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot
@@ -21,30 +21,37 @@ Library String
Resource ../commonlib.robot
Resource ./commonawslib.robot
Test Timeout 5 minutes
-Suite Setup Setup s3 tests
Default Tags no-bucket-type
+Test Setup Run Keywords Kinit test user testuser testuser.keytab
+... AND Revoke S3 secrets
+Test Teardown Run Keyword Revoke S3 secrets
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
+${SECURITY_ENABLED} true
*** Test Cases ***
S3 Gateway Generate Secret
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- Should Match Regexp ${result} .*.*
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+ Should Match Regexp ${result} .*.*
+
+S3 Gateway Secret Already Exists
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret ${OM_HA_PARAM}
+ ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret
+ Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True
S3 Gateway Generate Secret By Username
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+ Should Match Regexp ${result} .*.*
+
+S3 Gateway Generate Secret By Username For Other User
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- Should Match Regexp ${result} .*.*
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+ Should Match Regexp ${result} .*.*
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
index 27b4580f419b..0f15f23067b0 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot
@@ -21,8 +21,9 @@ Library String
Resource ../commonlib.robot
Resource ./commonawslib.robot
Test Timeout 5 minutes
-Suite Setup Setup s3 tests
Default Tags no-bucket-type
+Test Setup Run Keywords Kinit test user testuser testuser.keytab
+... AND Revoke S3 secrets
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
@@ -31,19 +32,19 @@ ${SECURITY_ENABLED} true
*** Test Cases ***
S3 Gateway Revoke Secret
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret ${OM_HA_PARAM}
${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
S3 Gateway Revoke Secret By Username
- Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM}
+ ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
+
+S3 Gateway Revoke Secret By Username For Other User
+ Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled
+ Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM}
${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2
- IF '${SECURITY_ENABLED}' == 'true'
- Should contain ${result} HTTP/1.1 200 OK ignore_case=True
- ELSE
- Should contain ${result} S3 Secret endpoint is disabled.
- END
\ No newline at end of file
+ Should contain ${result} HTTP/1.1 200 OK ignore_case=True
\ No newline at end of file
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
index fa6e0ae57566..604608a07fb8 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml
@@ -34,21 +34,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.junit.vintage</groupId>
- <artifactId>junit-vintage-engine</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.junit.platform</groupId>
<artifactId>junit-platform-launcher</artifactId>
</dependency>
diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml
index dcd03c04fa83..f5e044ddac2f 100644
--- a/hadoop-ozone/insight/pom.xml
+++ b/hadoop-ozone/insight/pom.xml
@@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
Apache Ozone Insight Tooljar
- false
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
index b2d68545d06f..85faf99419a4 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java
@@ -17,12 +17,14 @@
*/
package org.apache.hadoop.ozone.insight;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.HashMap;
import java.util.Map;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
/**
* Test common insight point utility methods.
*/
@@ -42,14 +44,14 @@ public String getDescription() {
Map<String, String> filters = new HashMap<>();
filters.put("datanode", "123");
- Assertions.assertTrue(insightPoint
+ assertTrue(insightPoint
.filterLog(filters, "This a log specific to [datanode=123]"));
- Assertions.assertFalse(insightPoint
+ assertFalse(insightPoint
.filterLog(filters, "This a log specific to [datanode=234]"));
//with empty filters
- Assertions.assertTrue(insightPoint
+ assertTrue(insightPoint
.filterLog(new HashMap<>(), "This a log specific to [datanode=234]"));
//with multiple filters
@@ -57,14 +59,14 @@ public String getDescription() {
filters.put("datanode", "123");
filters.put("pipeline", "abcd");
- Assertions.assertFalse(insightPoint
+ assertFalse(insightPoint
.filterLog(filters, "This a log specific to [datanode=123]"));
- Assertions.assertTrue(insightPoint
+ assertTrue(insightPoint
.filterLog(filters,
"This a log specific to [datanode=123] [pipeline=abcd]"));
- Assertions.assertFalse(insightPoint
+ assertFalse(insightPoint
.filterLog(filters,
"This a log specific to [datanode=456] [pipeline=abcd]"));
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
index 9be82ebc41d6..701652bee09c 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java
@@ -27,11 +27,12 @@
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
+import static org.assertj.core.api.Assertions.assertThat;
+
/**
* Test insight report which prints out configs.
*/
@@ -60,12 +61,12 @@ public void testPrintConfig() throws UnsupportedEncodingException {
subCommand.printConfig(CustomConfig.class, conf);
final String output = out.toString(StandardCharsets.UTF_8.name());
- Assertions.assertTrue(output.contains(">>> ozone.scm.client.address"));
- Assertions.assertTrue(output.contains("default: localhost"));
- Assertions.assertTrue(output.contains("current: omclient"));
- Assertions.assertTrue(output.contains(">>> ozone.scm.client.secure"));
- Assertions.assertTrue(output.contains("default: true"));
- Assertions.assertTrue(output.contains("current: true"));
+ assertThat(output).contains(">>> ozone.scm.client.address");
+ assertThat(output).contains("default: localhost");
+ assertThat(output).contains("current: omclient");
+ assertThat(output).contains(">>> ozone.scm.client.secure");
+ assertThat(output).contains("default: true");
+ assertThat(output).contains("current: true");
}
/**
diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
index 01402085861d..f895a91c5376 100644
--- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
+++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java
@@ -17,9 +17,10 @@
*/
package org.apache.hadoop.ozone.insight;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
/**
* Testing utility methods of the log subcommand test.
*/
@@ -36,6 +37,6 @@ public void filterLog() {
+ "storageLocation: \"/tmp/hadoop-neo/dfs/data\"\\n capacity: "
+ "250438021120\\n scmUsed: 16384\\n remaining: 212041244672\\n "
+ "storageType: DISK\\n failed: false\\n}\\n");
- Assertions.assertEquals(10, result.split("\n").length);
+ assertEquals(10, result.split("\n").length);
}
}
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index 3eef8fa58c05..913cd639bf7c 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -119,21 +119,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
test-jartest
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.junit.vintage</groupId>
- <artifactId>junit-vintage-engine</artifactId>
- <scope>test</scope>
- </dependency>
<dependency>
<groupId>org.junit.platform</groupId>
<artifactId>junit-platform-launcher</artifactId>
</dependency>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
index 49c693268e70..618025dc06f7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs.contract;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -31,7 +32,6 @@
import java.io.EOFException;
import java.io.IOException;
-import java.util.Random;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -341,15 +341,14 @@ public void testRandomSeeks() throws Throwable {
byte[] buf = dataset(filesize, 0, 255);
Path randomSeekFile = path("testrandomseeks.bin");
createFile(getFileSystem(), randomSeekFile, true, buf);
- Random r = new Random();
// Record the sequence of seeks and reads which trigger a failure.
int[] seeks = new int[10];
int[] reads = new int[10];
try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) {
for (int i = 0; i < limit; i++) {
- int seekOff = r.nextInt(buf.length);
- int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
+ int seekOff = RandomUtils.nextInt(0, buf.length);
+ int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000));
seeks[i % seeks.length] = seekOff;
reads[i % reads.length] = toRead;
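
Note: the per-call new Random() is replaced with commons-lang3's RandomUtils, whose nextInt(startInclusive, endExclusive) covers the same range as Random.nextInt(bound) when the lower bound is 0. A small sketch of the bound semantics, assuming commons-lang3 on the classpath (class name and buffer size are illustrative):

    import org.apache.commons.lang3.RandomUtils;

    public final class RandomBoundsSketch {
      public static void main(String[] args) {
        byte[] buf = new byte[4096];
        // nextInt(startInclusive, endExclusive) returns a value in [0, buf.length),
        // matching new Random().nextInt(buf.length), so the refactoring keeps the
        // same seek range without holding a Random instance in the test.
        int seekOff = RandomUtils.nextInt(0, buf.length);
        int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000));
        System.out.println("seek=" + seekOff + " read=" + toRead);
      }
    }
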
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
index 61b0281c659a..1675807d230f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
@@ -96,7 +96,6 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
-import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
@@ -2000,7 +1999,7 @@ private void checkInvalidPath(Path path) {
@Test
void testRenameFile() throws Exception {
- final String dir = "/dir" + new Random().nextInt(1000);
+ final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
Path file1Source = new Path(getBucketPath() + dir
+ "/file1_Copy");
@@ -2026,7 +2025,7 @@ void testRenameFile() throws Exception {
*/
@Test
void testRenameFileToDir() throws Exception {
- final String dir = "/dir" + new Random().nextInt(1000);
+ final String dir = "/dir" + RandomUtils.nextInt(0, 1000);
Path dirPath = new Path(getBucketPath() + dir);
getFs().mkdirs(dirPath);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index 725b17ee9d64..439b563d6330 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.pipeline;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -34,7 +35,6 @@
import java.util.concurrent.TimeUnit;
import java.util.HashMap;
import java.util.Map;
-import java.util.Random;
import java.util.UUID;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
@@ -172,7 +172,6 @@ public void testMinLeaderCountChoosePolicy() throws Exception {
// each datanode has leaderNumOfEachDn leaders after balance
checkLeaderBalance(dnNum, leaderNumOfEachDn);
- Random r = new Random(0);
for (int i = 0; i < 10; i++) {
// destroy some pipelines, wait new pipelines created,
// then check leader balance
@@ -181,7 +180,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception {
.getPipelines(RatisReplicationConfig.getInstance(
ReplicationFactor.THREE), Pipeline.PipelineState.OPEN);
- int destroyNum = r.nextInt(pipelines.size());
+ int destroyNum = RandomUtils.nextInt(0, pipelines.size());
for (int k = 0; k <= destroyNum; k++) {
pipelineManager.closePipeline(pipelines.get(k), false);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
index 0dae8a8b0dc6..c2e671b896e6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.DefaultConfigManager;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
@@ -41,7 +42,6 @@
import javax.ws.rs.core.UriInfo;
import java.io.IOException;
import java.io.ByteArrayInputStream;
-import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.UUID;
import java.util.List;
@@ -217,8 +217,7 @@ public void testMultipart() throws Exception {
private static String generateRandomContent(int sizeInMB) {
int bytesToGenerate = sizeInMB * 1024 * 1024;
- byte[] randomBytes = new byte[bytesToGenerate];
- new SecureRandom().nextBytes(randomBytes);
+ byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate);
return Base64.getEncoder().encodeToString(randomBytes);
}
}
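
Note: the SecureRandom payload generator above is replaced with RandomUtils.nextBytes, which is not cryptographically secure but is sufficient for bulk test content. A rough sketch under that assumption (class name and payload size are illustrative):

    import java.util.Base64;

    import org.apache.commons.lang3.RandomUtils;

    public final class RandomContentSketch {
      public static void main(String[] args) {
        // RandomUtils.nextBytes is fine here because the bytes only serve as
        // upload payload in a test, not as key material, and it avoids the cost
        // of a SecureRandom per invocation.
        byte[] randomBytes = RandomUtils.nextBytes(1024 * 1024);
        String content = Base64.getEncoder().encodeToString(randomBytes);
        System.out.println("payload length: " + content.length());
      }
    }
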
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 4867be49066f..2de5e83a4e9f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -58,7 +58,6 @@
import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
-import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
@@ -568,8 +567,7 @@ public void testAbortUploadSuccessWithParts() throws Exception {
bucket.abortMultipartUpload(keyName, uploadID);
String multipartOpenKey =
- getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
- metadataMgr);
+ metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID);
OmKeyInfo omKeyInfo =
metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey);
OmMultipartKeyInfo omMultipartKeyInfo =
@@ -853,8 +851,7 @@ private String verifyUploadedPart(String uploadID, String partName,
ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
BucketLayout bucketLayout = buckInfo.getBucketLayout();
String multipartOpenKey =
- getMultipartOpenKey(uploadID, volumeName, bucketName, keyName,
- metadataMgr);
+ metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID);
String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName,
keyName, uploadID);
@@ -881,32 +878,6 @@ private String verifyUploadedPart(String uploadID, String partName,
return multipartKey;
}
- private String getMultipartOpenKey(String multipartUploadID,
- String volName, String buckName, String kName,
- OMMetadataManager omMetadataManager) throws IOException {
-
- String fileName = OzoneFSUtils.getFileName(kName);
- final long volumeId = omMetadataManager.getVolumeId(volName);
- final long bucketId = omMetadataManager.getBucketId(volName,
- buckName);
- long parentID = getParentID(volName, buckName, kName,
- omMetadataManager);
-
- String multipartKey = omMetadataManager.getMultipartKey(volumeId, bucketId,
- parentID, fileName, multipartUploadID);
-
- return multipartKey;
- }
-
- private long getParentID(String volName, String buckName,
- String kName, OMMetadataManager omMetadataManager) throws IOException {
- final long volumeId = omMetadataManager.getVolumeId(volName);
- final long bucketId = omMetadataManager.getBucketId(volName,
- buckName);
- return OMFileRequest.getParentID(volumeId, bucketId,
- kName, omMetadataManager);
- }
-
private String initiateMultipartUpload(OzoneBucket oBucket, String kName,
ReplicationType replicationType, ReplicationFactor replicationFactor)
throws IOException {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index d03c57bf4e4f..b053a4394bf9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -22,7 +22,7 @@
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
-import java.util.Random;
+import org.apache.commons.lang3.RandomUtils;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@@ -280,7 +280,7 @@ public void testWatchForCommitForRetryfailure() throws Exception {
// as well as there is no logIndex generate in Ratis.
// The basic idea here is just to test if its throws an exception.
xceiverClient
- .watchForCommit(index + new Random().nextInt(100) + 10);
+ .watchForCommit(index + RandomUtils.nextInt(0, 100) + 10);
fail("expected exception not thrown");
} catch (Exception e) {
assertInstanceOf(ExecutionException.class, e);
@@ -374,7 +374,7 @@ public void testWatchForCommitForGroupMismatchException() throws Exception {
// The basic idea here is just to test if its throws an exception.
xceiverClient
.watchForCommit(reply.getLogIndex() +
- new Random().nextInt(100) + 10);
+ RandomUtils.nextInt(0, 100) + 10);
fail("Expected exception not thrown");
} catch (Exception e) {
assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
index b50f2ac8d68a..5a7e404f507b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
@@ -52,7 +52,6 @@
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
-import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -72,6 +71,8 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doAnswer;
/**
* Tests the EC recovery and over replication processing.
@@ -308,7 +309,7 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception {
.mockFieldReflection(handler,
"coordinator");
- Mockito.doAnswer(invocation -> {
+ doAnswer(invocation -> {
GenericTestUtils.waitFor(() ->
dn.getDatanodeStateMachine()
.getContainer()
@@ -320,8 +321,8 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception {
reconstructedDN.set(dn);
invocation.callRealMethod();
return null;
- }).when(coordinator).reconstructECBlockGroup(Mockito.any(), Mockito.any(),
- Mockito.any(), Mockito.any());
+ }).when(coordinator).reconstructECBlockGroup(any(), any(),
+ any(), any());
}
// Shutting down DN triggers close pipeline and close container.
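
Note: the Mockito calls above switch to static imports of doAnswer and any. A compact sketch of the same stubbing pattern against a hypothetical collaborator (the Coordinator interface and all names below are illustrative, not the Ozone API):

    import static org.mockito.Mockito.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    public final class DoAnswerSketch {

      // Hypothetical collaborator standing in for the EC reconstruction coordinator.
      interface Coordinator {
        void reconstruct(String containerId, String targetNode);
      }

      public static void main(String[] args) {
        Coordinator coordinator = mock(Coordinator.class);
        // doAnswer runs the lambda whenever the stubbed method is invoked; the
        // static imports keep the stubbing as readable as the production call site.
        doAnswer(invocation -> {
          System.out.println("reconstructing " + invocation.getArgument(0, String.class));
          return null;
        }).when(coordinator).reconstruct(any(), any());

        coordinator.reconstruct("container-1", "dn-2");
      }
    }
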
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 5dec1799b406..e5c0d5d2532e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -72,7 +72,6 @@
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
@@ -95,14 +94,19 @@
import static java.lang.Math.max;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds
- .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.ozone
- .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests for Block deletion.
@@ -246,23 +250,23 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
}
}, 1000, 10000);
// No containers with deleted blocks
- Assertions.assertTrue(containerIdsWithDeletedBlocks.isEmpty());
+ assertTrue(containerIdsWithDeletedBlocks.isEmpty());
// Delete transactionIds for the containers should be 0.
// NOTE: this test assumes that all the container is KetValueContainer. If
// other container types is going to be added, this test should be checked.
matchContainerTransactionIds();
- Assertions.assertEquals(0L,
+ assertEquals(0L,
metrics.getNumBlockDeletionTransactionCreated());
writeClient.deleteKey(keyArgs);
Thread.sleep(5000);
// The blocks should not be deleted in the DN as the container is open
- Throwable e = Assertions.assertThrows(AssertionError.class,
+ Throwable e = assertThrows(AssertionError.class,
() -> verifyBlocksDeleted(omKeyLocationInfoGroupList));
- Assertions.assertTrue(
+ assertTrue(
e.getMessage().startsWith("expected: <null> but was:"));
- Assertions.assertEquals(0L, metrics.getNumBlockDeletionTransactionSent());
+ assertEquals(0L, metrics.getNumBlockDeletionTransactionSent());
// close the containers which hold the blocks for the key
OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm);
@@ -291,7 +295,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
}, 2000, 30000);
// Few containers with deleted blocks
- Assertions.assertFalse(containerIdsWithDeletedBlocks.isEmpty());
+ assertFalse(containerIdsWithDeletedBlocks.isEmpty());
// Containers in the DN and SCM should have same delete transactionIds
matchContainerTransactionIds();
@@ -312,12 +316,12 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
cluster.restartHddsDatanode(0, true);
matchContainerTransactionIds();
- Assertions.assertEquals(metrics.getNumBlockDeletionTransactionCreated(),
+ assertEquals(metrics.getNumBlockDeletionTransactionCreated(),
metrics.getNumBlockDeletionTransactionCompleted());
- Assertions.assertTrue(metrics.getNumBlockDeletionCommandSent() >=
+ assertTrue(metrics.getNumBlockDeletionCommandSent() >=
metrics.getNumBlockDeletionCommandSuccess() +
metrics.getBNumBlockDeletionCommandFailure());
- Assertions.assertTrue(metrics.getNumBlockDeletionTransactionSent() >=
+ assertTrue(metrics.getNumBlockDeletionTransactionSent() >=
metrics.getNumBlockDeletionTransactionFailure() +
metrics.getNumBlockDeletionTransactionSuccess());
LOG.info(metrics.toString());
@@ -326,7 +330,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception {
for (int i = 5; i >= 0; i--) {
if (logCapturer.getOutput().contains("1(" + i + ")")) {
for (int j = 0; j <= i; j++) {
- Assertions.assertTrue(logCapturer.getOutput()
+ assertTrue(logCapturer.getOutput()
.contains("1(" + i + ")"));
}
break;
@@ -367,8 +371,8 @@ public void testContainerStatisticsAfterDelete() throws Exception {
final int valueSize = value.getBytes(UTF_8).length;
final int keyCount = 1;
containerInfos.stream().forEach(container -> {
- Assertions.assertEquals(valueSize, container.getUsedBytes());
- Assertions.assertEquals(keyCount, container.getNumberOfKeys());
+ assertEquals(valueSize, container.getUsedBytes());
+ assertEquals(keyCount, container.getNumberOfKeys());
});
OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm);
@@ -389,7 +393,7 @@ public void testContainerStatisticsAfterDelete() throws Exception {
containerMap.values().forEach(container -> {
KeyValueContainerData containerData =
(KeyValueContainerData)container.getContainerData();
- Assertions.assertEquals(0, containerData.getNumPendingDeletionBlocks());
+ assertEquals(0, containerData.getNumPendingDeletionBlocks());
});
});
@@ -398,7 +402,7 @@ public void testContainerStatisticsAfterDelete() throws Exception {
((EventQueue)scm.getEventQueue()).processAll(1000);
containerInfos = scm.getContainerManager().getContainers();
containerInfos.stream().forEach(container ->
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETING,
+ assertEquals(HddsProtos.LifeCycleState.DELETING,
container.getState()));
LogCapturer logCapturer = LogCapturer.captureLogs(
legacyEnabled ? LegacyReplicationManager.LOG : ReplicationManager.LOG);
@@ -422,14 +426,14 @@ public void testContainerStatisticsAfterDelete() throws Exception {
List<ContainerInfo> infos = scm.getContainerManager().getContainers();
try {
infos.stream().forEach(container -> {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
} catch (IOException e) {
- Assertions.fail(
+ fail(
"Container from SCM DB should be marked as DELETED");
}
});
@@ -477,8 +481,8 @@ public void testContainerStateAfterDNRestart() throws Exception {
final int keyCount = 1;
List<Long> containerIdList = new ArrayList<>();
containerInfos.stream().forEach(container -> {
- Assertions.assertEquals(valueSize, container.getUsedBytes());
- Assertions.assertEquals(keyCount, container.getNumberOfKeys());
+ assertEquals(valueSize, container.getUsedBytes());
+ assertEquals(keyCount, container.getNumberOfKeys());
containerIdList.add(container.getContainerID());
});
@@ -499,14 +503,14 @@ public void testContainerStateAfterDNRestart() throws Exception {
ContainerID containerId = ContainerID.valueOf(
containerInfos.get(0).getContainerID());
// Before restart container state is non-empty
- Assertions.assertFalse(getContainerFromDN(
+ assertFalse(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
// Restart DataNode
cluster.restartHddsDatanode(0, true);
// After restart also container state remains non-empty.
- Assertions.assertFalse(getContainerFromDN(
+ assertFalse(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
@@ -526,14 +530,14 @@ public void testContainerStateAfterDNRestart() throws Exception {
100, 10 * 1000);
// Container state should be empty now as key got deleted
- Assertions.assertTrue(getContainerFromDN(
+ assertTrue(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
// Restart DataNode
cluster.restartHddsDatanode(0, true);
// Container state should be empty even after restart
- Assertions.assertTrue(getContainerFromDN(
+ assertTrue(getContainerFromDN(
cluster.getHddsDatanodes().get(0), containerId.getId())
.getContainerData().isEmpty());
@@ -543,14 +547,14 @@ public void testContainerStateAfterDNRestart() throws Exception {
List<ContainerInfo> infos = scm.getContainerManager().getContainers();
try {
infos.stream().forEach(container -> {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
} catch (IOException e) {
- Assertions.fail(
+ fail(
"Container from SCM DB should be marked as DELETED");
}
});
@@ -607,8 +611,8 @@ public void testContainerDeleteWithInvalidKeyCount()
final int keyCount = 1;
List<Long> containerIdList = new ArrayList<>();
containerInfos.stream().forEach(container -> {
- Assertions.assertEquals(valueSize, container.getUsedBytes());
- Assertions.assertEquals(keyCount, container.getNumberOfKeys());
+ assertEquals(valueSize, container.getUsedBytes());
+ assertEquals(keyCount, container.getNumberOfKeys());
containerIdList.add(container.getContainerID());
});
@@ -635,7 +639,7 @@ public void testContainerDeleteWithInvalidKeyCount()
= scm.getContainerManager().getContainerReplicas(containerId);
// Ensure for all replica isEmpty are false in SCM
- Assertions.assertTrue(scm.getContainerManager().getContainerReplicas(
+ assertTrue(scm.getContainerManager().getContainerReplicas(
containerId).stream().
allMatch(replica -> !replica.isEmpty()));
@@ -680,14 +684,14 @@ public void testContainerDeleteWithInvalidKeyCount()
List<ContainerInfo> infos = scm.getContainerManager().getContainers();
try {
infos.stream().forEach(container -> {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
container.getState());
try {
- Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED,
+ assertEquals(HddsProtos.LifeCycleState.DELETED,
scm.getScmMetadataStore().getContainerTable()
.get(container.containerID()).getState());
} catch (IOException e) {
- Assertions.fail(
+ fail(
"Container from SCM DB should be marked as DELETED");
}
});
@@ -702,7 +706,7 @@ public void testContainerDeleteWithInvalidKeyCount()
private void verifyTransactionsCommitted() throws IOException {
scm.getScmBlockManager().getDeletedBlockLog();
for (long txnID = 1; txnID <= maxTransactionId; txnID++) {
- Assertions.assertNull(
+ assertNull(
scm.getScmMetadataStore().getDeletedBlocksTXTable().get(txnID));
}
}
@@ -716,15 +720,15 @@ private void matchContainerTransactionIds() throws IOException {
for (ContainerData containerData : containerDataList) {
long containerId = containerData.getContainerID();
if (containerIdsWithDeletedBlocks.contains(containerId)) {
- Assertions.assertTrue(
+ assertTrue(
scm.getContainerInfo(containerId).getDeleteTransactionId() > 0);
maxTransactionId = max(maxTransactionId,
scm.getContainerInfo(containerId).getDeleteTransactionId());
} else {
- Assertions.assertEquals(
+ assertEquals(
scm.getContainerInfo(containerId).getDeleteTransactionId(), 0);
}
- Assertions.assertEquals(
+ assertEquals(
((KeyValueContainerData) dnContainerSet.getContainer(containerId)
.getContainerData()).getDeleteTransactionId(),
scm.getContainerInfo(containerId).getDeleteTransactionId());
@@ -741,7 +745,7 @@ private void verifyBlocksCreated(
KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet
.getContainer(blockID.getContainerID()).getContainerData();
try (DBHandle db = BlockUtils.getDB(cData, conf)) {
- Assertions.assertNotNull(db.getStore().getBlockDataTable()
+ assertNotNull(db.getStore().getBlockDataTable()
.get(cData.getBlockKey(blockID.getLocalID())));
}
}, omKeyLocationInfoGroups);
@@ -763,11 +767,11 @@ private void verifyBlocksDeleted(
String blockKey = cData.getBlockKey(blockID.getLocalID());
BlockData blockData = blockDataTable.get(blockKey);
- Assertions.assertNull(blockData);
+ assertNull(blockData);
String deletingKey = cData.getDeletingBlockKey(
blockID.getLocalID());
- Assertions.assertNull(blockDataTable.get(deletingKey));
+ assertNull(blockDataTable.get(deletingKey));
}
containerIdsWithDeletedBlocks.add(blockID.getContainerID());
}, omKeyLocationInfoGroups);
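
Note: with the statically imported assertions in this file, assertThrows captures the AssertionError so its message can be inspected directly. A self-contained sketch of that pattern with an illustrative message:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class AssertThrowsStyleTest {
      @Test
      void returnsTheCaughtThrowable() {
        // assertThrows hands back the caught throwable, so message checks can
        // follow directly instead of a try/catch plus fail() arrangement.
        Throwable e = assertThrows(AssertionError.class, () -> {
          throw new AssertionError("expected: <null> but was: <blockData>");
        });
        assertTrue(e.getMessage().startsWith("expected:"));
      }
    }
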
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index ec47c76d94d2..7cb3c7797fa0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -45,9 +45,10 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -117,7 +118,7 @@ public void test() throws Exception {
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
- Assertions.assertFalse(isContainerClosed(cluster, containerId.getId()));
+ assertFalse(isContainerClosed(cluster, containerId.getId()));
DatanodeDetails datanodeDetails =
cluster.getHddsDatanodes().get(0).getDatanodeDetails();
@@ -135,7 +136,7 @@ public void test() throws Exception {
5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(cluster, containerId.getId()));
+ assertTrue(isContainerClosed(cluster, containerId.getId()));
}
private static Boolean isContainerClosed(MiniOzoneCluster cluster,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 332683658b14..b74e6f3c9116 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -57,7 +57,6 @@
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -77,6 +76,9 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests DeleteContainerCommand Handler.
@@ -165,8 +167,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -189,8 +190,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
@@ -217,8 +217,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
lingeringBlock.createNewFile();
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// Set container blockCount to 0 to mock that it is empty as per RocksDB
getContainerfromDN(hddsDatanodeService, containerId.getId())
@@ -243,10 +242,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
contains("Files still part of the container on delete"),
500,
5 * 2000);
- Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
- Assertions.assertTrue(beforeDeleteFailedCount <
- metrics.getContainerDeleteFailedNonEmpty());
+ assertTrue(!isContainerDeleted(hddsDatanodeService, containerId.getId()));
+ assertTrue(beforeDeleteFailedCount < metrics.getContainerDeleteFailedNonEmpty());
// Send the delete command. It should pass with force flag.
// Deleting a non-empty container should pass on the DN when the force flag
// is true
@@ -260,10 +257,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
- Assertions.assertTrue(beforeForceCount <
- metrics.getContainerForceDelete());
+ assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId()));
+ assertTrue(beforeForceCount < metrics.getContainerForceDelete());
kv.setCheckChunksFilePath(false);
}
@@ -297,8 +292,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -313,8 +307,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
@@ -341,8 +334,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
lingeringBlock.createNewFile();
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// send delete container to the datanode
SCMCommand<?> command = new DeleteContainerCommand(containerId.getId(),
@@ -357,8 +349,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId()));
}
@Test
@@ -384,8 +375,7 @@ public void testDeleteNonEmptyContainerBlockTable()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -418,11 +408,11 @@ public void testDeleteNonEmptyContainerBlockTable()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
+ assertTrue(isContainerClosed(hddsDatanodeService,
containerId.getId()));
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
+ assertFalse(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
long containerDeleteFailedNonEmptyBlockDB =
@@ -446,9 +436,9 @@ public void testDeleteNonEmptyContainerBlockTable()
contains("the container is not empty with blockCount"),
500,
5 * 2000);
- Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService,
+ assertTrue(!isContainerDeleted(hddsDatanodeService,
containerId.getId()));
- Assertions.assertTrue(containerDeleteFailedNonEmptyBlockDB <
+ assertTrue(containerDeleteFailedNonEmptyBlockDB <
metrics.getContainerDeleteFailedNonEmpty());
// Now empty the container Dir and try with a non-empty block table
@@ -470,8 +460,7 @@ public void testDeleteNonEmptyContainerBlockTable()
cluster.getStorageContainerManager().getScmContext().getTermOfLeader());
nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command);
Thread.sleep(5000);
- Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// Send the delete command. It should pass with force flag.
long beforeForceCount = metrics.getContainerForceDelete();
command = new DeleteContainerCommand(containerId.getId(), true);
@@ -483,9 +472,9 @@ public void testDeleteNonEmptyContainerBlockTable()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
+ assertTrue(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
- Assertions.assertTrue(beforeForceCount <
+ assertTrue(beforeForceCount <
metrics.getContainerForceDelete());
}
@@ -507,8 +496,7 @@ public void testContainerDeleteWithInvalidBlockCount()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
NodeManager nodeManager =
@@ -525,12 +513,10 @@ public void testContainerDeleteWithInvalidBlockCount()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// Clear block table
clearBlocksTable(getContainerfromDN(hddsDatanodeService,
@@ -561,8 +547,7 @@ public void testContainerDeleteWithInvalidBlockCount()
GenericTestUtils.waitFor(() ->
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId()));
}
@@ -612,8 +597,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
HddsDatanodeService hddsDatanodeService =
cluster.getHddsDatanodes().get(0);
- Assertions.assertFalse(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId()));
DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails();
@@ -630,12 +614,10 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
500, 5 * 1000);
//double check if it's really closed (waitFor also throws an exception)
- Assertions.assertTrue(isContainerClosed(hddsDatanodeService,
- containerId.getId()));
+ assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId()));
// Check container exists before sending delete container command
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
- containerId.getId()));
+ assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId()));
// send delete container to the datanode
SCMCommand<?> command = new DeleteContainerCommand(containerId.getId(),
@@ -656,8 +638,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
ContainerMetrics metrics =
hddsDatanodeService
.getDatanodeStateMachine().getContainer().getMetrics();
- Assertions.assertEquals(1,
- metrics.getContainerDeleteFailedNonEmpty());
+ assertEquals(1, metrics.getContainerDeleteFailedNonEmpty());
// Delete key, which will make isEmpty flag to true in containerData
objectStore.getVolume(volumeName)
@@ -678,7 +659,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer()
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
+ assertTrue(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
}
@@ -723,7 +704,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer()
}
}
- Assertions.assertFalse(isContainerDeleted(hddsDatanodeService,
+ assertFalse(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
@@ -738,7 +719,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer()
isContainerDeleted(hddsDatanodeService, containerId.getId()),
500, 5 * 1000);
- Assertions.assertTrue(isContainerDeleted(hddsDatanodeService,
+ assertTrue(isContainerDeleted(hddsDatanodeService,
containerId.getId()));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
index c47f09930993..23382b2abe6c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java
@@ -30,7 +30,6 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -43,6 +42,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test the behaviour of the datanode and scm when communicating
@@ -101,7 +101,7 @@ public void test() throws Exception {
//a new key is created, but the datanode default REFRESH_PERIOD is 1 hour,
//still the cache is updated, so the scm will eventually get the new
//used space from the datanode through node report.
- Assertions.assertTrue(cluster.getStorageContainerManager()
+ assertTrue(cluster.getStorageContainerManager()
.getScmNodeManager().getUsageInfo(datanodeDetails)
.getScmNodeStat().getScmUsed().isEqual(currentScmUsed));
@@ -116,7 +116,7 @@ public void test() throws Exception {
//after waiting for several node report , this usage info
//in SCM should be updated as we have updated the DN's cached usage info.
- Assertions.assertTrue(cluster.getStorageContainerManager()
+ assertTrue(cluster.getStorageContainerManager()
.getScmNodeManager().getUsageInfo(datanodeDetails)
.getScmNodeStat().getScmUsed().isGreater(currentScmUsed));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index cd6dfb171c05..76a0f1ed2142 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -66,7 +66,6 @@
import org.apache.ratis.util.function.CheckedBiFunction;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Assertions;
/**
* This class tests the metrics of ContainerStateMachine.
@@ -142,7 +141,7 @@ static void runContainerStateMachineMetrics(
pipeline, blockID, 1024);
ContainerCommandResponseProto response =
client.sendCommand(writeChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
metric = getMetrics(CSMMetrics.SOURCE_NAME +
@@ -160,7 +159,7 @@ static void runContainerStateMachineMetrics(
ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
.getWriteChunk());
response = client.sendCommand(readChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
metric = getMetrics(CSMMetrics.SOURCE_NAME +
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 0b83c650fe0a..d4900bb48783 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -55,7 +55,8 @@
import static org.apache.ozone.test.MetricsAsserts.assertCounter;
import static org.apache.ozone.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.ozone.test.MetricsAsserts.getMetrics;
-import org.junit.jupiter.api.Assertions;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -121,7 +122,7 @@ public void testContainerMetrics() throws Exception {
pipeline, blockID, 1024);
ContainerCommandResponseProto response =
client.sendCommand(writeChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS,
+ assertEquals(ContainerProtos.Result.SUCCESS,
response.getResult());
//Read Chunk
@@ -129,7 +130,7 @@ public void testContainerMetrics() throws Exception {
ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
.getWriteChunk());
response = client.sendCommand(readChunkRequest);
- Assertions.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
+ assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
MetricsRecordBuilder containerMetrics = getMetrics(
"StorageContainerMetrics");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
index 06e1f933749a..b3f3030d70ad 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java
@@ -17,14 +17,12 @@
package org.apache.hadoop.ozone.container.metrics;
-import org.apache.commons.text.WordUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -34,10 +32,12 @@
import java.io.IOException;
import java.util.UUID;
+import static org.apache.commons.text.WordUtils.capitalize;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.COMMAND_DISPATCHER_QUEUE_PREFIX;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.STATE_CONTEXT_COMMAND_QUEUE_PREFIX;
import static org.apache.ozone.test.MetricsAsserts.getLongGauge;
import static org.apache.ozone.test.MetricsAsserts.getMetrics;
+import static org.assertj.core.api.Assertions.assertThat;
/**
* Test for queue metrics of datanodes.
@@ -89,14 +89,12 @@ public void init() throws Exception {
@Test
public void testQueueMetrics() {
-
for (SCMCommandProto.Type type: SCMCommandProto.Type.values()) {
- Assertions.assertTrue(
- getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX +
- WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0);
- Assertions.assertTrue(
- getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX +
- WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0);
+ String typeSize = capitalize(String.valueOf(type)) + "Size";
+ assertThat(getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX + typeSize))
+ .isGreaterThanOrEqualTo(0);
+ assertThat(getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + typeSize))
+ .isGreaterThanOrEqualTo(0);
}
}
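
Note: the gauge checks now use AssertJ's isGreaterThanOrEqualTo, which shows the offending value on failure. A minimal sketch with a hypothetical gauge lookup (getGauge below is a stand-in, not the MetricsAsserts API; the metric names are illustrative):

    import static org.assertj.core.api.Assertions.assertThat;

    import org.junit.jupiter.api.Test;

    class GaugeAssertionStyleTest {

      // Hypothetical gauge lookup standing in for MetricsAsserts.getLongGauge.
      private long getGauge(String name) {
        return 0L;
      }

      @Test
      void queueSizesAreNeverNegative() {
        String typeSize = "CloseContainerCommandSize";
        // isGreaterThanOrEqualTo includes the offending value in the failure
        // message, unlike assertTrue(gauge >= 0).
        assertThat(getGauge("StateContextCommandQueue" + typeSize))
            .isGreaterThanOrEqualTo(0);
        assertThat(getGauge("CommandDispatcherQueue" + typeSize))
            .isGreaterThanOrEqualTo(0);
      }
    }
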
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index 841f344fc346..b3c8b732c16c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -352,20 +351,15 @@ private Token createContainer(
}
private long createAndCloseContainer(
- XceiverClientSpi client, boolean useToken) {
+ XceiverClientSpi client, boolean useToken) throws IOException {
long id = getTestContainerID();
- try {
- Token
- token = createContainer(client, useToken, id);
-
- ContainerCommandRequestProto request =
- getCloseContainer(client.getPipeline(), id, token);
- ContainerCommandResponseProto response = client.sendCommand(request);
- assertNotNull(response);
- assertSame(response.getResult(), ContainerProtos.Result.SUCCESS);
- } catch (Exception e) {
- Assertions.fail(e);
- }
+ Token token = createContainer(client, useToken, id);
+
+ ContainerCommandRequestProto request =
+ getCloseContainer(client.getPipeline(), id, token);
+ ContainerCommandResponseProto response = client.sendCommand(request);
+ assertNotNull(response);
+ assertSame(response.getResult(), ContainerProtos.Result.SUCCESS);
return id;
}
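
Note: createAndCloseContainer now declares IOException instead of wrapping the body in try/catch and calling Assertions.fail(e). A small sketch of why letting the exception propagate is enough, with illustrative names (sendCommand below is a stand-in, not the XceiverClientSpi API):

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    class PropagateExceptionStyleTest {

      // Hypothetical stand-in for a client call that may fail with IOException.
      private String sendCommand() throws IOException {
        return "SUCCESS";
      }

      @Test
      void unexpectedFailuresSurfaceWithTheirOwnStackTrace() throws IOException {
        // Declaring the exception on the test method lets JUnit fail the test
        // with the original cause, so a catch block ending in fail(e) adds nothing.
        String response = sendCommand();
        assertNotNull(response);
      }
    }
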
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 0451ba5c98e1..2e3cefb94fe7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -67,7 +67,6 @@
import org.apache.ratis.util.function.CheckedBiConsumer;
import org.apache.ratis.util.function.CheckedBiFunction;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -75,6 +74,7 @@
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
/**
* Test Containers.
@@ -170,7 +170,7 @@ static void runTestClientServer(
ContainerTestHelper
.getCreateContainerRequest(
ContainerTestHelper.getTestContainerID(), pipeline);
- Assertions.assertNotNull(request.getTraceID());
+ assertNotNull(request.getTraceID());
client.sendCommand(request);
} finally {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 2880d90db2fa..3e2e092c2f5a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -95,24 +95,22 @@
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.ratis.rpc.RpcType;
-
-import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
-
import org.apache.ratis.util.ExitUtils;
import org.apache.ratis.util.function.CheckedBiConsumer;
import org.apache.ratis.util.function.CheckedBiFunction;
+
+import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-
/**
* Test Container servers when security is enabled.
*/
@@ -320,7 +318,7 @@ private static void assertFailsTokenVerification(XceiverClientSpi client,
String msg = response.getMessage();
assertTrue(msg.contains(BLOCK_TOKEN_VERIFICATION_FAILED.name()), msg);
} else {
- final Throwable t = Assertions.assertThrows(Throwable.class,
+ final Throwable t = assertThrows(Throwable.class,
() -> client.sendCommand(request));
assertRootCauseMessage(BLOCK_TOKEN_VERIFICATION_FAILED.name(), t);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java
index eae12fd4dc92..cca47e17e407 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java
@@ -30,12 +30,12 @@
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
/**
* Test Datanode Ratis log parser.
@@ -78,14 +78,14 @@ public void testRatisLogParsing() throws Exception {
File currentDir = new File(pipelineDir, "current");
File logFile = new File(currentDir, "log_inprogress_0");
GenericTestUtils.waitFor(logFile::exists, 100, 15000);
- Assertions.assertTrue(logFile.isFile());
+ assertThat(logFile).isFile();
DatanodeRatisLogParser datanodeRatisLogParser =
new DatanodeRatisLogParser();
datanodeRatisLogParser.setSegmentFile(logFile);
datanodeRatisLogParser.parseRatisLogs(
DatanodeRatisLogParser::smToContainerLogString);
- Assertions.assertTrue(out.toString(StandardCharsets.UTF_8.name())
- .contains("Num Total Entries:"));
+ assertThat(out.toString(StandardCharsets.UTF_8.name()))
+ .contains("Num Total Entries:");
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
index 719c38816f4c..0273deb50e61 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java
@@ -34,7 +34,6 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -56,6 +55,7 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.assertj.core.api.Assertions.assertThat;
/**
* This class tests datanode can tolerate configured num of failed volumes.
@@ -141,8 +141,8 @@ public void testDNCorrectlyHandlesVolumeFailureOnStartup() throws Exception {
// cluster.
GenericTestUtils.waitFor(() -> exitCapturer.getOutput()
.contains("Exiting with status 1: ExitException"), 500, 60000);
- Assertions.assertTrue(dsmCapturer.getOutput()
- .contains("DatanodeStateMachine Shutdown due to too many bad volumes"));
+ assertThat(dsmCapturer.getOutput())
+ .contains("DatanodeStateMachine Shutdown due to too many bad volumes");
// restore bad volumes
DatanodeTestUtils.restoreBadRootDir(volRootDir0);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java
index 045470647351..5244bb857905 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java
@@ -34,7 +34,6 @@
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.server.RaftServer;
import org.apache.ratis.server.raftlog.RaftLog;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
@@ -46,6 +45,9 @@
import java.io.IOException;
import java.net.URI;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
/**
* Test for OmBucketReadWriteFileOps.
*/
@@ -207,7 +209,7 @@ private void verifyFileCreation(int expectedCount, FileStatus[] fileStatuses,
}
}
}
- Assertions.assertEquals(expectedCount, actual, "Mismatch Count!");
+ assertEquals(expectedCount, actual, "Mismatch Count!");
}
private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) {
@@ -218,7 +220,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) {
omLockMetrics.getLongestReadLockWaitingTimeMs());
int readWaitingSamples =
Integer.parseInt(readLockWaitingTimeMsStat.split(" ")[2]);
- Assertions.assertTrue(readWaitingSamples > 0, "Read Lock Waiting Samples should be positive");
+ assertThat(readWaitingSamples).isPositive();
String readLockHeldTimeMsStat = omLockMetrics.getReadLockHeldTimeMsStat();
LOG.info("Read Lock Held Time Stat: " + readLockHeldTimeMsStat);
@@ -226,7 +228,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) {
omLockMetrics.getLongestReadLockHeldTimeMs());
int readHeldSamples =
Integer.parseInt(readLockHeldTimeMsStat.split(" ")[2]);
- Assertions.assertTrue(readHeldSamples > 0, "Read Lock Held Samples should be positive");
+ assertThat(readHeldSamples).isPositive();
String writeLockWaitingTimeMsStat =
omLockMetrics.getWriteLockWaitingTimeMsStat();
@@ -235,7 +237,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) {
omLockMetrics.getLongestWriteLockWaitingTimeMs());
int writeWaitingSamples =
Integer.parseInt(writeLockWaitingTimeMsStat.split(" ")[2]);
- Assertions.assertTrue(writeWaitingSamples > 0, "Write Lock Waiting Samples should be positive");
+ assertThat(writeWaitingSamples).isPositive();
String writeLockHeldTimeMsStat = omLockMetrics.getWriteLockHeldTimeMsStat();
LOG.info("Write Lock Held Time Stat: " + writeLockHeldTimeMsStat);
@@ -243,7 +245,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) {
omLockMetrics.getLongestWriteLockHeldTimeMs());
int writeHeldSamples =
Integer.parseInt(writeLockHeldTimeMsStat.split(" ")[2]);
- Assertions.assertTrue(writeHeldSamples > 0, "Write Lock Held Samples should be positive");
+ assertThat(writeHeldSamples).isPositive();
}
private static class ParameterBuilder {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java
index 12844c23cd7b..f1f5aabe38ac 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java
@@ -197,7 +197,6 @@ public void testMultipleSnapshotKeyReclaim() throws Exception {
}
@SuppressWarnings("checkstyle:MethodLength")
- @Flaky("HDDS-9023")
@Test
public void testSnapshotWithFSO() throws Exception {
Table dirTable =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java
deleted file mode 100644
index 6b39b76c5466..000000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *