diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java
new file mode 100644
index 000000000000..7d5b610b0875
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.base.Strings;
+import jakarta.annotation.Nullable;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * Decommission-specific stateless utility functions.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public final class DecommissionUtils {
+
+ private static final Logger LOG = LoggerFactory.getLogger(DecommissionUtils.class);
+
+ private DecommissionUtils() {
+ }
+
+ /**
+   * Returns the decommissioning nodes matching the given uuid or ipAddress.
+   * If neither is provided, all decommissioning nodes are returned.
+ *
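+   * A minimal usage sketch (the query below mirrors how callers obtain the stream;
+   * the {@code scmClient} and {@code uuid} names are illustrative):
+   * <pre>{@code
+   * Stream<HddsProtos.Node> nodes = scmClient.queryNode(DECOMMISSIONING,
+   *     null, HddsProtos.QueryScope.CLUSTER, "").stream();
+   * List<HddsProtos.Node> matching =
+   *     DecommissionUtils.getDecommissioningNodesList(nodes, uuid, null);
+   * }</pre>
+   *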
+   * @param allNodes all datanodes that are currently in DECOMMISSIONING status.
+   * @param uuid node uuid to match; may be null or empty.
+   * @param ipAddress node ipAddress to match; may be null or empty.
+   * @return the list of decommissioning nodes matching uuid or ipAddress.
+ */
+  public static List<HddsProtos.Node> getDecommissioningNodesList(Stream<HddsProtos.Node> allNodes,
+                                                                  String uuid,
+                                                                  String ipAddress) {
+    List<HddsProtos.Node> decommissioningNodes;
+ if (!Strings.isNullOrEmpty(uuid)) {
+ decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid()
+ .equals(uuid)).collect(Collectors.toList());
+ } else if (!Strings.isNullOrEmpty(ipAddress)) {
+ decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress()
+ .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList());
+ } else {
+ decommissioningNodes = allNodes.collect(Collectors.toList());
+ }
+ return decommissioningNodes;
+ }
+
+ /**
+   * Returns the "beans" JSON node of the datanode decommission metrics.
+   *
+   * @param metricsJson metrics JSON string returned by the SCM NodeDecommissionMetrics bean.
+   * @return the first element of the "beans" array in the metrics JSON.
+   * @throws IOException if the metrics JSON cannot be parsed.
+ */
+ public static JsonNode getBeansJsonNode(String metricsJson) throws IOException {
+ JsonNode jsonNode;
+ ObjectMapper objectMapper = new ObjectMapper();
+ JsonFactory factory = objectMapper.getFactory();
+ JsonParser parser = factory.createParser(metricsJson);
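+    // The metrics JSON follows the Hadoop JMX servlet shape {"beans": [{...}]};
+    // the decommission metrics are read from the first element of the "beans" array.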
+ jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0);
+ return jsonNode;
+ }
+
+ /**
+   * Returns the total number of nodes currently in decommissioning or maintenance.
+   *
+   * @param jsonNode the "beans" node returned by {@link #getBeansJsonNode(String)}.
+   * @return the value of DecommissioningMaintenanceNodesTotal, or -1 if the metric is absent.
+ */
+ public static int getNumDecomNodes(JsonNode jsonNode) {
+ int numDecomNodes;
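+    // "DecommissioningMaintenanceNodesTotal" is published by SCM's NodeDecommissionMetrics;
+    // -1 signals that the metric is not present in the supplied JSON.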
+ JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal");
+ numDecomNodes = (totalDecom == null ? -1 : Integer.parseInt(totalDecom.toString()));
+ return numDecomNodes;
+ }
+
+ /**
+   * Populates the given map with the following info attributes for the datanode:
+   * - decommissionStartTime
+   * - numOfUnclosedPipelines
+   * - numOfUnderReplicatedContainers
+   * - numOfUnclosedContainers
+   *
+   * @param datanode datanode whose decommission progress is being reported.
+   * @param counts "beans" JSON node holding the per-node decommission metrics.
+   * @param numDecomNodes number of nodes currently decommissioning, as reported by the metrics.
+   * @param countsMap map to populate with the attributes listed above.
+   * @param errMsg error message used when an expected metric is missing.
+   * @return the populated map, or null if no metrics entry matches the datanode.
+   * @throws IOException if any of the expected metrics is missing for the datanode.
+ */
+ @Nullable
+  public static Map<String, Object> getCountsMap(DatanodeDetails datanode, JsonNode counts, int numDecomNodes,
+                                                 Map<String, Object> countsMap, String errMsg)
+ throws IOException {
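+    // NodeDecommissionMetrics publishes per-node counters keyed by a numeric suffix,
+    // e.g. "tag.datanode.1", "PipelinesWaitingToCloseDN.1"; scan suffixes 1..numDecomNodes
+    // until the entry whose hostname tag matches this datanode is found.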
+ for (int i = 1; i <= numDecomNodes; i++) {
+ if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) {
+ JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i);
+ JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i);
+ JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i);
+ JsonNode startTimeDN = counts.get("StartTimeDN." + i);
+ if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) {
+ throw new IOException(errMsg);
+ }
+
+ int pipelines = Integer.parseInt(pipelinesDN.toString());
+ double underReplicated = Double.parseDouble(underReplicatedDN.toString());
+ double unclosed = Double.parseDouble(unclosedDN.toString());
+ long startTime = Long.parseLong(startTimeDN.toString());
+ Date date = new Date(startTime);
+ DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z");
+ countsMap.put("decommissionStartTime", formatter.format(date));
+ countsMap.put("numOfUnclosedPipelines", pipelines);
+ countsMap.put("numOfUnderReplicatedContainers", underReplicated);
+ countsMap.put("numOfUnclosedContainers", unclosed);
+ return countsMap;
+ }
+ }
+ return null;
+ }
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
index b146d68a587f..18ddbd086d7a 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
@@ -17,12 +17,10 @@
*/
package org.apache.hadoop.hdds.scm.cli.datanode;
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Strings;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.DecommissionUtils;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
@@ -32,11 +30,8 @@
import picocli.CommandLine;
import java.io.IOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
import java.util.LinkedHashMap;
import java.util.ArrayList;
-import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -72,29 +67,25 @@ public class DecommissionStatusSubCommand extends ScmSubcommand {
@Override
public void execute(ScmClient scmClient) throws IOException {
- List decommissioningNodes;
Stream allNodes = scmClient.queryNode(DECOMMISSIONING,
null, HddsProtos.QueryScope.CLUSTER, "").stream();
+    List<HddsProtos.Node> decommissioningNodes =
+ DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress);
if (!Strings.isNullOrEmpty(uuid)) {
- decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid()
- .equals(uuid)).collect(Collectors.toList());
if (decommissioningNodes.isEmpty()) {
System.err.println("Datanode: " + uuid + " is not in DECOMMISSIONING");
return;
}
} else if (!Strings.isNullOrEmpty(ipAddress)) {
- decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress()
- .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList());
if (decommissioningNodes.isEmpty()) {
System.err.println("Datanode: " + ipAddress + " is not in " +
"DECOMMISSIONING");
return;
}
} else {
- decommissioningNodes = allNodes.collect(Collectors.toList());
if (!json) {
System.out.println("\nDecommission Status: DECOMMISSIONING - " +
- decommissioningNodes.size() + " node(s)");
+ decommissioningNodes.size() + " node(s)");
}
}
@@ -102,12 +93,8 @@ public void execute(ScmClient scmClient) throws IOException {
int numDecomNodes = -1;
JsonNode jsonNode = null;
if (metricsJson != null) {
- ObjectMapper objectMapper = new ObjectMapper();
- JsonFactory factory = objectMapper.getFactory();
- JsonParser parser = factory.createParser(metricsJson);
- jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0);
- JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal");
- numDecomNodes = (totalDecom == null ? -1 : Integer.parseInt(totalDecom.toString()));
+ jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson);
+ numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode);
}
if (json) {
@@ -164,28 +151,9 @@ private Map getCounts(DatanodeDetails datanode, JsonNode counts,
Map countsMap = new LinkedHashMap<>();
String errMsg = getErrorMessage() + datanode.getHostName();
try {
- for (int i = 1; i <= numDecomNodes; i++) {
- if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) {
- JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i);
- JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i);
- JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i);
- JsonNode startTimeDN = counts.get("StartTimeDN." + i);
- if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) {
- throw new IOException(errMsg);
- }
-
- int pipelines = Integer.parseInt(pipelinesDN.toString());
- double underReplicated = Double.parseDouble(underReplicatedDN.toString());
- double unclosed = Double.parseDouble(unclosedDN.toString());
- long startTime = Long.parseLong(startTimeDN.toString());
- Date date = new Date(startTime);
- DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z");
- countsMap.put("decommissionStartTime", formatter.format(date));
- countsMap.put("numOfUnclosedPipelines", pipelines);
- countsMap.put("numOfUnderReplicatedContainers", underReplicated);
- countsMap.put("numOfUnclosedContainers", unclosed);
- return countsMap;
- }
+ countsMap = DecommissionUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg);
+ if (countsMap != null) {
+ return countsMap;
}
System.err.println(errMsg);
} catch (IOException e) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
index ab87bda4412c..5c9e40396358 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java
@@ -185,6 +185,7 @@ public final class ReconServerConfigKeys {
public static final int
OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT = 3;
+
/**
* Private constructor for utility class.
*/
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index d384c761dd5e..a0bcfd302554 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -18,7 +18,10 @@
package org.apache.hadoop.ozone.recon.api;
+import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.DecommissionUtils;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
@@ -32,8 +35,10 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata;
import org.apache.hadoop.ozone.recon.api.types.DatanodePipeline;
import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
@@ -48,6 +53,7 @@
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
@@ -55,16 +61,21 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+
/**
* Endpoint to fetch details about datanodes.
*/
@@ -78,14 +89,18 @@ public class NodeEndpoint {
private ReconNodeManager nodeManager;
private ReconPipelineManager pipelineManager;
private ReconContainerManager reconContainerManager;
+ private StorageContainerLocationProtocol scmClient;
+ private String errorMessage = "Error getting pipeline and container metrics for ";
@Inject
- NodeEndpoint(OzoneStorageContainerManager reconSCM) {
+ NodeEndpoint(OzoneStorageContainerManager reconSCM,
+ StorageContainerLocationProtocol scmClient) {
this.nodeManager =
(ReconNodeManager) reconSCM.getScmNodeManager();
- this.reconContainerManager =
+ this.reconContainerManager =
(ReconContainerManager) reconSCM.getContainerManager();
this.pipelineManager = (ReconPipelineManager) reconSCM.getPipelineManager();
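+    // scmClient lets Recon query SCM directly for decommission status and metrics
+    // (see getDecommissionStatusResponse below).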
+ this.scmClient = scmClient;
}
/**
@@ -325,4 +340,112 @@ private void checkContainers(DatanodeDetails nodeByUuid, AtomicBoolean isContain
}
});
}
+
+ /**
+   * This GET API returns information about all datanodes for which decommissioning has been initiated.
+ * @return the wrapped Response output
+ */
+ @GET
+ @Path("/decommission/info")
+ public Response getDatanodesDecommissionInfo() {
+ try {
+ return getDecommissionStatusResponse(null, null);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+   * This GET API returns decommissioning information for a specific datanode.
+   * The API accepts either uuid or ipAddress; uuid takes precedence if both are provided.
+ * @return the wrapped Response output
+ */
+ @GET
+ @Path("/decommission/info/datanode")
+ public Response getDecommissionInfoForDatanode(@QueryParam("uuid") String uuid,
+ @QueryParam("ipAddress") String ipAddress) {
+ if (StringUtils.isEmpty(uuid)) {
+      Preconditions.checkNotNull(ipAddress, "Either uuid or ipAddress of a datanode must be provided.");
+      Preconditions.checkArgument(!ipAddress.isEmpty(),
+          "Either uuid or ipAddress of a datanode must be provided.");
+ }
+ try {
+ return getDecommissionStatusResponse(uuid, ipAddress);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private Response getDecommissionStatusResponse(String uuid, String ipAddress) throws IOException {
+ Response.ResponseBuilder builder = Response.status(Response.Status.OK);
+ Map responseMap = new HashMap<>();
+    Stream<HddsProtos.Node> allNodes = scmClient.queryNode(DECOMMISSIONING,
+ null, HddsProtos.QueryScope.CLUSTER, "", ClientVersion.CURRENT_VERSION).stream();
+    List<HddsProtos.Node> decommissioningNodes =
+ DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress);
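+    // The decommission counters come from SCM's NodeDecommissionMetrics JMX bean;
+    // getMetrics may return null if the bean is unavailable, leaving the defaults below.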
+ String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
+ int numDecomNodes = -1;
+ JsonNode jsonNode = null;
+ if (metricsJson != null) {
+ jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson);
+ numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode);
+ }
+ List