From 523cc1d96acf4aeb2074d08a5e6acb7f1b89dccf Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 13 Mar 2024 20:28:59 +0530 Subject: [PATCH 01/16] HDDS-10514. Recon - Provide DN decommissioning detailed status and info inline with current CLI command output. --- .../DecommissionStatusSubCommand.java | 74 ++++++++++++++++- .../hadoop/ozone/recon/api/NodeEndpoint.java | 56 +++++++++++++ .../recon/api/types/DatanodeMetadata.java | 31 +++++++ .../recon/api/types/DatanodeMetrics.java | 81 +++++++++++++++++++ .../types/DecommissionStatusInfoResponse.java | 72 +++++++++++++++++ 5 files changed, 312 insertions(+), 2 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index 464b08099db7..5158404953da 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -28,11 +28,14 @@ import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; import java.io.IOException; import java.text.DateFormat; import java.text.SimpleDateFormat; +import java.util.LinkedHashMap; +import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; @@ -62,6 +65,11 @@ public class DecommissionStatusSubCommand extends ScmSubcommand { defaultValue = "") private String ipAddress; + @CommandLine.Option(names = { "--json" }, + description = "Show output in json format", + defaultValue = "false") + private boolean json; + @Override public void execute(ScmClient scmClient) throws IOException { List decommissioningNodes; @@ -84,8 +92,10 @@ public void execute(ScmClient scmClient) throws IOException { } } else { decommissioningNodes = allNodes.collect(Collectors.toList()); - System.out.println("\nDecommission Status: DECOMMISSIONING - " + - decommissioningNodes.size() + " node(s)"); + if (!json) { + System.out.println("\nDecommission Status: DECOMMISSIONING - " + + decommissioningNodes.size() + " node(s)"); + } } String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); @@ -100,6 +110,22 @@ public void execute(ScmClient scmClient) throws IOException { numDecomNodes = (totalDecom == null ? 
-1 : Integer.parseInt(totalDecom.toString())); } + if (json) { + List> decommissioningNodesDetails = new ArrayList<>(); + + for (HddsProtos.Node node : decommissioningNodes) { + DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( + node.getNodeID()); + Map datanodeMap = new LinkedHashMap<>(); + datanodeMap.put("datanodeDetails", getDatanodeDetails(datanode)); + datanodeMap.put("metrics", getCounts(datanode, jsonNode, numDecomNodes)); + datanodeMap.put("containers", getContainers(scmClient, datanode)); + decommissioningNodesDetails.add(datanodeMap); + } + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(decommissioningNodesDetails)); + return; + } + for (HddsProtos.Node node : decommissioningNodes) { DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( node.getNodeID()); @@ -139,4 +165,48 @@ private void printCounts(DatanodeDetails datanode, JsonNode counts, int numDecom System.err.println("Error getting pipeline and container counts for " + datanode.getHostName()); } } + + private Map getDatanodeDetails(DatanodeDetails datanode) { + Map detailsMap = new LinkedHashMap<>(); + detailsMap.put("uuid", datanode.getUuid().toString()); + detailsMap.put("networkLocation", datanode.getNetworkLocation()); + detailsMap.put("ipAddress", datanode.getIpAddress()); + detailsMap.put("hostname", datanode.getHostName()); + return detailsMap; + } + + private Map getCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { + Map countsMap = new LinkedHashMap<>(); + try { + for (int i = 1; i <= numDecomNodes; i++) { + if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) { + long startTime = Long.parseLong(counts.get("StartTimeDN." + i).toString()); + Date date = new Date(startTime); + DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); + countsMap.put("decommissionStartTime", formatter.format(date)); + countsMap.put("numOfUnclosedPipelines", + Integer.parseInt(counts.get("PipelinesWaitingToCloseDN." + i).toString())); + countsMap.put("numOfUnderReplicatedContainers", + Double.parseDouble(counts.get("UnderReplicatedDN." + i).toString())); + countsMap.put("numOfUnclosedContainers", + Double.parseDouble(counts.get("UnclosedContainersDN." + i).toString())); + return countsMap; + } + } + System.err.println("Error getting pipeline and container metrics for " + datanode.getHostName()); + } catch (NullPointerException ex) { + System.err.println("Error getting pipeline and container metrics for " + datanode.getHostName()); + } + return countsMap; + } + + private Map getContainers(ScmClient scmClient, DatanodeDetails datanode) throws IOException { + Map> containers = scmClient.getContainersOnDecomNode(datanode); + return containers.entrySet().stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue().stream(). + map(ContainerID::toString). 
+ collect(Collectors.toList()))); + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 968bfbc46343..b11ab0c78447 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.recon.api; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -33,6 +35,7 @@ import org.apache.hadoop.ozone.recon.api.types.DatanodePipeline; import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport; import org.apache.hadoop.ozone.recon.api.types.DatanodesResponse; +import org.apache.hadoop.ozone.recon.api.types.DecommissionStatusInfoResponse; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; @@ -42,13 +45,17 @@ import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import java.io.BufferedReader; import java.io.IOException; +import java.io.InputStreamReader; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; +import org.apache.hadoop.security.SecurityUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -171,4 +178,53 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { long committed = nodeStat.getCommitted().get(); return new DatanodeStorageReport(capacity, used, remaining, committed); } + + @GET + @Path("/decommission/info") + public Response getDatanodesDecommissionInfo() { + // Command to execute + List command = Arrays.asList("ozone", "admin", "datanode", "status", "decommission", "--json"); + Response.ResponseBuilder builder = Response.status(Response.Status.OK); + ProcessBuilder pb = new ProcessBuilder(command); + try { + SecurityUtil.doAsLoginUser(() -> { + Process process = pb.start(); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { + String processOutput = readProcessStream(reader); + // Wait for the process to complete + int exitCode = process.waitFor(); + LOG.info("Datanode decommission status info command exitcode: {}", exitCode); + if (exitCode != 0) { + try (BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream()))) { + String processErrorOutput = readProcessStream(errReader); + builder.status(exitCode); + builder.entity("Datanode decommission status info command is not successful : " + processErrorOutput); + // Build and return the response + return builder.build(); + } + } + // Create ObjectMapper + ObjectMapper objectMapper = new ObjectMapper(); + LOG.info("processOutput: {}", processOutput); + // Deserialize JSON to Java object + List decommissionStatusInfoResponseList = + objectMapper.readValue(processOutput, new TypeReference>() { }); + builder.entity(decommissionStatusInfoResponseList); + } + return builder.build(); + }); + } catch (IOException ioException) { + LOG.error("Failed to run datanode decommission status 
info command. ", ioException); + } + return builder.build(); + } + + private static String readProcessStream(BufferedReader reader) throws IOException { + StringBuilder processOutputBuilder = new StringBuilder(); + String line; + while ((line = reader.readLine()) != null) { + processOutputBuilder.append(line); + } + return processOutputBuilder.toString(); + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 4927c4a1e86a..7b86afdff496 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.recon.api.types; +import com.fasterxml.jackson.annotation.JsonInclude; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -39,47 +40,66 @@ public final class DatanodeMetadata { private String hostname; @XmlElement(name = "state") + @JsonInclude(JsonInclude.Include.NON_NULL) private NodeState state; @XmlElement(name = "opState") + @JsonInclude(JsonInclude.Include.NON_NULL) private NodeOperationalState opState; @XmlElement(name = "lastHeartbeat") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long lastHeartbeat; @XmlElement(name = "storageReport") + @JsonInclude(JsonInclude.Include.NON_NULL) private DatanodeStorageReport datanodeStorageReport; @XmlElement(name = "pipelines") + @JsonInclude(JsonInclude.Include.NON_NULL) private List pipelines; @XmlElement(name = "containers") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int containers; @XmlElement(name = "openContainers") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int openContainers; @XmlElement(name = "leaderCount") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int leaderCount; @XmlElement(name = "version") + @JsonInclude(JsonInclude.Include.NON_NULL) private String version; @XmlElement(name = "setupTime") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long setupTime; @XmlElement(name = "revision") + @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; @XmlElement(name = "buildDate") + @JsonInclude(JsonInclude.Include.NON_NULL) private String buildDate; @XmlElement(name = "layoutVersion") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @XmlElement(name = "networkLocation") private String networkLocation; + @XmlElement(name = "ipAddress") + private String ipAddress; + + public DatanodeMetadata() { + } + private DatanodeMetadata(Builder builder) { this.hostname = builder.hostname; this.uuid = builder.uuid; @@ -97,6 +117,7 @@ private DatanodeMetadata(Builder builder) { this.buildDate = builder.buildDate; this.layoutVersion = builder.layoutVersion; this.networkLocation = builder.networkLocation; + this.ipAddress = builder.ipAddress; } public String getHostname() { @@ -163,6 +184,10 @@ public String getNetworkLocation() { return networkLocation; } + public String getIpAddress() { + return ipAddress; + } + /** * Returns new builder class that builds a DatanodeMetadata. 
* @@ -193,6 +218,7 @@ public static final class Builder { private String buildDate; private int layoutVersion; private String networkLocation; + private String ipAddress; public Builder() { this.containers = 0; @@ -280,6 +306,11 @@ public Builder withNetworkLocation(String networkLocation) { this.networkLocation = networkLocation; return this; } + + public Builder withIpAddress(String ipAddress) { + this.ipAddress = ipAddress; + return this; + } /** * Constructs DatanodeMetadata. * diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java new file mode 100644 index 000000000000..e2312e2fdb37 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetrics.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Class that represents the datanode metrics captured during decommissioning. + */ +public class DatanodeMetrics { + /** + * Start time of decommission of datanode. + */ + @JsonProperty("decommissionStartTime") + private String decommissionStartTime; + + /** + * Number of pipelines in unclosed status. + */ + @JsonProperty("numOfUnclosedPipelines") + private int numOfUnclosedPipelines; + + /** + * Number of under replicated containers. + */ + @JsonProperty("numOfUnderReplicatedContainers") + private double numOfUnderReplicatedContainers; + + /** + * Number of containers still not closed. 
+ */ + @JsonProperty("numOfUnclosedContainers") + private double numOfUnclosedContainers; + + public String getDecommissionStartTime() { + return decommissionStartTime; + } + + public void setDecommissionStartTime(String decommissionStartTime) { + this.decommissionStartTime = decommissionStartTime; + } + + public int getNumOfUnclosedPipelines() { + return numOfUnclosedPipelines; + } + + public void setNumOfUnclosedPipelines(int numOfUnclosedPipelines) { + this.numOfUnclosedPipelines = numOfUnclosedPipelines; + } + + public double getNumOfUnderReplicatedContainers() { + return numOfUnderReplicatedContainers; + } + + public void setNumOfUnderReplicatedContainers(double numOfUnderReplicatedContainers) { + this.numOfUnderReplicatedContainers = numOfUnderReplicatedContainers; + } + + public double getNumOfUnclosedContainers() { + return numOfUnclosedContainers; + } + + public void setNumOfUnclosedContainers(double numOfUnclosedContainers) { + this.numOfUnclosedContainers = numOfUnclosedContainers; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java new file mode 100644 index 000000000000..9374146aa9fe --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hdds.scm.container.ContainerID; + +import java.util.List; +import java.util.Map; + +/** + * Class that represents the API Response of decommissioning status info of datanode. + */ +public class DecommissionStatusInfoResponse { + /** + * Metadata of a datanode when decommissioning of datanode is in progress. + */ + @JsonProperty("datanodeDetails") + private DatanodeMetadata dataNodeDetails; + + /** + * Metrics of datanode when decommissioning of datanode is in progress. + */ + @JsonProperty("metrics") + private DatanodeMetrics datanodeMetrics; + + /** + * containers info of a datanode when decommissioning of datanode is in progress. 
+ */ + @JsonProperty("containers") + private Map> containers; + + public DatanodeMetadata getDataNodeDetails() { + return dataNodeDetails; + } + + public void setDataNodeDetails(DatanodeMetadata dataNodeDetails) { + this.dataNodeDetails = dataNodeDetails; + } + + public DatanodeMetrics getDatanodeMetrics() { + return datanodeMetrics; + } + + public void setDatanodeMetrics(DatanodeMetrics datanodeMetrics) { + this.datanodeMetrics = datanodeMetrics; + } + + public Map> getContainers() { + return containers; + } + + public void setContainers( + Map> containers) { + this.containers = containers; + } +} From 525ef354f5976e8dd7b624dcbe2eb488e98e42f1 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 13 Mar 2024 20:43:17 +0530 Subject: [PATCH 02/16] HDDS-10514. Reverting this file change. --- .../DecommissionStatusSubCommand.java | 74 +------------------ 1 file changed, 2 insertions(+), 72 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index 5158404953da..464b08099db7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -28,14 +28,11 @@ import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; import java.io.IOException; import java.text.DateFormat; import java.text.SimpleDateFormat; -import java.util.LinkedHashMap; -import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; @@ -65,11 +62,6 @@ public class DecommissionStatusSubCommand extends ScmSubcommand { defaultValue = "") private String ipAddress; - @CommandLine.Option(names = { "--json" }, - description = "Show output in json format", - defaultValue = "false") - private boolean json; - @Override public void execute(ScmClient scmClient) throws IOException { List decommissioningNodes; @@ -92,10 +84,8 @@ public void execute(ScmClient scmClient) throws IOException { } } else { decommissioningNodes = allNodes.collect(Collectors.toList()); - if (!json) { - System.out.println("\nDecommission Status: DECOMMISSIONING - " + - decommissioningNodes.size() + " node(s)"); - } + System.out.println("\nDecommission Status: DECOMMISSIONING - " + + decommissioningNodes.size() + " node(s)"); } String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); @@ -110,22 +100,6 @@ public void execute(ScmClient scmClient) throws IOException { numDecomNodes = (totalDecom == null ? 
-1 : Integer.parseInt(totalDecom.toString())); } - if (json) { - List> decommissioningNodesDetails = new ArrayList<>(); - - for (HddsProtos.Node node : decommissioningNodes) { - DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( - node.getNodeID()); - Map datanodeMap = new LinkedHashMap<>(); - datanodeMap.put("datanodeDetails", getDatanodeDetails(datanode)); - datanodeMap.put("metrics", getCounts(datanode, jsonNode, numDecomNodes)); - datanodeMap.put("containers", getContainers(scmClient, datanode)); - decommissioningNodesDetails.add(datanodeMap); - } - System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(decommissioningNodesDetails)); - return; - } - for (HddsProtos.Node node : decommissioningNodes) { DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( node.getNodeID()); @@ -165,48 +139,4 @@ private void printCounts(DatanodeDetails datanode, JsonNode counts, int numDecom System.err.println("Error getting pipeline and container counts for " + datanode.getHostName()); } } - - private Map getDatanodeDetails(DatanodeDetails datanode) { - Map detailsMap = new LinkedHashMap<>(); - detailsMap.put("uuid", datanode.getUuid().toString()); - detailsMap.put("networkLocation", datanode.getNetworkLocation()); - detailsMap.put("ipAddress", datanode.getIpAddress()); - detailsMap.put("hostname", datanode.getHostName()); - return detailsMap; - } - - private Map getCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { - Map countsMap = new LinkedHashMap<>(); - try { - for (int i = 1; i <= numDecomNodes; i++) { - if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) { - long startTime = Long.parseLong(counts.get("StartTimeDN." + i).toString()); - Date date = new Date(startTime); - DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); - countsMap.put("decommissionStartTime", formatter.format(date)); - countsMap.put("numOfUnclosedPipelines", - Integer.parseInt(counts.get("PipelinesWaitingToCloseDN." + i).toString())); - countsMap.put("numOfUnderReplicatedContainers", - Double.parseDouble(counts.get("UnderReplicatedDN." + i).toString())); - countsMap.put("numOfUnclosedContainers", - Double.parseDouble(counts.get("UnclosedContainersDN." + i).toString())); - return countsMap; - } - } - System.err.println("Error getting pipeline and container metrics for " + datanode.getHostName()); - } catch (NullPointerException ex) { - System.err.println("Error getting pipeline and container metrics for " + datanode.getHostName()); - } - return countsMap; - } - - private Map getContainers(ScmClient scmClient, DatanodeDetails datanode) throws IOException { - Map> containers = scmClient.getContainersOnDecomNode(datanode); - return containers.entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - entry -> entry.getValue().stream(). - map(ContainerID::toString). - collect(Collectors.toList()))); - } } From f1df5600e76854379450beecbf9645973932498b Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Thu, 14 Mar 2024 19:31:07 +0530 Subject: [PATCH 03/16] HDDS-10514. Added API for UUID argument. 
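
This exposes two Recon endpoints backed by the Ozone CLI:
GET .../decommission/info relays 'ozone admin datanode status
decommission --json' for all decommissioning datanodes, and
GET .../decommission/info/{uuid} passes '--id <uuid> --json' for a
single datanode.

Below is a minimal client-side sketch of exercising the new endpoint.
The host, port and /api/v1/datanodes base path are assumptions for
illustration only; none of them are defined in this patch.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public final class DecommissionInfoClientSketch {
      public static void main(String[] args) throws Exception {
        // Assumed Recon address; adjust to the actual deployment.
        URL url = new URL(
            "http://localhost:9888/api/v1/datanodes/decommission/info");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        StringBuilder body = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(conn.getInputStream(),
                StandardCharsets.UTF_8))) {
          String line;
          while ((line = reader.readLine()) != null) {
            body.append(line);
          }
        }
        // On success the entity is the JSON produced by the CLI,
        // deserialized into List<DecommissionStatusInfoResponse>.
        System.out.println(conn.getResponseCode() + " " + body);
      }
    }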
--- .../apache/hadoop/ozone/recon/ReconUtils.java | 70 ++++++++++++- .../hadoop/ozone/recon/api/NodeEndpoint.java | 99 +++++++++++-------- 2 files changed, 128 insertions(+), 41 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 39d091ee03c8..9b5292279d22 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -20,21 +20,30 @@ import java.io.BufferedInputStream; import java.io.BufferedOutputStream; +import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; import java.nio.file.Path; import java.nio.file.Paths; +import java.security.PrivilegedExceptionAction; import java.sql.Timestamp; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; import com.google.common.base.Preconditions; import com.google.inject.Singleton; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -60,14 +69,16 @@ import static org.jooq.impl.DSL.using; import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; -import jakarta.annotation.Nonnull; import org.jooq.Configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.validation.constraints.NotNull; + /** * Recon Utility class. */ @@ -385,4 +396,61 @@ public SCMNodeDetails getReconNodeDetails(OzoneConfiguration conf) { HddsServerUtil.getReconDataNodeBindAddress(conf)); return builder.build(); } + + /** + * This method accepts the list of command args and supports execution of ozone CLI based commands only. 
+ * + * @param commandArgs the list of command args + * @return the command output map with its exit code + */ + public static Map executeCommand(List commandArgs) { + String command = joinListArgs(commandArgs); + Map processOutputMap = new HashMap<>(); + LOG.info("Received command : '{}' to execute", command); + AtomicReference processOutput = new AtomicReference<>(); + ProcessBuilder pb = new ProcessBuilder(commandArgs); + try { + SecurityUtil.doAsLoginUser((PrivilegedExceptionAction>) () -> { + Process process = pb.start(); + int exitCode = process.waitFor(); + LOG.info("'{}' command : exitcode: {}", command, exitCode); + validateAndReadProcessOutput(processOutput, process, exitCode); + processOutputMap.put(exitCode, processOutput.get()); + return processOutputMap; + }); + } catch (IOException ioException) { + processOutputMap.put(1, "Failed to run command :" + command); + LOG.error("Failed to run '{}' command : {}", command, ioException); + } + return processOutputMap; + } + + @NotNull + public static String joinListArgs(List commandArgs) { + String command = commandArgs.stream().collect(Collectors.joining(" ")); + return command; + } + + private static void validateAndReadProcessOutput(AtomicReference processOutput, + Process process, + int exitCode) throws IOException { + StringBuilder processOutputBuilder = new StringBuilder(); + InputStream inputStream; + if (exitCode != 0) { + inputStream = process.getErrorStream(); + } else { + inputStream = process.getInputStream(); + } + readProcessStream(processOutputBuilder, inputStream); + processOutput.set(processOutputBuilder.toString()); + } + + private static void readProcessStream(StringBuilder processOutputBuilder, InputStream stream) throws IOException { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) { + String line; + while ((line = reader.readLine()) != null) { + processOutputBuilder.append(line); + } + } + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index b11ab0c78447..85cb98f64fbd 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -18,8 +18,10 @@ package org.apache.hadoop.ozone.recon.api; +import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -31,6 +33,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata; import org.apache.hadoop.ozone.recon.api.types.DatanodePipeline; import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport; @@ -42,20 +45,20 @@ import javax.inject.Inject; import javax.ws.rs.GET; import javax.ws.rs.Path; +import javax.ws.rs.PathParam; import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; 
-import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStreamReader; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; -import org.apache.hadoop.security.SecurityUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -179,52 +182,68 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { return new DatanodeStorageReport(capacity, used, remaining, committed); } + /** + * This GET API provides the information of all datanodes for which decommissioning is initiated. + * @return the wrapped Response output + */ @GET @Path("/decommission/info") public Response getDatanodesDecommissionInfo() { // Command to execute - List command = Arrays.asList("ozone", "admin", "datanode", "status", "decommission", "--json"); + List commandArgs = Arrays.asList("ozone", "admin", "datanode", "status", "decommission", "--json"); + + return getDecommissionStatusResponse(commandArgs); + } + + /** + * This GET API provides the information of a specific datanode for which decommissioning is initiated. + * @return the wrapped Response output + */ + @GET + @Path("/decommission/info/{uuid}") + public Response getDecommissionInfoForDatanode(@PathParam("uuid") String uuid) { + Preconditions.checkNotNull(uuid, "uuid of a datanode cannot be null !!!"); + Preconditions.checkArgument(uuid.isEmpty(), "uuid of a datanode cannot be empty !!!"); + + // Command to execute + List commandArgs = + Arrays.asList("ozone", "admin", "datanode", "status", "decommission", "--id", uuid, "--json"); + + return getDecommissionStatusResponse(commandArgs); + } + + private static Response getDecommissionStatusResponse(List commandArgs) { Response.ResponseBuilder builder = Response.status(Response.Status.OK); - ProcessBuilder pb = new ProcessBuilder(command); - try { - SecurityUtil.doAsLoginUser(() -> { - Process process = pb.start(); - try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { - String processOutput = readProcessStream(reader); - // Wait for the process to complete - int exitCode = process.waitFor(); - LOG.info("Datanode decommission status info command exitcode: {}", exitCode); - if (exitCode != 0) { - try (BufferedReader errReader = new BufferedReader(new InputStreamReader(process.getErrorStream()))) { - String processErrorOutput = readProcessStream(errReader); - builder.status(exitCode); - builder.entity("Datanode decommission status info command is not successful : " + processErrorOutput); - // Build and return the response - return builder.build(); - } - } - // Create ObjectMapper - ObjectMapper objectMapper = new ObjectMapper(); - LOG.info("processOutput: {}", processOutput); - // Deserialize JSON to Java object - List decommissionStatusInfoResponseList = + Map commandOutputMap = ReconUtils.executeCommand(commandArgs); + + for (Map.Entry entry : commandOutputMap.entrySet()) { + Integer exitCode = entry.getKey(); + String processOutput = entry.getValue(); + if (exitCode != 0) { + builder.status(Response.Status.INTERNAL_SERVER_ERROR); + builder.entity( + ReconUtils.joinListArgs(commandArgs) + " command execution is not successful : " + processOutput); + return builder.build(); + } else { + // Create ObjectMapper + ObjectMapper objectMapper = new ObjectMapper(); + LOG.info("processOutput: {}", processOutput); + // Deserialize JSON to Java object + 
List decommissionStatusInfoResponseList = null; + try { + decommissionStatusInfoResponseList = objectMapper.readValue(processOutput, new TypeReference>() { }); builder.entity(decommissionStatusInfoResponseList); + return builder.build(); + } catch (JsonProcessingException jsonProcessingException) { + LOG.error("Unexpected JSON Error: {}", jsonProcessingException); + throw new WebApplicationException(jsonProcessingException, Response.Status.INTERNAL_SERVER_ERROR); + } catch (Exception exception) { + LOG.error("Unexpected Error: {}", exception); + throw new WebApplicationException(exception, Response.Status.INTERNAL_SERVER_ERROR); } - return builder.build(); - }); - } catch (IOException ioException) { - LOG.error("Failed to run datanode decommission status info command. ", ioException); + } } return builder.build(); } - - private static String readProcessStream(BufferedReader reader) throws IOException { - StringBuilder processOutputBuilder = new StringBuilder(); - String line; - while ((line = reader.readLine()) != null) { - processOutputBuilder.append(line); - } - return processOutputBuilder.toString(); - } } From c229db325c0788d314ce11e1e78c92f1958af3f6 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Thu, 14 Mar 2024 20:03:08 +0530 Subject: [PATCH 04/16] HDDS-10514. Fixed findbugs. --- .../src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 9b5292279d22..3e4b0bf1e68b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -446,7 +446,7 @@ private static void validateAndReadProcessOutput(AtomicReference process } private static void readProcessStream(StringBuilder processOutputBuilder, InputStream stream) throws IOException { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"))) { String line; while ((line = reader.readLine()) != null) { processOutputBuilder.append(line); From eb57e7ad812d0ae7681658fc52218fb0d57bdd6e Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Fri, 15 Mar 2024 11:31:53 +0530 Subject: [PATCH 05/16] HDDS-10514. Added test cases for endPoint APIs. 
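
The new tests stub ReconUtils.executeCommand(...) instead of spawning a
real 'ozone admin' process, so they run without a cluster. A condensed,
self-contained sketch of that stubbing pattern follows; everything here
except ReconUtils.executeCommand and the CLI arguments is named for
illustration only.

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.ozone.recon.ReconUtils;

    public final class ExecuteCommandStubSketch {
      public static void main(String[] args) {
        ReconUtils reconUtilsMock = mock(ReconUtils.class);
        // Key is the process exit code (0 = success); the value is the
        // stdout the endpoint deserializes as the CLI's --json output.
        Map<Integer, String> commandOutputMap = new HashMap<>();
        commandOutputMap.put(0, "[]");
        when(reconUtilsMock.executeCommand(any(List.class)))
            .thenReturn(commandOutputMap);
        System.out.println(reconUtilsMock.executeCommand(Arrays.asList(
            "ozone", "admin", "datanode", "status", "decommission",
            "--json")));
      }
    }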
--- .../apache/hadoop/ozone/recon/ReconUtils.java | 8 +- .../hadoop/ozone/recon/api/NodeEndpoint.java | 17 ++-- .../hadoop/ozone/recon/api/TestEndpoints.java | 81 ++++++++++++++++++- 3 files changed, 95 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 3e4b0bf1e68b..a99d465f6764 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -403,7 +403,7 @@ public SCMNodeDetails getReconNodeDetails(OzoneConfiguration conf) { * @param commandArgs the list of command args * @return the command output map with its exit code */ - public static Map executeCommand(List commandArgs) { + public Map executeCommand(List commandArgs) { String command = joinListArgs(commandArgs); Map processOutputMap = new HashMap<>(); LOG.info("Received command : '{}' to execute", command); @@ -426,12 +426,12 @@ public static Map executeCommand(List commandArgs) { } @NotNull - public static String joinListArgs(List commandArgs) { + public String joinListArgs(List commandArgs) { String command = commandArgs.stream().collect(Collectors.joining(" ")); return command; } - private static void validateAndReadProcessOutput(AtomicReference processOutput, + private void validateAndReadProcessOutput(AtomicReference processOutput, Process process, int exitCode) throws IOException { StringBuilder processOutputBuilder = new StringBuilder(); @@ -445,7 +445,7 @@ private static void validateAndReadProcessOutput(AtomicReference process processOutput.set(processOutputBuilder.toString()); } - private static void readProcessStream(StringBuilder processOutputBuilder, InputStream stream) throws IOException { + private void readProcessStream(StringBuilder processOutputBuilder, InputStream stream) throws IOException { try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8"))) { String line; while ((line = reader.readLine()) != null) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 85cb98f64fbd..5bf70e7b4a0e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -53,6 +53,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -75,14 +76,16 @@ public class NodeEndpoint { private ReconNodeManager nodeManager; private ReconPipelineManager pipelineManager; private ReconContainerManager reconContainerManager; + private ReconUtils reconUtils; @Inject - NodeEndpoint(OzoneStorageContainerManager reconSCM) { + NodeEndpoint(OzoneStorageContainerManager reconSCM, ReconUtils reconUtils) { this.nodeManager = (ReconNodeManager) reconSCM.getScmNodeManager(); this.reconContainerManager = (ReconContainerManager) reconSCM.getContainerManager(); this.pipelineManager = (ReconPipelineManager) reconSCM.getPipelineManager(); + this.reconUtils = reconUtils; } /** @@ -203,7 +206,7 @@ public Response getDatanodesDecommissionInfo() { @Path("/decommission/info/{uuid}") public Response getDecommissionInfoForDatanode(@PathParam("uuid") String uuid) { 
Preconditions.checkNotNull(uuid, "uuid of a datanode cannot be null !!!"); - Preconditions.checkArgument(uuid.isEmpty(), "uuid of a datanode cannot be empty !!!"); + Preconditions.checkArgument(!uuid.isEmpty(), "uuid of a datanode cannot be empty !!!"); // Command to execute List commandArgs = @@ -212,9 +215,10 @@ public Response getDecommissionInfoForDatanode(@PathParam("uuid") String uuid) { return getDecommissionStatusResponse(commandArgs); } - private static Response getDecommissionStatusResponse(List commandArgs) { + private Response getDecommissionStatusResponse(List commandArgs) { Response.ResponseBuilder builder = Response.status(Response.Status.OK); - Map commandOutputMap = ReconUtils.executeCommand(commandArgs); + Map commandOutputMap = reconUtils.executeCommand(commandArgs); + Map responseMap = new HashMap<>(); for (Map.Entry entry : commandOutputMap.entrySet()) { Integer exitCode = entry.getKey(); @@ -222,7 +226,7 @@ private static Response getDecommissionStatusResponse(List commandArgs) if (exitCode != 0) { builder.status(Response.Status.INTERNAL_SERVER_ERROR); builder.entity( - ReconUtils.joinListArgs(commandArgs) + " command execution is not successful : " + processOutput); + reconUtils.joinListArgs(commandArgs) + " command execution is not successful : " + processOutput); return builder.build(); } else { // Create ObjectMapper @@ -233,7 +237,8 @@ private static Response getDecommissionStatusResponse(List commandArgs) try { decommissionStatusInfoResponseList = objectMapper.readValue(processOutput, new TypeReference>() { }); - builder.entity(decommissionStatusInfoResponseList); + responseMap.put("DatanodesDecommissionInfo", decommissionStatusInfoResponseList); + builder.entity(responseMap); return builder.build(); } catch (JsonProcessingException jsonProcessingException) { LOG.error("Unexpected JSON Error: {}", jsonProcessingException); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 9c92ad4d7e1f..f59f2fa8907d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -71,7 +71,9 @@ import org.apache.hadoop.ozone.recon.api.types.BucketsResponse; import org.apache.hadoop.ozone.recon.api.types.ClusterStateResponse; import org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata; +import org.apache.hadoop.ozone.recon.api.types.DatanodeMetrics; import org.apache.hadoop.ozone.recon.api.types.DatanodesResponse; +import org.apache.hadoop.ozone.recon.api.types.DecommissionStatusInfoResponse; import org.apache.hadoop.ozone.recon.api.types.PipelineMetadata; import org.apache.hadoop.ozone.recon.api.types.PipelinesResponse; import org.apache.hadoop.ozone.recon.api.types.VolumeObjectDBInfo; @@ -141,8 +143,10 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicInteger; @@ -306,6 +310,28 @@ private void initializeInjector() throws Exception { pipelineManager = reconScm.getPipelineManager(); reconPipelineManager = (ReconPipelineManager) pipelineManager; reconPipelineManager.addPipeline(pipeline); + + // Mocking for command execution output. 
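+    // The stubbed map below stands in for ReconUtils.executeCommand():
+    // the key is the process exit code (0 = success) and the value is
+    // the raw stdout of the decommission status command, i.e. the JSON
+    // that NodeEndpoint deserializes into DecommissionStatusInfoResponse.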
+ Map commandOutputMap = new HashMap<>(); + commandOutputMap.put(0, "[\n" + + " {\n" + + " \"datanodeDetails\": {\n" + + " \"uuid\": \"5416ef8e-6e6e-476f-a4d7-3744f7c282ab\",\n" + + " \"hostname\": \"ozone-ha-datanode-1.ozone-ha_default\",\n" + + " \"networkLocation\": \"/default-rack\",\n" + + " \"ipAddress\": \"172.25.0.9\"\n" + + " },\n" + + " \"metrics\": {\n" + + " \"decommissionStartTime\": \"14/03/2024 01:56:00 UTC\",\n" + + " \"numOfUnclosedPipelines\": 2,\n" + + " \"numOfUnderReplicatedContainers\": 0.0,\n" + + " \"numOfUnclosedContainers\": 0.0\n" + + " },\n" + + " \"containers\": {}\n" + + " }\n" + + "]"); + when(reconUtilsMock.executeCommand(any(List.class))).thenReturn(commandOutputMap); + } @SuppressWarnings("checkstyle:MethodLength") @@ -692,7 +718,6 @@ public void testGetMetricsResponse() throws Exception { when(urlConnectionMock.getInputStream()).thenReturn(inputStream); when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class), anyString(), anyBoolean())).thenReturn(urlConnectionMock); - metricsProxyEndpoint.getMetricsResponse(PROMETHEUS_INSTANT_QUERY_API, uriInfoMock, responseMock); @@ -1181,4 +1206,58 @@ private void waitAndCheckConditionAfterHeartbeat(Callable check) private BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + @Test + public void testDatanodesDecommissionInfoAPI() throws Exception { + Response response = nodeEndpoint.getDatanodesDecommissionInfo(); + Map responseMap = (Map) response.getEntity(); + responseMap.entrySet().forEach(entry -> { + assertEquals("DatanodesDecommissionInfo", entry.getKey()); + List decommissionStatusInfoResponseList = + (List) entry.getValue(); + decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { + DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostname()); + assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); + assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); + + DatanodeMetrics datanodeMetrics = decommissionStatusInfoResponse.getDatanodeMetrics(); + assertEquals("14/03/2024 01:56:00 UTC", datanodeMetrics.getDecommissionStartTime()); + assertEquals(0.0, datanodeMetrics.getNumOfUnclosedContainers()); + assertEquals(2, datanodeMetrics.getNumOfUnclosedPipelines()); + assertEquals(0.0, datanodeMetrics.getNumOfUnderReplicatedContainers()); + + Map> containers = decommissionStatusInfoResponse.getContainers(); + assertEquals(0, containers.size()); + }); + }); + } + + @Test + public void testDecommissionInfoForDatanodeAPI() throws Exception { + Response response = nodeEndpoint.getDecommissionInfoForDatanode("5416ef8e-6e6e-476f-a4d7-3744f7c282ab"); + Map responseMap = (Map) response.getEntity(); + responseMap.entrySet().forEach(entry -> { + assertEquals("DatanodesDecommissionInfo", entry.getKey()); + List decommissionStatusInfoResponseList = + (List) entry.getValue(); + decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { + DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostname()); + assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); + assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); + + DatanodeMetrics 
datanodeMetrics = decommissionStatusInfoResponse.getDatanodeMetrics(); + assertEquals("14/03/2024 01:56:00 UTC", datanodeMetrics.getDecommissionStartTime()); + assertEquals(0.0, datanodeMetrics.getNumOfUnclosedContainers()); + assertEquals(2, datanodeMetrics.getNumOfUnclosedPipelines()); + assertEquals(0.0, datanodeMetrics.getNumOfUnderReplicatedContainers()); + + Map> containers = decommissionStatusInfoResponse.getContainers(); + assertEquals(0, containers.size()); + }); + }); + } } From c29ac0b7834550f8a8f0af91a820f1fb21a36ae1 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Sat, 16 Mar 2024 09:26:39 +0530 Subject: [PATCH 06/16] HDDS-9537. Intermittent failure in TestPipelineManagerMXBean. --- .../hadoop/hdds/protocol/DatanodeDetails.java | 4 +++ .../recon/api/types/DatanodeMetadata.java | 31 ------------------- .../types/DecommissionStatusInfoResponse.java | 7 +++-- .../hadoop/ozone/recon/api/TestEndpoints.java | 14 ++++----- 4 files changed, 15 insertions(+), 41 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index b455daba529f..cd5ffb544a6d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -25,6 +25,8 @@ import java.util.UUID; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsUtils; @@ -62,6 +64,7 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving +@JsonDeserialize(builder = DatanodeDetails.Builder.class) public class DatanodeDetails extends NodeImpl implements Comparable { @@ -569,6 +572,7 @@ public String threadNamePrefix() { /** * Builder class for building DatanodeDetails. 
*/ + @JsonPOJOBuilder(withPrefix = "set") public static final class Builder { private UUID id; private String ipAddress; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 7b86afdff496..4927c4a1e86a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.recon.api.types; -import com.fasterxml.jackson.annotation.JsonInclude; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -40,66 +39,47 @@ public final class DatanodeMetadata { private String hostname; @XmlElement(name = "state") - @JsonInclude(JsonInclude.Include.NON_NULL) private NodeState state; @XmlElement(name = "opState") - @JsonInclude(JsonInclude.Include.NON_NULL) private NodeOperationalState opState; @XmlElement(name = "lastHeartbeat") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long lastHeartbeat; @XmlElement(name = "storageReport") - @JsonInclude(JsonInclude.Include.NON_NULL) private DatanodeStorageReport datanodeStorageReport; @XmlElement(name = "pipelines") - @JsonInclude(JsonInclude.Include.NON_NULL) private List pipelines; @XmlElement(name = "containers") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int containers; @XmlElement(name = "openContainers") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int openContainers; @XmlElement(name = "leaderCount") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int leaderCount; @XmlElement(name = "version") - @JsonInclude(JsonInclude.Include.NON_NULL) private String version; @XmlElement(name = "setupTime") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long setupTime; @XmlElement(name = "revision") - @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; @XmlElement(name = "buildDate") - @JsonInclude(JsonInclude.Include.NON_NULL) private String buildDate; @XmlElement(name = "layoutVersion") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @XmlElement(name = "networkLocation") private String networkLocation; - @XmlElement(name = "ipAddress") - private String ipAddress; - - public DatanodeMetadata() { - } - private DatanodeMetadata(Builder builder) { this.hostname = builder.hostname; this.uuid = builder.uuid; @@ -117,7 +97,6 @@ private DatanodeMetadata(Builder builder) { this.buildDate = builder.buildDate; this.layoutVersion = builder.layoutVersion; this.networkLocation = builder.networkLocation; - this.ipAddress = builder.ipAddress; } public String getHostname() { @@ -184,10 +163,6 @@ public String getNetworkLocation() { return networkLocation; } - public String getIpAddress() { - return ipAddress; - } - /** * Returns new builder class that builds a DatanodeMetadata. 
* @@ -218,7 +193,6 @@ public static final class Builder { private String buildDate; private int layoutVersion; private String networkLocation; - private String ipAddress; public Builder() { this.containers = 0; @@ -306,11 +280,6 @@ public Builder withNetworkLocation(String networkLocation) { this.networkLocation = networkLocation; return this; } - - public Builder withIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - return this; - } /** * Constructs DatanodeMetadata. * diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java index 9374146aa9fe..aab2a2789bbe 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.api.types; import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.ContainerID; import java.util.List; @@ -31,7 +32,7 @@ public class DecommissionStatusInfoResponse { * Metadata of a datanode when decommissioning of datanode is in progress. */ @JsonProperty("datanodeDetails") - private DatanodeMetadata dataNodeDetails; + private DatanodeDetails dataNodeDetails; /** * Metrics of datanode when decommissioning of datanode is in progress. @@ -45,11 +46,11 @@ public class DecommissionStatusInfoResponse { @JsonProperty("containers") private Map> containers; - public DatanodeMetadata getDataNodeDetails() { + public DatanodeDetails getDataNodeDetails() { return dataNodeDetails; } - public void setDataNodeDetails(DatanodeMetadata dataNodeDetails) { + public void setDataNodeDetails(DatanodeDetails dataNodeDetails) { this.dataNodeDetails = dataNodeDetails; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index f59f2fa8907d..62cb42f1bf8e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -317,7 +317,7 @@ private void initializeInjector() throws Exception { " {\n" + " \"datanodeDetails\": {\n" + " \"uuid\": \"5416ef8e-6e6e-476f-a4d7-3744f7c282ab\",\n" + - " \"hostname\": \"ozone-ha-datanode-1.ozone-ha_default\",\n" + + " \"hostName\": \"ozone-ha-datanode-1.ozone-ha_default\",\n" + " \"networkLocation\": \"/default-rack\",\n" + " \"ipAddress\": \"172.25.0.9\"\n" + " },\n" + @@ -1216,9 +1216,9 @@ public void testDatanodesDecommissionInfoAPI() throws Exception { List decommissionStatusInfoResponseList = (List) entry.getValue(); decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { - DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); - assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid()); - assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostname()); + DatanodeDetails dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid().toString()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", 
dataNodeDetails.getHostName()); assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); @@ -1243,9 +1243,9 @@ public void testDecommissionInfoForDatanodeAPI() throws Exception { List decommissionStatusInfoResponseList = (List) entry.getValue(); decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { - DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); - assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid()); - assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostname()); + DatanodeDetails dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid().toString()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostName()); assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); From fe6ae722c74ef4531c832c850bb2e4586ab1a83b Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Sat, 16 Mar 2024 09:28:11 +0530 Subject: [PATCH 07/16] Revert "HDDS-9537. Intermittent failure in TestPipelineManagerMXBean." This reverts commit c29ac0b7834550f8a8f0af91a820f1fb21a36ae1. --- .../hadoop/hdds/protocol/DatanodeDetails.java | 4 --- .../recon/api/types/DatanodeMetadata.java | 31 +++++++++++++++++++ .../types/DecommissionStatusInfoResponse.java | 7 ++--- .../hadoop/ozone/recon/api/TestEndpoints.java | 14 ++++----- 4 files changed, 41 insertions(+), 15 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index cd5ffb544a6d..b455daba529f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -25,8 +25,6 @@ import java.util.UUID; import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.databind.annotation.JsonDeserialize; -import com.fasterxml.jackson.databind.annotation.JsonPOJOBuilder; import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsUtils; @@ -64,7 +62,6 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving -@JsonDeserialize(builder = DatanodeDetails.Builder.class) public class DatanodeDetails extends NodeImpl implements Comparable { @@ -572,7 +569,6 @@ public String threadNamePrefix() { /** * Builder class for building DatanodeDetails. 
*/ - @JsonPOJOBuilder(withPrefix = "set") public static final class Builder { private UUID id; private String ipAddress; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 4927c4a1e86a..7b86afdff496 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.recon.api.types; +import com.fasterxml.jackson.annotation.JsonInclude; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -39,47 +40,66 @@ public final class DatanodeMetadata { private String hostname; @XmlElement(name = "state") + @JsonInclude(JsonInclude.Include.NON_NULL) private NodeState state; @XmlElement(name = "opState") + @JsonInclude(JsonInclude.Include.NON_NULL) private NodeOperationalState opState; @XmlElement(name = "lastHeartbeat") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long lastHeartbeat; @XmlElement(name = "storageReport") + @JsonInclude(JsonInclude.Include.NON_NULL) private DatanodeStorageReport datanodeStorageReport; @XmlElement(name = "pipelines") + @JsonInclude(JsonInclude.Include.NON_NULL) private List pipelines; @XmlElement(name = "containers") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int containers; @XmlElement(name = "openContainers") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int openContainers; @XmlElement(name = "leaderCount") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int leaderCount; @XmlElement(name = "version") + @JsonInclude(JsonInclude.Include.NON_NULL) private String version; @XmlElement(name = "setupTime") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long setupTime; @XmlElement(name = "revision") + @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; @XmlElement(name = "buildDate") + @JsonInclude(JsonInclude.Include.NON_NULL) private String buildDate; @XmlElement(name = "layoutVersion") + @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @XmlElement(name = "networkLocation") private String networkLocation; + @XmlElement(name = "ipAddress") + private String ipAddress; + + public DatanodeMetadata() { + } + private DatanodeMetadata(Builder builder) { this.hostname = builder.hostname; this.uuid = builder.uuid; @@ -97,6 +117,7 @@ private DatanodeMetadata(Builder builder) { this.buildDate = builder.buildDate; this.layoutVersion = builder.layoutVersion; this.networkLocation = builder.networkLocation; + this.ipAddress = builder.ipAddress; } public String getHostname() { @@ -163,6 +184,10 @@ public String getNetworkLocation() { return networkLocation; } + public String getIpAddress() { + return ipAddress; + } + /** * Returns new builder class that builds a DatanodeMetadata. 
* @@ -193,6 +218,7 @@ public static final class Builder { private String buildDate; private int layoutVersion; private String networkLocation; + private String ipAddress; public Builder() { this.containers = 0; @@ -280,6 +306,11 @@ public Builder withNetworkLocation(String networkLocation) { this.networkLocation = networkLocation; return this; } + + public Builder withIpAddress(String ipAddress) { + this.ipAddress = ipAddress; + return this; + } /** * Constructs DatanodeMetadata. * diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java index aab2a2789bbe..9374146aa9fe 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.recon.api.types; import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.ContainerID; import java.util.List; @@ -32,7 +31,7 @@ public class DecommissionStatusInfoResponse { * Metadata of a datanode when decommissioning of datanode is in progress. */ @JsonProperty("datanodeDetails") - private DatanodeDetails dataNodeDetails; + private DatanodeMetadata dataNodeDetails; /** * Metrics of datanode when decommissioning of datanode is in progress. @@ -46,11 +45,11 @@ public class DecommissionStatusInfoResponse { @JsonProperty("containers") private Map> containers; - public DatanodeDetails getDataNodeDetails() { + public DatanodeMetadata getDataNodeDetails() { return dataNodeDetails; } - public void setDataNodeDetails(DatanodeDetails dataNodeDetails) { + public void setDataNodeDetails(DatanodeMetadata dataNodeDetails) { this.dataNodeDetails = dataNodeDetails; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 62cb42f1bf8e..f59f2fa8907d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -317,7 +317,7 @@ private void initializeInjector() throws Exception { " {\n" + " \"datanodeDetails\": {\n" + " \"uuid\": \"5416ef8e-6e6e-476f-a4d7-3744f7c282ab\",\n" + - " \"hostName\": \"ozone-ha-datanode-1.ozone-ha_default\",\n" + + " \"hostname\": \"ozone-ha-datanode-1.ozone-ha_default\",\n" + " \"networkLocation\": \"/default-rack\",\n" + " \"ipAddress\": \"172.25.0.9\"\n" + " },\n" + @@ -1216,9 +1216,9 @@ public void testDatanodesDecommissionInfoAPI() throws Exception { List decommissionStatusInfoResponseList = (List) entry.getValue(); decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { - DatanodeDetails dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); - assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid().toString()); - assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostName()); + DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", 
dataNodeDetails.getHostname()); assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); @@ -1243,9 +1243,9 @@ public void testDecommissionInfoForDatanodeAPI() throws Exception { List decommissionStatusInfoResponseList = (List) entry.getValue(); decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { - DatanodeDetails dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); - assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid().toString()); - assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostName()); + DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostname()); assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); From 378ed7a1070bbd6027b1c95c90a876f9815f77b5 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 18 Mar 2024 12:05:19 +0530 Subject: [PATCH 08/16] HDDS-10514. Fixed JSON formatting error and test cases. --- .../hadoop/hdds/protocol/DatanodeDetails.java | 49 ++++++++++- .../CustomDatanodeDetailsDeserializer.java | 72 ++++++++++++++++ .../CustomDatanodeDetailsSerializer.java | 72 ++++++++++++++++ .../hadoop/ozone/recon/api/NodeEndpoint.java | 7 ++ .../recon/api/types/DatanodeMetadata.java | 31 ------- .../types/DecommissionStatusInfoResponse.java | 7 +- .../hadoop/ozone/recon/api/TestEndpoints.java | 86 ++++++++++++++----- 7 files changed, 268 insertions(+), 56 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index b455daba529f..1ee17836e80e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -24,7 +24,13 @@ import java.util.Set; import java.util.UUID; +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonFormat; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsUtils; @@ -33,6 +39,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto; +import org.apache.hadoop.hdds.recon.CustomDatanodeDetailsDeserializer; +import org.apache.hadoop.hdds.recon.CustomDatanodeDetailsSerializer; import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.net.NodeImpl; @@ -62,6 +70,8 @@ */ @InterfaceAudience.Private @InterfaceStability.Evolving 
+@JsonSerialize(using = CustomDatanodeDetailsSerializer.class) +@JsonDeserialize(using = CustomDatanodeDetailsDeserializer.class) public class DatanodeDetails extends NodeImpl implements Comparable { @@ -86,6 +96,7 @@ public static Codec getCodec() { private String ipAddress; private String hostName; + @JsonProperty("ports") private final List ports; private String certSerialId; private String version; @@ -571,11 +582,14 @@ public String threadNamePrefix() { */ public static final class Builder { private UUID id; + private String uuidString; private String ipAddress; private String hostName; private String networkName; private String networkLocation; private int level; + // This field is being added here only for custom serialization and deserialization. + private int cost; private List ports; private String certSerialId; private String version; @@ -608,6 +622,7 @@ public Builder setDatanodeDetails(DatanodeDetails details) { this.networkName = details.getNetworkName(); this.networkLocation = details.getNetworkLocation(); this.level = details.getLevel(); + this.cost = details.getCost(); this.ports = details.getPorts(); this.certSerialId = details.getCertSerialId(); this.version = details.getVersion(); @@ -631,6 +646,17 @@ public Builder setUuid(UUID uuid) { return this; } + /** + * Sets the DatanodeUuid as String. + * + * @param uuidStr DatanodeUuid as String + * @return DatanodeDetails.Builder + */ + public Builder setUuidAsString(String uuidStr) { + this.uuidString = uuidStr; + return this; + } + /** * Sets the IP address of DataNode. * @@ -653,6 +679,17 @@ public Builder setHostName(String host) { return this; } + /** + * Sets the ports of DataNode. + * + * @param ports hostname + * @return DatanodeDetails.Builder + */ + public Builder setPorts(List ports) { + this.ports = ports; + return this; + } + /** * Sets the network name of DataNode. * @@ -680,6 +717,11 @@ public Builder setLevel(int level) { return this; } + public Builder setCost(int cost) { + this.cost = cost; + return this; + } + /** * Adds a DataNode Port. * @@ -816,11 +858,13 @@ public static Port newPort(Port.Name name, Integer value) { /** * Container to hold DataNode Port details. */ + @JsonAutoDetect public static final class Port { /** * Ports that are supported in DataNode. */ + @JsonFormat(shape = JsonFormat.Shape.OBJECT) public enum Name { STANDALONE, RATIS, REST, REPLICATION, RATIS_ADMIN, RATIS_SERVER, @BelongsToHDDSLayoutVersion(RATIS_DATASTREAM_PORT_IN_DATANODEDETAILS) @@ -838,14 +882,17 @@ public enum Name { EnumSet.of(STANDALONE, RATIS, REST)); } + @JsonProperty("name") private final Name name; + @JsonProperty("value") private final Integer value; /** * Private constructor for constructing Port object. Use * DatanodeDetails#newPort to create a new Port object. */ - private Port(Name name, Integer value) { + @JsonCreator + public Port(Name name, Integer value) { this.name = name; this.value = value; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java new file mode 100644 index 000000000000..490aa849992f --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.recon; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.deser.std.StdDeserializer; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +/** + * This is custom jackson deserializer class for DetanodeDetails class. + * Jackson deserializer is being used to deserialize json using JSON parser + * and map JSON fields to respective DatanodeDetails class fields. + */ +public class CustomDatanodeDetailsDeserializer extends StdDeserializer { + + protected CustomDatanodeDetailsDeserializer(Class vc) { + super(vc); + } + + public CustomDatanodeDetailsDeserializer() { + this(null); + } + + @Override + public DatanodeDetails deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + JsonNode node = p.getCodec().readTree(p); + List ports = new ArrayList<>(); + JsonNode portsNode = node.get("ports"); + if (portsNode != null && portsNode.isArray()) { + for (JsonNode portNode : portsNode) { + DatanodeDetails.Port port = new DatanodeDetails.Port( + DatanodeDetails.Port.Name.valueOf(portNode.get("name").asText()), portNode.get("value").asInt()); + ports.add(port); + } + } + DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder() + .setLevel(node.get("level").asInt()) + .setCost(node.get("cost").asInt()) + .setUuid(UUID.fromString(node.get("uuid").asText())) + .setUuidAsString(node.get("uuidString").asText()) + .setIpAddress(node.get("ipAddress").asText()) + .setHostName(node.get("hostName").asText()) + .setPorts(ports) + .build(); + return datanodeDetails; + } +} + + diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java new file mode 100644 index 000000000000..e86c910d32ae --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.recon; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.ser.std.StdSerializer; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; + +import java.io.IOException; + +/** + * This is custom jackson serializer class for DetanodeDetails class. + * Jackson serializer is being used to serialize DatanodeDetails class object + * to map and serialize respective object fields to JSON fields. + */ +public class CustomDatanodeDetailsSerializer extends StdSerializer { + + protected CustomDatanodeDetailsSerializer(Class t) { + super(t); + } + public CustomDatanodeDetailsSerializer() { + this(null); + } + + /** + * This method is a call back method to serialize respective object fields to JSON fields. + * + * @param datanodeDetails the datanodeDetails class object + * @param gen the JSON Generator + * @param provider the SerializerProvider + * @throws IOException + */ + @Override + public void serialize(DatanodeDetails datanodeDetails, JsonGenerator gen, SerializerProvider provider) + throws IOException { + gen.writeStartObject(); + gen.writeNumberField("level", datanodeDetails.getLevel()); + gen.writeNumberField("cost", datanodeDetails.getCost()); + gen.writeStringField("uuid", datanodeDetails.getUuid().toString()); + gen.writeStringField("uuidString", datanodeDetails.getUuidString()); + gen.writeStringField("ipAddress", datanodeDetails.getIpAddress()); + gen.writeStringField("hostName", datanodeDetails.getHostName()); + gen.writeFieldName("ports"); + gen.writeStartArray(); + for (DatanodeDetails.Port port : datanodeDetails.getPorts()) { + gen.writeStartObject(); + gen.writeStringField("name", port.getName().name()); + gen.writeNumberField("value", port.getValue()); + gen.writeEndObject(); + } + gen.writeEndArray(); + gen.writeEndObject(); + } +} + diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 5bf70e7b4a0e..1e149e9e0ae9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -21,10 +21,13 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.module.SimpleModule; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.recon.CustomDatanodeDetailsDeserializer; +import org.apache.hadoop.hdds.recon.CustomDatanodeDetailsSerializer; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import 
org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -231,6 +234,10 @@ private Response getDecommissionStatusResponse(List commandArgs) { } else { // Create ObjectMapper ObjectMapper objectMapper = new ObjectMapper(); + SimpleModule module = new SimpleModule(); + module.addDeserializer(DatanodeDetails.class, new CustomDatanodeDetailsDeserializer()); + module.addSerializer(DatanodeDetails.class, new CustomDatanodeDetailsSerializer()); + objectMapper.registerModule(module); LOG.info("processOutput: {}", processOutput); // Deserialize JSON to Java object List decommissionStatusInfoResponseList = null; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java index 7b86afdff496..4927c4a1e86a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.recon.api.types; -import com.fasterxml.jackson.annotation.JsonInclude; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -40,66 +39,47 @@ public final class DatanodeMetadata { private String hostname; @XmlElement(name = "state") - @JsonInclude(JsonInclude.Include.NON_NULL) private NodeState state; @XmlElement(name = "opState") - @JsonInclude(JsonInclude.Include.NON_NULL) private NodeOperationalState opState; @XmlElement(name = "lastHeartbeat") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long lastHeartbeat; @XmlElement(name = "storageReport") - @JsonInclude(JsonInclude.Include.NON_NULL) private DatanodeStorageReport datanodeStorageReport; @XmlElement(name = "pipelines") - @JsonInclude(JsonInclude.Include.NON_NULL) private List pipelines; @XmlElement(name = "containers") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int containers; @XmlElement(name = "openContainers") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int openContainers; @XmlElement(name = "leaderCount") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int leaderCount; @XmlElement(name = "version") - @JsonInclude(JsonInclude.Include.NON_NULL) private String version; @XmlElement(name = "setupTime") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private long setupTime; @XmlElement(name = "revision") - @JsonInclude(JsonInclude.Include.NON_NULL) private String revision; @XmlElement(name = "buildDate") - @JsonInclude(JsonInclude.Include.NON_NULL) private String buildDate; @XmlElement(name = "layoutVersion") - @JsonInclude(JsonInclude.Include.NON_DEFAULT) private int layoutVersion; @XmlElement(name = "networkLocation") private String networkLocation; - @XmlElement(name = "ipAddress") - private String ipAddress; - - public DatanodeMetadata() { - } - private DatanodeMetadata(Builder builder) { this.hostname = builder.hostname; this.uuid = builder.uuid; @@ -117,7 +97,6 @@ private DatanodeMetadata(Builder builder) { this.buildDate = builder.buildDate; this.layoutVersion = builder.layoutVersion; this.networkLocation = builder.networkLocation; - this.ipAddress = builder.ipAddress; } public String getHostname() { @@ -184,10 +163,6 @@ public String getNetworkLocation() { return networkLocation; } - public String getIpAddress() { - return ipAddress; - } - /** * Returns 
new builder class that builds a DatanodeMetadata. * @@ -218,7 +193,6 @@ public static final class Builder { private String buildDate; private int layoutVersion; private String networkLocation; - private String ipAddress; public Builder() { this.containers = 0; @@ -306,11 +280,6 @@ public Builder withNetworkLocation(String networkLocation) { this.networkLocation = networkLocation; return this; } - - public Builder withIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - return this; - } /** * Constructs DatanodeMetadata. * diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java index 9374146aa9fe..aab2a2789bbe 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DecommissionStatusInfoResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.api.types; import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.ContainerID; import java.util.List; @@ -31,7 +32,7 @@ public class DecommissionStatusInfoResponse { * Metadata of a datanode when decommissioning of datanode is in progress. */ @JsonProperty("datanodeDetails") - private DatanodeMetadata dataNodeDetails; + private DatanodeDetails dataNodeDetails; /** * Metrics of datanode when decommissioning of datanode is in progress. @@ -45,11 +46,11 @@ public class DecommissionStatusInfoResponse { @JsonProperty("containers") private Map> containers; - public DatanodeMetadata getDataNodeDetails() { + public DatanodeDetails getDataNodeDetails() { return dataNodeDetails; } - public void setDataNodeDetails(DatanodeMetadata dataNodeDetails) { + public void setDataNodeDetails(DatanodeDetails dataNodeDetails) { this.dataNodeDetails = dataNodeDetails; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index f59f2fa8907d..4b65d71899ff 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -197,6 +197,7 @@ public TestEndpoints() { super(); } + @SuppressWarnings("methodlength") private void initializeInjector() throws Exception { reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( @@ -316,14 +317,50 @@ private void initializeInjector() throws Exception { commandOutputMap.put(0, "[\n" + " {\n" + " \"datanodeDetails\": {\n" + - " \"uuid\": \"5416ef8e-6e6e-476f-a4d7-3744f7c282ab\",\n" + - " \"hostname\": \"ozone-ha-datanode-1.ozone-ha_default\",\n" + - " \"networkLocation\": \"/default-rack\",\n" + - " \"ipAddress\": \"172.25.0.9\"\n" + + " \"level\": 3,\n" + + " \"cost\": 0,\n" + + " \"uuid\": \"77bae1b4-0c33-44bd-84f9-fe91508495fe\",\n" + + " \"uuidString\": \"77bae1b4-0c33-44bd-84f9-fe91508495fe\",\n" + + " \"ipAddress\": \"172.22.0.12\",\n" + + " \"hostName\": \"ozone-ha-datanode-1.ozone-ha_default\",\n" + + " \"ports\": [\n" + + " {\n" + + " \"name\": \"HTTP\",\n" + + " \"value\": 9882\n" + + " },\n" + + " {\n" + + " \"name\": \"CLIENT_RPC\",\n" + + " \"value\": 19864\n" + + " },\n" + + " {\n" + 
+ " \"name\": \"REPLICATION\",\n" + + " \"value\": 9886\n" + + " },\n" + + " {\n" + + " \"name\": \"RATIS\",\n" + + " \"value\": 9858\n" + + " },\n" + + " {\n" + + " \"name\": \"RATIS_ADMIN\",\n" + + " \"value\": 9857\n" + + " },\n" + + " {\n" + + " \"name\": \"RATIS_SERVER\",\n" + + " \"value\": 9856\n" + + " },\n" + + " {\n" + + " \"name\": \"RATIS_DATASTREAM\",\n" + + " \"value\": 9855\n" + + " },\n" + + " {\n" + + " \"name\": \"STANDALONE\",\n" + + " \"value\": 9859\n" + + " }\n" + + " ]\n" + " },\n" + " \"metrics\": {\n" + - " \"decommissionStartTime\": \"14/03/2024 01:56:00 UTC\",\n" + - " \"numOfUnclosedPipelines\": 2,\n" + + " \"decommissionStartTime\": \"18/03/2024 05:41:10 UTC\",\n" + + " \"numOfUnclosedPipelines\": 1,\n" + " \"numOfUnderReplicatedContainers\": 0.0,\n" + " \"numOfUnclosedContainers\": 0.0\n" + " },\n" + @@ -1216,18 +1253,22 @@ public void testDatanodesDecommissionInfoAPI() throws Exception { List decommissionStatusInfoResponseList = (List) entry.getValue(); decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { - DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); - assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", dataNodeDetails.getUuid()); - assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostname()); - assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); - assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); + DatanodeDetails dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + + assertEquals(3, dataNodeDetails.getLevel()); + assertEquals(0, dataNodeDetails.getCost()); + assertEquals("77bae1b4-0c33-44bd-84f9-fe91508495fe", dataNodeDetails.getUuid().toString()); + assertEquals("172.22.0.12", dataNodeDetails.getIpAddress()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostName()); + assertEquals(8, dataNodeDetails.getPorts().size()); + assertEquals("CLIENT_RPC", dataNodeDetails.getPorts().get(1).getName().name()); + assertEquals(19864, dataNodeDetails.getPorts().get(1).getValue()); DatanodeMetrics datanodeMetrics = decommissionStatusInfoResponse.getDatanodeMetrics(); - assertEquals("14/03/2024 01:56:00 UTC", datanodeMetrics.getDecommissionStartTime()); + assertEquals("18/03/2024 05:41:10 UTC", datanodeMetrics.getDecommissionStartTime()); assertEquals(0.0, datanodeMetrics.getNumOfUnclosedContainers()); - assertEquals(2, datanodeMetrics.getNumOfUnclosedPipelines()); + assertEquals(1, datanodeMetrics.getNumOfUnclosedPipelines()); assertEquals(0.0, datanodeMetrics.getNumOfUnderReplicatedContainers()); - Map> containers = decommissionStatusInfoResponse.getContainers(); assertEquals(0, containers.size()); }); @@ -1236,23 +1277,26 @@ public void testDatanodesDecommissionInfoAPI() throws Exception { @Test public void testDecommissionInfoForDatanodeAPI() throws Exception { - Response response = nodeEndpoint.getDecommissionInfoForDatanode("5416ef8e-6e6e-476f-a4d7-3744f7c282ab"); + Response response = nodeEndpoint.getDecommissionInfoForDatanode("77bae1b4-0c33-44bd-84f9-fe91508495fe"); Map responseMap = (Map) response.getEntity(); responseMap.entrySet().forEach(entry -> { assertEquals("DatanodesDecommissionInfo", entry.getKey()); List decommissionStatusInfoResponseList = (List) entry.getValue(); decommissionStatusInfoResponseList.forEach(decommissionStatusInfoResponse -> { - DatanodeMetadata dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); - assertEquals("5416ef8e-6e6e-476f-a4d7-3744f7c282ab", 
dataNodeDetails.getUuid()); - assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostname()); + DatanodeDetails dataNodeDetails = decommissionStatusInfoResponse.getDataNodeDetails(); + assertEquals("77bae1b4-0c33-44bd-84f9-fe91508495fe", dataNodeDetails.getUuid().toString()); + assertEquals("ozone-ha-datanode-1.ozone-ha_default", dataNodeDetails.getHostName()); assertEquals("/default-rack", dataNodeDetails.getNetworkLocation()); - assertEquals("172.25.0.9", dataNodeDetails.getIpAddress()); + assertEquals("172.22.0.12", dataNodeDetails.getIpAddress()); + assertEquals(8, dataNodeDetails.getPorts().size()); + assertEquals("CLIENT_RPC", dataNodeDetails.getPorts().get(1).getName().name()); + assertEquals(19864, dataNodeDetails.getPorts().get(1).getValue()); DatanodeMetrics datanodeMetrics = decommissionStatusInfoResponse.getDatanodeMetrics(); - assertEquals("14/03/2024 01:56:00 UTC", datanodeMetrics.getDecommissionStartTime()); + assertEquals("18/03/2024 05:41:10 UTC", datanodeMetrics.getDecommissionStartTime()); assertEquals(0.0, datanodeMetrics.getNumOfUnclosedContainers()); - assertEquals(2, datanodeMetrics.getNumOfUnclosedPipelines()); + assertEquals(1, datanodeMetrics.getNumOfUnclosedPipelines()); assertEquals(0.0, datanodeMetrics.getNumOfUnderReplicatedContainers()); Map> containers = decommissionStatusInfoResponse.getContainers(); From 0f76eccb2bf1e463e66ce19fa2e2e8415407cd6d Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 18 Mar 2024 12:58:06 +0530 Subject: [PATCH 09/16] HDDS-10514. Fixed findbugs. --- .../org/apache/hadoop/hdds/protocol/DatanodeDetails.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 1ee17836e80e..dc8f2c3b2dc9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -131,6 +131,14 @@ private DatanodeDetails(Builder b) { if (b.level > 0) { setLevel(b.level); } + + // Below are dummy logs just to avoid findbugs + if (b.cost > 0) { + LOG.debug("cost: {}", getCost()); + } + if (!b.uuidString.isEmpty()) { + LOG.debug("uuid : {}", uuidString); + } } public DatanodeDetails(DatanodeDetails datanodeDetails) { From 8e90c06b4c74000038bd9a708622c66cd12efb8a Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 18 Mar 2024 16:02:46 +0530 Subject: [PATCH 10/16] HDDS-10514. Fixed test failed cases. --- .../java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index dc8f2c3b2dc9..f742b6dfb363 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -136,7 +136,7 @@ private DatanodeDetails(Builder b) { if (b.cost > 0) { LOG.debug("cost: {}", getCost()); } - if (!b.uuidString.isEmpty()) { + if (null != b.uuidString && !b.uuidString.isEmpty()) { LOG.debug("uuid : {}", uuidString); } } From b1c22b8a8e8a1d13e468a530338d36a479c8714d Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 20 Mar 2024 10:19:48 +0530 Subject: [PATCH 11/16] HDDS-10514. 
Fixed spelling mistakes in javadoc comments. --- .../hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java | 2 +- .../hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java index 490aa849992f..52c9c0c43794 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsDeserializer.java @@ -30,7 +30,7 @@ import java.util.UUID; /** - * This is custom jackson deserializer class for DetanodeDetails class. + * This is custom jackson deserializer class for DatanodeDetails class. * Jackson deserializer is being used to deserialize json using JSON parser * and map JSON fields to respective DatanodeDetails class fields. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java index e86c910d32ae..435b1e18cfa4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/CustomDatanodeDetailsSerializer.java @@ -26,7 +26,7 @@ import java.io.IOException; /** - * This is custom jackson serializer class for DetanodeDetails class. + * This is custom jackson serializer class for DatanodeDetails class. * Jackson serializer is being used to serialize DatanodeDetails class object * to map and serialize respective object fields to JSON fields. */ From 0869a88ee96760cffcc18fd8b901d2723087e2d8 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Thu, 4 Apr 2024 10:45:24 +0530 Subject: [PATCH 12/16] HDDS-10514. Fixed review comments. --- .../hadoop/ozone/recon/ReconServerConfigKeys.java | 4 ++++ .../org/apache/hadoop/ozone/recon/ReconUtils.java | 15 +++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index ab87bda4412c..8109a1516aac 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -185,6 +185,10 @@ public final class ReconServerConfigKeys { public static final int OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT = 3; + + public static final long + OZONE_RECON_COMMAND_PROCESS_TIME_OUT_DEFAULT = 60L; + /** * Private constructor for utility class. 
 */
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index a99d465f6764..dc83c0cc3d0d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -38,6 +38,8 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;

@@ -63,6 +65,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig;
 import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_COMMAND_PROCESS_TIME_OUT_DEFAULT;
 import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR;
 import static org.jooq.impl.DSL.currentTimestamp;
 import static org.jooq.impl.DSL.select;
@@ -412,10 +415,14 @@ public Map executeCommand(List commandArgs) {
     try {
       SecurityUtil.doAsLoginUser((PrivilegedExceptionAction>) () -> {
         Process process = pb.start();
-        int exitCode = process.waitFor();
-        LOG.info("'{}' command : exitcode: {}", command, exitCode);
-        validateAndReadProcessOutput(processOutput, process, exitCode);
-        processOutputMap.put(exitCode, processOutput.get());
+        boolean isProcessExited = process.waitFor(OZONE_RECON_COMMAND_PROCESS_TIME_OUT_DEFAULT, TimeUnit.SECONDS);
+        if (!isProcessExited) {
+          throw new TimeoutException("Command " + command + " process failed to complete and exit before timeout of " +
+              OZONE_RECON_COMMAND_PROCESS_TIME_OUT_DEFAULT + " seconds.");
+        }
+        LOG.info("'{}' command : exitCode: {}", command, process.exitValue());
+        validateAndReadProcessOutput(processOutput, process, process.exitValue());
+        processOutputMap.put(process.exitValue(), processOutput.get());
         return processOutputMap;
       });
     } catch (IOException ioException) {

From 0ab6ae837316665febf8f6affda0bac07ed40983 Mon Sep 17 00:00:00 2001
From: deveshsingh
Date: Wed, 15 May 2024 11:29:31 +0530
Subject: [PATCH 13/16] HDDS-10514. Added test cases.
--- .../hadoop/ozone/recon/api/TestEndpoints.java | 66 +++++++++++++++++-- 1 file changed, 62 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 07315a5c81d1..2c3439cd19b6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -102,6 +102,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mockito; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto; @@ -242,7 +243,7 @@ private void initializeInjector() throws Exception { new ContainerWithPipeline(containerInfo, pipeline); mockScmClient = mock( - StorageContainerLocationProtocol.class); + StorageContainerLocationProtocol.class, Mockito.RETURNS_DEEP_STUBS); StorageContainerServiceProvider mockScmServiceProvider = mock( StorageContainerServiceProviderImpl.class); when(mockScmServiceProvider.getPipeline( @@ -1321,14 +1322,71 @@ public void testExplicitRemovalOfNonExistingNode() { @Test public void testSuccessWhenDecommissionStatus() throws IOException { - when(mockScmClient.queryNode(any(), any(), any(), any(), any())) - .thenAnswer(invocation -> nodes); // 2 nodes decommissioning + when(mockScmClient.queryNode(any(), any(), any(), any(), any(Integer.class))).thenReturn( + nodes); // 2 nodes decommissioning when(mockScmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); when(mockScmClient.getMetrics(any())).thenReturn(metrics.get(1)); Response datanodesDecommissionInfo = nodeEndpoint.getDatanodesDecommissionInfo(); - List> dnDecommissionInfo = (List>) datanodesDecommissionInfo.getEntity(); + Map responseMap = (Map) datanodesDecommissionInfo.getEntity(); + List> dnDecommissionInfo = + (List>) responseMap.get("DatanodesDecommissionInfo"); DatanodeDetails datanode = (DatanodeDetails) dnDecommissionInfo.get(0).get("datanodeDetails"); + Map dnMetrics = (Map) dnDecommissionInfo.get(0).get("metrics"); + Map containers = (Map) dnDecommissionInfo.get(0).get("containers"); assertNotNull(datanode); + assertNotNull(dnMetrics); + assertNotNull(containers); + assertFalse(datanode.getUuidString().isEmpty()); + assertFalse(((String) dnMetrics.get("decommissionStartTime")).isEmpty()); + assertEquals(1, dnMetrics.get("numOfUnclosedPipelines")); + assertEquals(3.0, dnMetrics.get("numOfUnderReplicatedContainers")); + assertEquals(3.0, dnMetrics.get("numOfUnclosedContainers")); + + assertEquals(3, ((List) containers.get("UnderReplicated")).size()); + assertEquals(3, ((List) containers.get("UnClosed")).size()); + } + + @Test + public void testSuccessWhenDecommissionStatusWithUUID() throws IOException { + when(mockScmClient.queryNode(any(), any(), any(), any(), any(Integer.class))).thenReturn( + getNodeDetailsForUuid("654c4b89-04ef-4015-8a3b-50d0fb0e1684")); // 1 nodes decommissioning + when(mockScmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + Response datanodesDecommissionInfo = + nodeEndpoint.getDecommissionInfoForDatanode("654c4b89-04ef-4015-8a3b-50d0fb0e1684", ""); + Map responseMap = (Map) datanodesDecommissionInfo.getEntity(); + List> dnDecommissionInfo = + (List>) 
responseMap.get("DatanodesDecommissionInfo"); + DatanodeDetails datanode = (DatanodeDetails) dnDecommissionInfo.get(0).get("datanodeDetails"); + Map containers = (Map) dnDecommissionInfo.get(0).get("containers"); + assertNotNull(datanode); + assertNotNull(containers); + assertFalse(datanode.getUuidString().isEmpty()); + assertEquals("654c4b89-04ef-4015-8a3b-50d0fb0e1684", datanode.getUuidString()); + + assertEquals(3, ((List) containers.get("UnderReplicated")).size()); + assertEquals(3, ((List) containers.get("UnClosed")).size()); + } + + private List getNodeDetailsForUuid(String uuid) { + List nodesList = new ArrayList<>(); + + HddsProtos.DatanodeDetailsProto.Builder dnd = + HddsProtos.DatanodeDetailsProto.newBuilder(); + dnd.setHostName("hostName"); + dnd.setIpAddress("1.2.3.5"); + dnd.setNetworkLocation("/default"); + dnd.setNetworkName("hostName"); + dnd.addPorts(HddsProtos.Port.newBuilder() + .setName("ratis").setValue(5678).build()); + dnd.setUuid(uuid); + + HddsProtos.Node.Builder builder = HddsProtos.Node.newBuilder(); + builder.addNodeOperationalStates( + HddsProtos.NodeOperationalState.DECOMMISSIONING); + builder.addNodeStates(HddsProtos.NodeState.HEALTHY); + builder.setNodeID(dnd.build()); + nodesList.add(builder.build()); + return nodesList; } private List getNodeDetails(int n) { From 6513cc3817b8ae7356162866416bd433533a7159 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 15 May 2024 20:15:39 +0530 Subject: [PATCH 14/16] HDDS-10514. Handled review comments. --- .../org/apache/hadoop/hdds/HddsUtils.java | 75 --------- .../hadoop/hdds/client/DecommissionUtils.java | 153 ++++++++++++++++++ .../DecommissionStatusSubCommand.java | 13 +- .../hadoop/ozone/recon/api/NodeEndpoint.java | 11 +- 4 files changed, 166 insertions(+), 86 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 084a8e14b3a7..c39f16053e0d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -18,11 +18,6 @@ package org.apache.hadoop.hdds; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Strings; import com.google.protobuf.ServiceException; import jakarta.annotation.Nonnull; @@ -35,11 +30,8 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.nio.file.Path; -import java.text.DateFormat; -import java.text.SimpleDateFormat; import java.util.Collection; import java.util.Collections; -import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -48,8 +40,6 @@ import java.util.OptionalInt; import java.util.TreeMap; import java.util.UUID; -import java.util.stream.Collectors; -import java.util.stream.Stream; import org.apache.hadoop.conf.ConfigRedactor; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -59,7 +49,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProtoOrBuilder; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; @@ -881,68 +870,4 @@ public static HddsProtos.UUID toProtobuf(UUID uuid) { ? Thread.currentThread().getStackTrace() : null; } - - - - public static List getDecommissioningNodesList(Stream allNodes, - String uuid, - String ipAddress) { - List decommissioningNodes; - if (!Strings.isNullOrEmpty(uuid)) { - decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid() - .equals(uuid)).collect(Collectors.toList()); - } else if (!Strings.isNullOrEmpty(ipAddress)) { - decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress() - .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList()); - } else { - decommissioningNodes = allNodes.collect(Collectors.toList()); - } - return decommissioningNodes; - } - - public static JsonNode getBeansJsonNode(String metricsJson) throws IOException { - JsonNode jsonNode; - ObjectMapper objectMapper = new ObjectMapper(); - JsonFactory factory = objectMapper.getFactory(); - JsonParser parser = factory.createParser(metricsJson); - jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0); - return jsonNode; - } - - public static int getNumDecomNodes(JsonNode jsonNode) { - int numDecomNodes; - JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal"); - numDecomNodes = (totalDecom == null ? -1 : Integer.parseInt(totalDecom.toString())); - return numDecomNodes; - } - - @Nullable - public static Map getCountsMap(DatanodeDetails datanode, JsonNode counts, int numDecomNodes, - Map countsMap, String errMsg) - throws IOException { - for (int i = 1; i <= numDecomNodes; i++) { - if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) { - JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i); - JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i); - JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i); - JsonNode startTimeDN = counts.get("StartTimeDN." + i); - if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) { - throw new IOException(errMsg); - } - - int pipelines = Integer.parseInt(pipelinesDN.toString()); - double underReplicated = Double.parseDouble(underReplicatedDN.toString()); - double unclosed = Double.parseDouble(unclosedDN.toString()); - long startTime = Long.parseLong(startTimeDN.toString()); - Date date = new Date(startTime); - DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); - countsMap.put("decommissionStartTime", formatter.format(date)); - countsMap.put("numOfUnclosedPipelines", pipelines); - countsMap.put("numOfUnderReplicatedContainers", underReplicated); - countsMap.put("numOfUnclosedContainers", unclosed); - return countsMap; - } - } - return null; - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java new file mode 100644 index 000000000000..d585a3bd41e6 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.client; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.common.base.Strings; +import jakarta.annotation.Nullable; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Decommission specific stateless utility functions. + */ +@InterfaceAudience.Private +@InterfaceStability.Stable +public final class DecommissionUtils { + + + private static final Logger LOG = LoggerFactory.getLogger(DecommissionUtils.class); + + private DecommissionUtils() { + } + + /** + * Returns the list of uuid or ipAddress matching decommissioning status nodes. + * + * @param allNodes All datanodes which are in decommissioning status. + * @param uuid node uuid. + * @param ipAddress node ipAddress + * @return the list of uuid or ipAddress matching decommissioning status nodes. + */ + public static List getDecommissioningNodesList(Stream allNodes, + String uuid, + String ipAddress) { + List decommissioningNodes; + if (!Strings.isNullOrEmpty(uuid)) { + decommissioningNodes = allNodes.filter(p -> p.getNodeID().getUuid() + .equals(uuid)).collect(Collectors.toList()); + } else if (!Strings.isNullOrEmpty(ipAddress)) { + decommissioningNodes = allNodes.filter(p -> p.getNodeID().getIpAddress() + .compareToIgnoreCase(ipAddress) == 0).collect(Collectors.toList()); + } else { + decommissioningNodes = allNodes.collect(Collectors.toList()); + } + return decommissioningNodes; + } + + /** + * Returns Json node of datanode metrics. + * + * @param metricsJson + * @return Json node of datanode metrics + * @throws IOException + */ + public static JsonNode getBeansJsonNode(String metricsJson) throws IOException { + JsonNode jsonNode; + ObjectMapper objectMapper = new ObjectMapper(); + JsonFactory factory = objectMapper.getFactory(); + JsonParser parser = factory.createParser(metricsJson); + jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0); + return jsonNode; + } + + /** + * Returns the number of decommissioning nodes. + * + * @param jsonNode + * @return + */ + public static int getNumDecomNodes(JsonNode jsonNode) { + int numDecomNodes; + JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal"); + numDecomNodes = (totalDecom == null ? 
-1 : Integer.parseInt(totalDecom.toString())); + return numDecomNodes; + } + + /** + * Returns the counts of following info attributes: + * - decommissionStartTime + * - numOfUnclosedPipelines + * - numOfUnderReplicatedContainers + * - numOfUnclosedContainers + * + * @param datanode + * @param counts + * @param numDecomNodes + * @param countsMap + * @param errMsg + * @return + * @throws IOException + */ + @Nullable + public static Map getCountsMap(DatanodeDetails datanode, JsonNode counts, int numDecomNodes, + Map countsMap, String errMsg) + throws IOException { + for (int i = 1; i <= numDecomNodes; i++) { + if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) { + JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i); + JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i); + JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i); + JsonNode startTimeDN = counts.get("StartTimeDN." + i); + if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) { + throw new IOException(errMsg); + } + + int pipelines = Integer.parseInt(pipelinesDN.toString()); + double underReplicated = Double.parseDouble(underReplicatedDN.toString()); + double unclosed = Double.parseDouble(unclosedDN.toString()); + long startTime = Long.parseLong(startTimeDN.toString()); + Date date = new Date(startTime); + DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); + countsMap.put("decommissionStartTime", formatter.format(date)); + countsMap.put("numOfUnclosedPipelines", pipelines); + countsMap.put("numOfUnderReplicatedContainers", underReplicated); + countsMap.put("numOfUnclosedContainers", unclosed); + return countsMap; + } + } + return null; + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index 20dd109fa197..18ddbd086d7a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -19,8 +19,8 @@ import com.fasterxml.jackson.databind.JsonNode; import com.google.common.base.Strings; -import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.client.DecommissionUtils; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; @@ -69,7 +69,8 @@ public class DecommissionStatusSubCommand extends ScmSubcommand { public void execute(ScmClient scmClient) throws IOException { Stream allNodes = scmClient.queryNode(DECOMMISSIONING, null, HddsProtos.QueryScope.CLUSTER, "").stream(); - List decommissioningNodes = HddsUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress); + List decommissioningNodes = + DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress); if (!Strings.isNullOrEmpty(uuid)) { if (decommissioningNodes.isEmpty()) { System.err.println("Datanode: " + uuid + " is not in DECOMMISSIONING"); @@ -84,7 +85,7 @@ public void execute(ScmClient scmClient) throws IOException { } else { if (!json) { System.out.println("\nDecommission Status: DECOMMISSIONING - " + - decommissioningNodes.size() + " node(s)"); + decommissioningNodes.size() + " node(s)"); } } @@ -92,8 +93,8 @@ 
     int numDecomNodes = -1;
     JsonNode jsonNode = null;
     if (metricsJson != null) {
-      jsonNode = HddsUtils.getBeansJsonNode(metricsJson);
-      numDecomNodes = HddsUtils.getNumDecomNodes(jsonNode);
+      jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson);
+      numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode);
     }
 
     if (json) {
@@ -150,7 +151,7 @@ private Map<String, Object> getCounts(DatanodeDetails datanode, JsonNode counts,
     Map<String, Object> countsMap = new LinkedHashMap<>();
     String errMsg = getErrorMessage() + datanode.getHostName();
     try {
-      countsMap = HddsUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg);
+      countsMap = DecommissionUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg);
       if (countsMap != null) {
         return countsMap;
       }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index 706b6ca58d85..a0bcfd302554 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -21,7 +21,7 @@
 import com.fasterxml.jackson.databind.JsonNode;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.client.DecommissionUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
@@ -381,13 +381,14 @@ private Response getDecommissionStatusResponse(String uuid, String ipAddress) th
     Map<String, Object> responseMap = new HashMap<>();
     Stream<HddsProtos.Node> allNodes = scmClient.queryNode(DECOMMISSIONING,
         null, HddsProtos.QueryScope.CLUSTER, "", ClientVersion.CURRENT_VERSION).stream();
-    List<HddsProtos.Node> decommissioningNodes = HddsUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress);
+    List<HddsProtos.Node> decommissioningNodes =
+        DecommissionUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress);
     String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
     int numDecomNodes = -1;
     JsonNode jsonNode = null;
     if (metricsJson != null) {
-      jsonNode = HddsUtils.getBeansJsonNode(metricsJson);
-      numDecomNodes = HddsUtils.getNumDecomNodes(jsonNode);
+      jsonNode = DecommissionUtils.getBeansJsonNode(metricsJson);
+      numDecomNodes = DecommissionUtils.getNumDecomNodes(jsonNode);
     }
     List<Map<String, Object>> dnDecommissionInfo =
         getDecommissioningNodesDetails(decommissioningNodes, jsonNode, numDecomNodes);
@@ -422,7 +423,7 @@ private Map<String, Object> getCounts(DatanodeDetails datanode, JsonNode counts,
     Map<String, Object> countsMap = new LinkedHashMap<>();
     String errMsg = getErrorMessage() + datanode.getHostName();
     try {
-      countsMap = HddsUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg);
+      countsMap = DecommissionUtils.getCountsMap(datanode, counts, numDecomNodes, countsMap, errMsg);
       if (countsMap != null) {
         return countsMap;
       }

From aa1b62b6e117d31368c36025d646f391d75995ad Mon Sep 17 00:00:00 2001
From: deveshsingh
Date: Wed, 15 May 2024 20:18:38 +0530
Subject: [PATCH 15/16] HDDS-10514. Handled review comments.
--- .../java/org/apache/hadoop/hdds/client/DecommissionUtils.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java index d585a3bd41e6..7d5b610b0875 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/DecommissionUtils.java @@ -107,7 +107,7 @@ public static int getNumDecomNodes(JsonNode jsonNode) { } /** - * Returns the counts of following info attributes: + * Returns the counts of following info attributes. * - decommissionStartTime * - numOfUnclosedPipelines * - numOfUnderReplicatedContainers From b4bc43816e5b80967e27549d5851d35d9beb8bef Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Fri, 17 May 2024 14:19:33 +0530 Subject: [PATCH 16/16] HDDS-10514. Removed unused constant. --- .../org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index 8109a1516aac..5c9e40396358 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -186,9 +186,6 @@ public final class ReconServerConfigKeys { public static final int OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT = 3; - public static final long - OZONE_RECON_COMMAND_PROCESS_TIME_OUT_DEFAULT = 60L; - /** * Private constructor for utility class. */
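As a quick aside for anyone exercising the new helpers outside SCM: the two JSON parsing utilities introduced in DecommissionUtils compose as in the minimal sketch below. The bean layout and metric names mirror what the patch reads from the NodeDecommissionMetrics bean, while the payload values and the class name DecommissionUtilsExample are invented for illustration.

    import com.fasterxml.jackson.databind.JsonNode;
    import org.apache.hadoop.hdds.client.DecommissionUtils;

    public final class DecommissionUtilsExample {
      public static void main(String[] args) throws Exception {
        // Hand-built payload shaped like the SCM decommission metrics bean;
        // the values are placeholders, not real cluster output.
        String metricsJson = "{\"beans\": [{"
            + "\"DecommissioningMaintenanceNodesTotal\": 1,"
            + "\"tag.datanode.1\": \"dn1.example.com\","
            + "\"StartTimeDN.1\": 1715700000000,"
            + "\"PipelinesWaitingToCloseDN.1\": 2,"
            + "\"UnderReplicatedDN.1\": 5.0,"
            + "\"UnclosedContainersDN.1\": 3.0"
            + "}]}";

        // getBeansJsonNode unwraps the first entry of the "beans" array.
        JsonNode bean = DecommissionUtils.getBeansJsonNode(metricsJson);

        // getNumDecomNodes reads DecommissioningMaintenanceNodesTotal,
        // falling back to -1 when the counter is absent.
        int numDecomNodes = DecommissionUtils.getNumDecomNodes(bean);
        System.out.println("decommissioning nodes: " + numDecomNodes); // prints 1
      }
    }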
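getCountsMap then resolves the per-node counters by matching the datanode hostname against the tag.datanode.N tags, as in this companion sketch under the same illustrative payload. The DatanodeDetails builder calls are assumptions about that API made for the example; only the hostname has to line up with the payload for the lookup to succeed.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.UUID;

    import com.fasterxml.jackson.databind.JsonNode;
    import org.apache.hadoop.hdds.client.DecommissionUtils;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;

    public final class CountsMapExample {
      public static void main(String[] args) throws Exception {
        // Same illustrative payload as in the previous sketch.
        String metricsJson = "{\"beans\": [{"
            + "\"DecommissioningMaintenanceNodesTotal\": 1,"
            + "\"tag.datanode.1\": \"dn1.example.com\","
            + "\"StartTimeDN.1\": 1715700000000,"
            + "\"PipelinesWaitingToCloseDN.1\": 2,"
            + "\"UnderReplicatedDN.1\": 5.0,"
            + "\"UnclosedContainersDN.1\": 3.0"
            + "}]}";
        JsonNode bean = DecommissionUtils.getBeansJsonNode(metricsJson);
        int numDecomNodes = DecommissionUtils.getNumDecomNodes(bean);

        // Hostname must equal the "tag.datanode.1" value above; the
        // builder usage here is an assumption for the example.
        DatanodeDetails dn = DatanodeDetails.newBuilder()
            .setUuid(UUID.randomUUID())
            .setHostName("dn1.example.com")
            .setIpAddress("10.0.0.1")
            .build();

        Map<String, Object> counts = DecommissionUtils.getCountsMap(
            dn, bean, numDecomNodes, new LinkedHashMap<>(),
            "metrics missing for " + dn.getHostName());
        // On a match, counts holds decommissionStartTime,
        // numOfUnclosedPipelines, numOfUnderReplicatedContainers and
        // numOfUnclosedContainers; otherwise getCountsMap returns null.
        System.out.println(counts);
      }
    }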