diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 402398e36c3f..fb5a2deee26d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -452,4 +452,9 @@ StatusAndMessages queryUpgradeFinalizationProgress(
   DecommissionScmResponseProto decommissionScm(
       String scmId) throws IOException;
 
+  /**
+   * Returns the SCM metrics matching the given JMX object-name query as a
+   * JSON string, or all registered MBeans when the query is null.
+   */
+  String getMetrics(String query) throws IOException;
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index e8bddb42cfbd..663f317a3b3b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -474,4 +474,9 @@ List<ContainerInfo> getListOfContainers(
   DecommissionScmResponseProto decommissionScm(
       String scmId) throws IOException;
 
+  /**
+   * Returns the SCM metrics matching the given JMX object-name query as a
+   * JSON string, or all registered MBeans when the query is null.
+   */
+  String getMetrics(String query) throws IOException;
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 84a0fa4886ce..109358c67bf6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -68,6 +68,8 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
@@ -1143,4 +1145,12 @@ public DecommissionScmResponseProto decommissionScm(
         .getDecommissionScmResponse();
     return response;
   }
+
+  @Override
+  public String getMetrics(String query) throws IOException {
+    GetMetricsRequestProto request = GetMetricsRequestProto.newBuilder().setQuery(query).build();
+    GetMetricsResponseProto response = submitRequest(Type.GetMetrics,
+        builder -> builder.setGetMetricsRequest(request)).getGetMetricsResponse();
+    return response.getMetricsJson();
+  }
 }
diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index 6adca817ed1d..e8b8d623942a 100644
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@ -83,6 +83,7 @@ message ScmContainerLocationRequest {
   optional DecommissionScmRequestProto decommissionScmRequest = 44;
   optional SingleNodeQueryRequestProto singleNodeQueryRequest = 45;
   optional GetContainersOnDecomNodeRequestProto getContainersOnDecomNodeRequest = 46;
+  optional GetMetricsRequestProto getMetricsRequest = 47;
 }
 
 message ScmContainerLocationResponse {
@@ -137,6 +138,7 @@ message ScmContainerLocationResponse {
   optional DecommissionScmResponseProto decommissionScmResponse = 44;
   optional SingleNodeQueryResponseProto singleNodeQueryResponse = 45;
   optional GetContainersOnDecomNodeResponseProto getContainersOnDecomNodeResponse = 46;
+  optional GetMetricsResponseProto getMetricsResponse = 47;
 
   enum Status {
     OK = 1;
@@ -190,6 +192,7 @@ enum Type {
   DecommissionScm = 40;
   SingleNodeQuery = 41;
   GetContainersOnDecomNode = 42;
+  GetMetrics = 43;
 }
 
 /**
@@ -618,6 +621,14 @@ message GetContainersOnDecomNodeResponseProto {
   repeated ContainersOnDecomNodeProto containersOnDecomNode = 1;
 }
 
+message GetMetricsRequestProto {
+  optional string query = 1;
+}
+
+message GetMetricsResponseProto {
+  optional string metricsJson = 1;
+}
+
 /**
  * Protocol used from an HDFS node to StorageContainerManager. See the request
  * and response messages for details of the RPC calls.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java
new file mode 100644
index 000000000000..0778b9a30dc3
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import com.fasterxml.jackson.core.JsonEncoding;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.reflect.Array;
+import java.nio.charset.StandardCharsets;
+import java.util.Iterator;
+import java.util.Set;
+import javax.management.AttributeNotFoundException;
+import javax.management.InstanceNotFoundException;
+import javax.management.IntrospectionException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanInfo;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.QueryExp;
+import javax.management.ReflectionException;
+import javax.management.RuntimeErrorException;
+import javax.management.RuntimeMBeanException;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.TabularData;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Class used to fetch metrics from MBeanServer.
+ */
+public class FetchMetrics {
+  private static final Logger LOG = LoggerFactory.getLogger(FetchMetrics.class);
+  private final MBeanServer mBeanServer;
+  private final JsonFactory jsonFactory;
+
+  public FetchMetrics() {
+    this.mBeanServer = ManagementFactory.getPlatformMBeanServer();
+    this.jsonFactory = new JsonFactory();
+  }
+
+  /**
+   * Returns a JSON description of the MBeans matching the given object-name
+   * query, or of all registered MBeans when the query is null.
+   */
+  public String getMetrics(String qry) {
+    try {
+      JsonGenerator jg = null;
+      ByteArrayOutputStream opStream = new ByteArrayOutputStream();
+
+      try {
+        jg = this.jsonFactory.createGenerator(opStream, JsonEncoding.UTF8);
+        jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
+        jg.useDefaultPrettyPrinter();
+        jg.writeStartObject();
+        if (qry == null) {
+          qry = "*:*";
+        }
+        this.listBeans(jg, new ObjectName(qry));
+      } finally {
+        if (jg != null) {
+          jg.close();
+        }
+      }
+      return new String(opStream.toByteArray(), StandardCharsets.UTF_8);
+    } catch (IOException | MalformedObjectNameException ex) {
+      LOG.error("Caught an exception while processing getMetrics request", ex);
+    }
+    return null;
+  }
+
+  private void listBeans(JsonGenerator jg, ObjectName qry)
+      throws IOException {
+    LOG.debug("Listing beans for {}", qry);
+    Set<ObjectName> names = this.mBeanServer.queryNames(qry, (QueryExp) null);
+    jg.writeArrayFieldStart("beans");
+    Iterator<ObjectName> it = names.iterator();
+
+    while (it.hasNext()) {
+      ObjectName oname = it.next();
+      String code = "";
+
+      MBeanInfo minfo;
+      try {
+        minfo = this.mBeanServer.getMBeanInfo(oname);
+        code = minfo.getClassName();
+        String prs = "";
+
+        try {
+          if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) {
+            prs = "modelerType";
+            code = (String) this.mBeanServer.getAttribute(oname, prs);
+          }
+        } catch (AttributeNotFoundException | MBeanException | RuntimeException
+            | ReflectionException ex) {
+          LOG.error("getting attribute {} of {} threw an exception", prs, oname, ex);
+        }
+      } catch (InstanceNotFoundException ex) {
+        // The MBean was unregistered between queryNames and getMBeanInfo.
+        continue;
+      } catch (IntrospectionException | ReflectionException ex) {
+        LOG.error("Problem while trying to process JMX query: {} with MBean {}", qry, oname, ex);
+        continue;
+      }
+      jg.writeStartObject();
+      jg.writeStringField("name", oname.toString());
+      jg.writeStringField("modelerType", code);
+      MBeanAttributeInfo[] attrs = minfo.getAttributes();
+      for (int i = 0; i < attrs.length; ++i) {
+        this.writeAttribute(jg, oname, attrs[i]);
+      }
+      jg.writeEndObject();
+    }
+    jg.writeEndArray();
+  }
+
+  private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeInfo attr)
+      throws IOException {
+    if (attr.isReadable()) {
+      String attName = attr.getName();
+      if (!"modelerType".equals(attName)) {
+        if (attName.indexOf("=") < 0 && attName.indexOf(":") < 0 && attName.indexOf(" ") < 0) {
+          Object value = null;
+
+          try {
+            value = this.mBeanServer.getAttribute(oname, attName);
+          } catch (RuntimeMBeanException ex) {
+            // UnsupportedOperationException is thrown too often to log at error level.
+            if (ex.getCause() instanceof UnsupportedOperationException) {
+              LOG.debug("getting attribute {} of {} threw an exception", attName, oname, ex);
+            } else {
+              LOG.error("getting attribute {} of {} threw an exception", attName, oname, ex);
+            }
+            return;
+          } catch (RuntimeErrorException ex) {
+            LOG.error("getting attribute {} of {} threw an exception", attName, oname, ex);
+            return;
+          } catch (MBeanException | RuntimeException | ReflectionException ex) {
+            LOG.error("getting attribute {} of {} threw an exception", attName, oname, ex);
+            return;
+          } catch (AttributeNotFoundException | InstanceNotFoundException ex) {
+            return;
+          }
+          this.writeAttribute(jg, attName, value);
+        }
+      }
+    }
+  }
+
+  private void writeAttribute(JsonGenerator jg, String attName, Object value) throws IOException {
+    jg.writeFieldName(attName);
+    this.writeObject(jg, value);
+  }
+
+  private void writeObject(JsonGenerator jg, Object value) throws IOException {
+    if (value == null) {
+      jg.writeNull();
+    } else {
+      Class<?> c = value.getClass();
+      if (c.isArray()) {
+        jg.writeStartArray();
+        int len = Array.getLength(value);
+
+        for (int j = 0; j < len; ++j) {
+          Object entry = Array.get(value, j);
+          this.writeObject(jg, entry);
+        }
+
+        jg.writeEndArray();
+      } else if (value instanceof Number) {
+        Number n = (Number) value;
+        jg.writeNumber(n.toString());
+      } else if (value instanceof Boolean) {
+        Boolean b = (Boolean) value;
+        jg.writeBoolean(b);
+      } else if (value instanceof CompositeData) {
+        CompositeData cds = (CompositeData) value;
+        CompositeType comp = cds.getCompositeType();
+        Set<String> keys = comp.keySet();
+        jg.writeStartObject();
+        Iterator<String> keyIt = keys.iterator();
+
+        while (keyIt.hasNext()) {
+          String key = keyIt.next();
+          this.writeAttribute(jg, key, cds.get(key));
+        }
+
+        jg.writeEndObject();
+      } else if (value instanceof TabularData) {
+        TabularData tds = (TabularData) value;
+        jg.writeStartArray();
+        Iterator<?> rowIt = tds.values().iterator();
+
+        while (rowIt.hasNext()) {
+          this.writeObject(jg, rowIt.next());
+        }
+        jg.writeEndArray();
+      } else {
+        jg.writeString(value.toString());
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index f402b9309fe4..a44536bf4463 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -72,6 +72,8 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
@@ -714,6 +716,12 @@ public ScmContainerLocationResponse processRequest(
           .setDecommissionScmResponse(decommissionScm(
               request.getDecommissionScmRequest()))
           .build();
+    case GetMetrics:
+      return ScmContainerLocationResponse.newBuilder()
+          .setCmdType(request.getCmdType())
+          .setStatus(Status.OK)
+          .setGetMetricsResponse(getMetrics(request.getGetMetricsRequest()))
+          .build();
     default:
       throw new IllegalArgumentException(
           "Unknown command type: " + request.getCmdType());
@@ -1287,4 +1295,9 @@ public DecommissionScmResponseProto decommissionScm(
     return impl.decommissionScm(
         request.getScmId());
   }
+
+  public GetMetricsResponseProto getMetrics(GetMetricsRequestProto request) throws IOException {
+    return GetMetricsResponseProto.newBuilder()
+        .setMetricsJson(impl.getMetrics(request.getQuery())).build();
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 13bef8590b79..faee4fcaaab7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -63,6 +63,7 @@
+import org.apache.hadoop.hdds.scm.FetchMetrics;
 import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
 import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
 import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -1373,4 +1374,10 @@ public DecommissionScmResponseProto decommissionScm(
     }
     return decommissionScmResponseBuilder.build();
   }
+
+  @Override
+  public String getMetrics(String query) throws IOException {
+    FetchMetrics fetchMetrics = new FetchMetrics();
+    return fetchMetrics.getMetrics(query);
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java
new file mode 100644
index 000000000000..ede005745e5e
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.scm.FetchMetrics;
+import org.junit.jupiter.api.Test;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+class TestFetchMetrics {
+  private static final FetchMetrics fetchMetrics = new FetchMetrics();
+
+  @Test
+  public void testFetchAll() {
+    String result = fetchMetrics.getMetrics(null);
+    Pattern p = Pattern.compile("beans", Pattern.MULTILINE);
+    Matcher m = p.matcher(result);
+    assertTrue(m.find());
+  }
+
+  @Test
+  public void testFetchFiltered() {
+    String result = fetchMetrics.getMetrics(
+        "Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
+    Pattern p = Pattern.compile("beans", Pattern.MULTILINE);
+    Matcher m = p.matcher(result);
+    assertTrue(m.find());
+  }
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index d07e696e7ef0..6a5550e9fbd3 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -563,4 +563,9 @@ public DecommissionScmResponseProto decommissionScm(
     return storageContainerLocationClient.decommissionScm(scmId);
   }
 
+  @Override
+  public String getMetrics(String query) throws IOException {
+    return storageContainerLocationClient.getMetrics(query);
+  }
+
 }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
index b53632f8eec5..17d577ff2dc7 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdds.scm.cli.datanode;
 
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -27,6 +31,9 @@
 import picocli.CommandLine;
 
 import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -81,17 +88,56 @@ public void execute(ScmClient scmClient) throws IOException {
           decommissioningNodes.size() + " node(s)");
     }
 
+    String metricsJson = scmClient.getMetrics(
+        "Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
+    int numDecomNodes = -1;
+    JsonNode jsonNode = null;
+    if (metricsJson != null) {
+      ObjectMapper objectMapper = new ObjectMapper();
+      JsonFactory factory = objectMapper.getFactory();
+      JsonParser parser = factory.createParser(metricsJson);
+      jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0);
+      JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal");
+      numDecomNodes = (totalDecom == null ? -1 : totalDecom.asInt());
+    }
+
     for (HddsProtos.Node node : decommissioningNodes) {
       DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
           node.getNodeID());
       printDetails(datanode);
+      printCounts(datanode, jsonNode, numDecomNodes);
       Map<String, List<ContainerID>> containers = scmClient.getContainersOnDecomNode(datanode);
       System.out.println(containers);
     }
   }
+
   private void printDetails(DatanodeDetails datanode) {
     System.out.println("\nDatanode: " + datanode.getUuid().toString() +
         " (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress() +
         "/" + datanode.getHostName() + ")");
   }
+
+  private void printCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) {
+    try {
+      for (int i = 1; i <= numDecomNodes; i++) {
+        if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) {
+          int pipelines = counts.get("PipelinesWaitingToCloseDN." + i).asInt();
+          double underReplicated = counts.get("UnderReplicatedDN." + i).asDouble();
+          double unclosed = counts.get("UnclosedContainersDN." + i).asDouble();
+          long startTime = counts.get("StartTimeDN." + i).asLong();
+          Date date = new Date(startTime);
+          DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z");
+          System.out.println("Decommission started at: " + formatter.format(date));
+          System.out.println("No. of Pipelines: " + pipelines);
+          System.out.println("No. of UnderReplicated containers: " + underReplicated);
+          System.out.println("No. of Unclosed Containers: " + unclosed);
+          return;
+        }
+      }
+      System.err.println("Error getting pipeline and container counts for " + datanode.getHostName());
+    } catch (NullPointerException ex) {
+      // Metrics for this datanode were missing or incomplete.
+      System.err.println("Error getting pipeline and container counts for " + datanode.getHostName());
+    }
+  }
 }
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
index 41c31caf1f0a..ad0323d334e6 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
@@ -60,6 +60,7 @@ public class TestDecommissionStatusSubCommand {
   private DecommissionStatusSubCommand cmd;
   private List<HddsProtos.Node> nodes = getNodeDetails(2);
   private Map<String, List<ContainerID>> containerOnDecom = getContainersOnDecomNodes();
+  private ArrayList<String> metrics = getMetrics();
 
   @BeforeEach
   public void setup() throws UnsupportedEncodingException {
@@ -80,6 +81,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException {
     when(scmClient.queryNode(any(), any(), any(), any()))
         .thenAnswer(invocation -> nodes); // 2 nodes decommissioning
     when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
+    when(scmClient.getMetrics(any())).thenReturn(metrics.get(1));
 
     cmd.execute(scmClient);
     Pattern p = Pattern.compile("Decommission\\sStatus:\\s" +
@@ -91,15 +93,17 @@ public void testSuccessWhenDecommissionStatus() throws IOException {
     p = Pattern.compile("Datanode:\\s.*host0\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertTrue(m.find());
-    p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host1\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertTrue(m.find());
-    p = Pattern.compile("Datanode:\\s.*host1\\)");
+    p = Pattern.compile("No\\. of Pipelines:");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertTrue(m.find());
-    p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+    assertTrue(m.find()); // metrics for both are shown
+    p = Pattern.compile("UnderReplicated=.* UnClosed=");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertTrue(m.find());
+    assertTrue(m.find()); // container lists for both are shown
   }
 
@@ -109,6 +113,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException {
     when(scmClient.queryNode(any(), any(), any(), any()))
         .thenReturn(new ArrayList<>());
     when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>());
+    when(scmClient.getMetrics(any())).thenReturn(metrics.get(0));
 
     cmd.execute(scmClient);
     Pattern p = Pattern.compile("Decommission\\sStatus:\\s" +
@@ -117,10 +122,10 @@ public void testNoNodesWhenDecommissionStatus() throws IOException {
     assertTrue(m.find());
 
     // no host details are shown
-    p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host0\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
-    p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host1.\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
   }
@@ -131,24 +136,22 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException {
     when(scmClient.queryNode(any(), any(), any(), any()))
         .thenAnswer(invocation -> nodes); // 2 nodes decommissioning
     when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
+    when(scmClient.getMetrics(any())).thenReturn(metrics.get(1));
     CommandLine c = new CommandLine(cmd);
     c.parseArgs("--id", nodes.get(0).getNodeID().getUuid());
     cmd.execute(scmClient);
 
     // check status of host0
-    Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE);
+    Pattern p = Pattern.compile("Datanode:\\s.*host0\\)");
     Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertTrue(m.find());
-    p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
-    m = p.matcher(outContent.toString(DEFAULT_ENCODING));
-    assertTrue(m.find());
-
     // as uuid of only host0 is passed, host1 should NOT be displayed
-    p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host1.\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
-    p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+    p = Pattern.compile("UnderReplicated=.*UnClosed=");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+    assertTrue(m.find());
     assertFalse(m.find());
   }
@@ -161,6 +164,7 @@ public void testIdOptionDecommissionStatusFail() throws IOException {
         .thenReturn(containerOnDecom);
     when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID())))
         .thenReturn(new HashMap<>());
+    when(scmClient.getMetrics(any())).thenReturn(metrics.get(2));
 
     CommandLine c = new CommandLine(cmd);
     c.parseArgs("--id", nodes.get(1).getNodeID().getUuid());
@@ -172,10 +176,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException {
     assertTrue(m.find());
 
     // no host details are shown
-    p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host0\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
-    p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host1\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
   }
@@ -186,24 +190,22 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException {
     when(scmClient.queryNode(any(), any(), any(), any()))
         .thenAnswer(invocation -> nodes); // 2 nodes decommissioning
     when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom);
+    when(scmClient.getMetrics(any())).thenReturn(metrics.get(1));
     CommandLine c = new CommandLine(cmd);
     c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress());
     cmd.execute(scmClient);
 
     // check status of host1
-    Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE);
+    Pattern p = Pattern.compile("Datanode:\\s.*host1\\)");
     Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertTrue(m.find());
-    p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
-    m = p.matcher(outContent.toString(DEFAULT_ENCODING));
-    assertTrue(m.find());
-
     // as IpAddress of only host1 is passed, host0 should NOT be displayed
-    p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host0.\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
-    p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE);
+    p = Pattern.compile("UnderReplicated=.*UnClosed=");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
+    assertTrue(m.find());
     assertFalse(m.find());
   }
@@ -216,6 +218,7 @@ public void testIpOptionDecommissionStatusFail() throws IOException {
         .thenReturn(containerOnDecom);
     when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID())))
         .thenReturn(new HashMap<>());
+    when(scmClient.getMetrics(any())).thenReturn(metrics.get(2));
 
     CommandLine c = new CommandLine(cmd);
     c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress());
@@ -226,11 +229,11 @@ public void testIpOptionDecommissionStatusFail() throws IOException {
     Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING));
     assertTrue(m.find());
 
-    p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host0\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
-
-    p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE);
+    p = Pattern.compile("Datanode:\\s.*host1\\)");
     m = p.matcher(outContent.toString(DEFAULT_ENCODING));
     assertFalse(m.find());
   }
@@ -275,4 +278,38 @@ private Map<String, List<ContainerID>> getContainersOnDecomNodes() {
     return containerMap;
   }
 
+  private ArrayList<String> getMetrics() {
+    ArrayList<String> result = new ArrayList<>();
+    // no nodes decommissioning
+    result.add("{ \"beans\" : [ { " +
+        "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " +
+        "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 0, " +
+        "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 0, " +
+        "\"ContainersUnderReplicatedTotal\" : 0, \"ContainersUnclosedTotal\" : 0, " +
+        "\"ContainersSufficientlyReplicatedTotal\" : 0 } ]}");
+    // 2 nodes decommissioning
+    result.add("{ \"beans\" : [ { " +
+        "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " +
+        "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 2, " +
+        "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 2, " +
+        "\"ContainersUnderReplicatedTotal\" : 6, \"ContainersUnclosedTotal\" : 6, " +
+        "\"ContainersSufficientlyReplicatedTotal\" : 10, " +
+        "\"tag.datanode.1\" : \"host0\", \"tag.Hostname.1\" : \"host0\", " +
+        "\"PipelinesWaitingToCloseDN.1\" : 1, \"UnderReplicatedDN.1\" : 3, " +
+        "\"SufficientlyReplicatedDN.1\" : 0, \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 111211, " +
+        "\"tag.datanode.2\" : \"host1\", \"tag.Hostname.2\" : \"host1\", " +
+        "\"PipelinesWaitingToCloseDN.2\" : 1, \"UnderReplicatedDN.2\" : 3, " +
+        "\"SufficientlyReplicatedDN.2\" : 0, \"UnclosedContainersDN.2\" : 3, \"StartTimeDN.2\" : 221221} ]}");
+    // only host 1 decommissioning
+    result.add("{ \"beans\" : [ { " +
+        "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " +
+        "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 1, " +
+        "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 1, " +
+        "\"ContainersUnderReplicatedTotal\" : 3, \"ContainersUnclosedTotal\" : 3, " +
+        "\"ContainersSufficientlyReplicatedTotal\" : 10, " +
+        "\"tag.datanode.1\" : \"host0\", \"tag.Hostname.1\" : \"host0\", " +
+        "\"PipelinesWaitingToCloseDN.1\" : 1, \"UnderReplicatedDN.1\" : 3, " +
+        "\"SufficientlyReplicatedDN.1\" : 0, \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 221221} ]}");
+    return result;
+  }
 }