diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
index e1b51efc34cd..791e754e2c4a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
@@ -34,7 +34,7 @@ public interface NodeManagerMXBean {
    *
    * @return A state to number of nodes that in this state mapping
    */
-  Map<String, Integer> getNodeCount();
+  Map<String, Map<String, Integer>> getNodeCount();
 
   /**
    * Get the disk metrics like capacity, usage and remaining based on the
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 3c5071f1cde5..d89dac1cae35 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -482,62 +482,97 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) {
     }
   }
 
-  @Override
-  public Map<String, Integer> getNodeCount() {
-    // TODO - This does not consider decom, maint etc.
-    Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
-    for(NodeState state : NodeState.values()) {
-      // TODO - this iterate the node list once per state and needs
-      // fixed to only perform one pass.
-      nodeCountMap.put(state.toString(), getNodeCount(null, state));
+  @Override // NodeManagerMXBean
+  public Map<String, Map<String, Integer>> getNodeCount() {
+    Map<String, Map<String, Integer>> nodes = new HashMap<>();
+    for (NodeOperationalState opState : NodeOperationalState.values()) {
+      Map<String, Integer> states = new HashMap<>();
+      for (NodeState health : NodeState.values()) {
+        states.put(health.name(), 0);
+      }
+      nodes.put(opState.name(), states);
+    }
+    for (DatanodeInfo dni : nodeStateManager.getAllNodes()) {
+      NodeStatus status = dni.getNodeStatus();
+      nodes.get(status.getOperationalState().name())
+          .compute(status.getHealth().name(), (k, v) -> v + 1);
     }
-    return nodeCountMap;
+    return nodes;
   }
 
   // We should introduce DISK, SSD, etc., notion in
   // SCMNodeStat and try to use it.
-  @Override
+  @Override // NodeManagerMXBean
   public Map<String, Long> getNodeInfo() {
-    long diskCapacity = 0L;
-    long diskUsed = 0L;
-    long diskRemaning = 0L;
-
-    long ssdCapacity = 0L;
-    long ssdUsed = 0L;
-    long ssdRemaining = 0L;
-
-    List<DatanodeInfo> healthyNodes = nodeStateManager.getHealthyNodes();
-    List<DatanodeInfo> staleNodes = nodeStateManager.getStaleNodes();
-
-    List<DatanodeInfo> datanodes = new ArrayList<>(healthyNodes);
-    datanodes.addAll(staleNodes);
+    Map<String, Long> nodeInfo = new HashMap<>();
+    // Compute all the possible stats from the enums, and default to zero:
+    for (UsageStates s : UsageStates.values()) {
+      for (UsageMetrics stat : UsageMetrics.values()) {
+        nodeInfo.put(s.label + stat.name(), 0L);
+      }
+    }
 
-    for (DatanodeInfo dnInfo : datanodes) {
-      List<StorageReportProto> storageReportProtos = dnInfo.getStorageReports();
+    for (DatanodeInfo node : nodeStateManager.getAllNodes()) {
+      String keyPrefix = "";
+      NodeStatus status = node.getNodeStatus();
+      if (status.isMaintenance()) {
+        keyPrefix = UsageStates.MAINT.getLabel();
+      } else if (status.isDecommission()) {
+        keyPrefix = UsageStates.DECOM.getLabel();
+      } else if (status.isAlive()) {
+        // Inservice but not dead
+        keyPrefix = UsageStates.ONLINE.getLabel();
+      } else {
+        // dead inservice node, skip it
+        continue;
+      }
+      List<StorageReportProto> storageReportProtos = node.getStorageReports();
       for (StorageReportProto reportProto : storageReportProtos) {
         if (reportProto.getStorageType() ==
             StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) {
-          diskCapacity += reportProto.getCapacity();
-          diskRemaning += reportProto.getRemaining();
-          diskUsed += reportProto.getScmUsed();
+          nodeInfo.compute(keyPrefix + UsageMetrics.DiskCapacity.name(),
+              (k, v) -> v + reportProto.getCapacity());
+          nodeInfo.compute(keyPrefix + UsageMetrics.DiskRemaining.name(),
+              (k, v) -> v + reportProto.getRemaining());
+          nodeInfo.compute(keyPrefix + UsageMetrics.DiskUsed.name(),
+              (k, v) -> v + reportProto.getScmUsed());
         } else if (reportProto.getStorageType() ==
             StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) {
-          ssdCapacity += reportProto.getCapacity();
-          ssdRemaining += reportProto.getRemaining();
-          ssdUsed += reportProto.getScmUsed();
+          nodeInfo.compute(keyPrefix + UsageMetrics.SSDCapacity.name(),
+              (k, v) -> v + reportProto.getCapacity());
+          nodeInfo.compute(keyPrefix + UsageMetrics.SSDRemaining.name(),
+              (k, v) -> v + reportProto.getRemaining());
+          nodeInfo.compute(keyPrefix + UsageMetrics.SSDUsed.name(),
+              (k, v) -> v + reportProto.getScmUsed());
         }
       }
     }
+    return nodeInfo;
+  }
 
-    Map<String, Long> nodeInfo = new HashMap<>();
-    nodeInfo.put("DISKCapacity", diskCapacity);
-    nodeInfo.put("DISKUsed", diskUsed);
-    nodeInfo.put("DISKRemaining", diskRemaning);
+  private enum UsageMetrics {
+    DiskCapacity,
+    DiskUsed,
+    DiskRemaining,
+    SSDCapacity,
+    SSDUsed,
+    SSDRemaining
+  }
 
-    nodeInfo.put("SSDCapacity", ssdCapacity);
-    nodeInfo.put("SSDUsed", ssdUsed);
-    nodeInfo.put("SSDRemaining", ssdRemaining);
-    return nodeInfo;
+  private enum UsageStates {
+    ONLINE(""),
+    MAINT("Maintenance"),
+    DECOM("Decommissioned");
+
+    private final String label;
+
+    public String getLabel() {
+      return label;
+    }
+
+    UsageStates(String label) {
+      this.label = label;
+    }
   }
 
   /**
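Reviewer sketch (not part of the patch): how a caller might read the reworked maps. The NodeCountSketch class and its printSummary helper are hypothetical, but the key names follow the code above: getNodeCount() is keyed by NodeOperationalState name and then by health NodeState name, while getNodeInfo() prefixes each UsageMetrics name with the UsageStates label (empty for in-service nodes).

import java.util.Map;
import org.apache.hadoop.hdds.scm.node.NodeManagerMXBean;

// Illustration only; assumes a live NodeManagerMXBean such as SCMNodeManager.
public final class NodeCountSketch {
  static void printSummary(NodeManagerMXBean nodeManager) {
    // Outer key: NodeOperationalState name; inner key: health NodeState name.
    Map<String, Map<String, Integer>> counts = nodeManager.getNodeCount();
    int inServiceHealthy = counts.get("IN_SERVICE").get("HEALTHY");

    // In-service usage keys carry no prefix (UsageStates.ONLINE label is "");
    // maintenance and decommissioned totals carry their state label.
    Map<String, Long> usage = nodeManager.getNodeInfo();
    long diskCapacity = usage.get("DiskCapacity");
    long maintDiskCapacity = usage.get("MaintenanceDiskCapacity");
    long decomDiskCapacity = usage.get("DecommissionedDiskCapacity");

    System.out.printf("in-service healthy=%d disk=%d maint=%d decom=%d%n",
        inServiceHealthy, diskCapacity, maintDiskCapacity, decomDiskCapacity);
  }
}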
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index 5b872c10155b..ccb5d144ae4f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@ -18,15 +18,12 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-
 import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -36,6 +33,7 @@
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * This class maintains Node related metrics.
@@ -116,39 +114,58 @@ void incNumNodeReportProcessingFailed() {
 
   @Override
   @SuppressWarnings("SuspiciousMethodCalls")
   public void getMetrics(MetricsCollector collector, boolean all) {
-    Map<String, Integer> nodeCount = managerMXBean.getNodeCount();
+    Map<String, Map<String, Integer>> nodeCount = managerMXBean.getNodeCount();
     Map<String, Long> nodeInfo = managerMXBean.getNodeInfo();
 
-    registry.snapshot(
-        collector.addRecord(registry.info()) // Add annotated ones first
-            .addGauge(Interns.info(
-                "HealthyNodes",
-                "Number of healthy datanodes"),
-                nodeCount.get(HEALTHY.toString()))
-            .addGauge(Interns.info("StaleNodes",
-                "Number of stale datanodes"),
-                nodeCount.get(STALE.toString()))
-            .addGauge(Interns.info("DeadNodes",
-                "Number of dead datanodes"),
-                nodeCount.get(DEAD.toString()))
-            .addGauge(Interns.info("DiskCapacity",
-                "Total disk capacity"),
-                nodeInfo.get("DISKCapacity"))
-            .addGauge(Interns.info("DiskUsed",
-                "Total disk capacity used"),
-                nodeInfo.get("DISKUsed"))
-            .addGauge(Interns.info("DiskRemaining",
-                "Total disk capacity remaining"),
-                nodeInfo.get("DISKRemaining"))
-            .addGauge(Interns.info("SSDCapacity",
-                "Total ssd capacity"),
-                nodeInfo.get("SSDCapacity"))
-            .addGauge(Interns.info("SSDUsed",
-                "Total ssd capacity used"),
-                nodeInfo.get("SSDUsed"))
-            .addGauge(Interns.info("SSDRemaining",
-                "Total disk capacity remaining"),
-                nodeInfo.get("SSDRemaining")),
-        all);
+    /*
+     * Loop over the Node map and create a metric for the cross product of all
+     * Operational and health states, i.e.:
+     *   InServiceHealthy
+     *   InServiceStale
+     *   ...
+     *   EnteringMaintenanceHealthy
+     *   ...
+     */
+    MetricsRecordBuilder metrics = collector.addRecord(registry.info());
+    for (Map.Entry<String, Map<String, Integer>> e : nodeCount.entrySet()) {
+      for (Map.Entry<String, Integer> h : e.getValue().entrySet()) {
+        metrics.addGauge(
+            Interns.info(
+                StringUtils.camelize(e.getKey() + "_" + h.getKey() + "_nodes"),
+                "Number of " + e.getKey() + " " + h.getKey() + " datanodes"),
+            h.getValue());
+      }
+    }
+
+    for (Map.Entry<String, Long> e : nodeInfo.entrySet()) {
+      metrics.addGauge(
+          Interns.info(e.getKey(), diskMetricDescription(e.getKey())),
+          e.getValue());
+    }
+    registry.snapshot(metrics, all);
+  }
+
+  private String diskMetricDescription(String metric) {
+    StringBuilder sb = new StringBuilder();
+    sb.append("Total");
+    if (metric.indexOf("Maintenance") >= 0) {
+      sb.append(" maintenance");
+    } else if (metric.indexOf("Decommissioned") >= 0) {
+      sb.append(" decommissioned");
+    }
+    if (metric.indexOf("DiskCapacity") >= 0) {
+      sb.append(" disk capacity");
+    } else if (metric.indexOf("DiskUsed") >= 0) {
+      sb.append(" disk capacity used");
+    } else if (metric.indexOf("DiskRemaining") >= 0) {
+      sb.append(" disk capacity remaining");
+    } else if (metric.indexOf("SSDCapacity") >= 0) {
+      sb.append(" SSD capacity");
+    } else if (metric.indexOf("SSDUsed") >= 0) {
+      sb.append(" SSD capacity used");
+    } else if (metric.indexOf("SSDRemaining") >= 0) {
+      sb.append(" SSD capacity remaining");
+    }
+    return sb.toString();
+  }
 }
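The gauge names asserted in the tests below are produced by Hadoop's org.apache.hadoop.util.StringUtils.camelize, which lower-cases its input, splits it on underscores, and capitalizes each word. A stand-alone re-implementation for illustration (mirroring that behaviour, not the Hadoop code itself):

import java.util.Locale;

public final class CamelizeDemo {
  // Mirrors the observable behaviour of StringUtils.camelize for these inputs.
  static String camelize(String s) {
    StringBuilder sb = new StringBuilder();
    for (String word : s.toLowerCase(Locale.ROOT).split("_")) {
      if (!word.isEmpty()) {
        sb.append(Character.toUpperCase(word.charAt(0)))
            .append(word.substring(1));
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // "IN_SERVICE" + "_" + "HEALTHY" + "_nodes" -> InServiceHealthyNodes
    System.out.println(camelize("IN_SERVICE_HEALTHY_nodes"));
    // "ENTERING_MAINTENANCE" + "_" + "DEAD" + "_nodes"
    //     -> EnteringMaintenanceDeadNodes
    System.out.println(camelize("ENTERING_MAINTENANCE_DEAD_nodes"));
  }
}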
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 8a48a68fcd08..d24cbfd5114d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -471,12 +472,23 @@ public Boolean isNodeRegistered(
   }
 
   @Override
-  public Map<String, Integer> getNodeCount() {
-    Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
+  public Map<String, Map<String, Integer>> getNodeCount() {
+    Map<String, Map<String, Integer>> nodes = new HashMap<>();
+    for (NodeOperationalState opState : NodeOperationalState.values()) {
+      Map<String, Integer> states = new HashMap<>();
+      for (HddsProtos.NodeState health : HddsProtos.NodeState.values()) {
+        states.put(health.name(), 0);
+      }
+      nodes.put(opState.name(), states);
+    }
+    // At the moment MockNodeManager is not aware of decommission and
+    // maintenance states, therefore count every node against IN_SERVICE.
+    // This will be fixed as part of HDDS-2673.
     for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) {
-      nodeCountMap.put(state.toString(), getNodeCount(null, state));
+      nodes.get(NodeOperationalState.IN_SERVICE.name())
+          .compute(state.name(), (k, v) -> v + getNodeCount(null, state));
     }
-    return nodeCountMap;
+    return nodes;
   }
 
   @Override
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index 883e910a4b29..dad3448bdf38 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -243,7 +243,7 @@ public void close() throws IOException {
   }
 
   @Override
-  public Map<String, Integer> getNodeCount() {
+  public Map<String, Map<String, Integer>> getNodeCount() {
     return null;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 1af996cb3265..0917fa4019b6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -19,10 +19,12 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -53,6 +55,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ScheduledFuture;
@@ -240,6 +243,11 @@ public void testScmHealthyNodeCount()
       Thread.sleep(4 * 1000);
       assertEquals(count, nodeManager.getNodeCount(
           NodeStatus.inServiceHealthy()));
+
+      Map<String, Map<String, Integer>> nodeCounts = nodeManager.getNodeCount();
+      assertEquals(count,
+          nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name())
+              .get(HddsProtos.NodeState.HEALTHY.name()).intValue());
     }
   }
 
@@ -323,6 +331,11 @@ public void testScmDetectStaleAndDeadNode()
           .getUuid(), staleNodeList.get(0).getUuid());
 
       Thread.sleep(1000);
+
+      Map<String, Map<String, Integer>> nodeCounts = nodeManager.getNodeCount();
+      assertEquals(1,
+          nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name())
+              .get(HddsProtos.NodeState.STALE.name()).intValue());
+
       // heartbeat good nodes again.
       for (DatanodeDetails dn : nodeList) {
         nodeManager.processHeartbeat(dn);
@@ -334,10 +347,14 @@ public void testScmDetectStaleAndDeadNode()
 
       // the stale node has been removed
       staleNodeList = nodeManager.getNodes(NodeStatus.inServiceStale());
+      nodeCounts = nodeManager.getNodeCount();
       assertEquals("Expected to find 1 stale node",
           0, nodeManager.getNodeCount(NodeStatus.inServiceStale()));
       assertEquals("Expected to find 1 stale node",
           0, staleNodeList.size());
+      assertEquals(0,
+          nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name())
+              .get(HddsProtos.NodeState.STALE.name()).intValue());
 
       // Check for the dead node now.
       List<DatanodeDetails> deadNodeList =
@@ -346,6 +363,9 @@ public void testScmDetectStaleAndDeadNode()
           nodeManager.getNodeCount(NodeStatus.inServiceDead()));
       assertEquals("Expected to find 1 dead node", 1,
           deadNodeList.size());
+      assertEquals(1,
+          nodeCounts.get(HddsProtos.NodeOperationalState.IN_SERVICE.name())
+              .get(HddsProtos.NodeState.DEAD.name()).intValue());
       assertEquals("Dead node is not the expected ID", staleNode
           .getUuid(), deadNodeList.get(0).getUuid());
     }
@@ -1195,6 +1215,58 @@ private void testScmRegisterNodeWithNetworkTopology(boolean useHostname)
     }
   }
 
+  @Test
+  public void testGetNodeInfo()
+      throws IOException, InterruptedException, NodeNotFoundException,
+      AuthenticationException {
+    OzoneConfiguration conf = getConf();
+    final int nodeCount = 6;
+    SCMNodeManager nodeManager = createNodeManager(conf);
+
+    for (int i = 0; i < nodeCount; i++) {
+      // Register six identical nodes: 2000 capacity and 100 used on each.
+      DatanodeDetails datanodeDetails =
+          MockDatanodeDetails.randomDatanodeDetails();
+      final long capacity = 2000;
+      final long used = 100;
+      UUID dnId = datanodeDetails.getUuid();
+      String storagePath = testDir.getAbsolutePath() + "/" + dnId;
+      StorageReportProto report = TestUtils
+          .createStorageReport(dnId, storagePath, capacity, used,
+              capacity - used, null);
+      nodeManager.register(datanodeDetails,
+          TestUtils.createNodeReport(report), null);
+      nodeManager.processHeartbeat(datanodeDetails);
+      if (i == 5) {
+        // Move the last node into maintenance.
+        nodeManager.setNodeOperationalState(datanodeDetails,
+            HddsProtos.NodeOperationalState.IN_MAINTENANCE);
+      } else if (i > 2) {
+        // Decommission two of the nodes.
+        nodeManager.setNodeOperationalState(datanodeDetails,
+            HddsProtos.NodeOperationalState.DECOMMISSIONED);
+      }
+    }
+
+    Map<String, Long> stats = nodeManager.getNodeInfo();
+    // 3 IN_SERVICE nodes:
+    assertEquals(6000, stats.get("DiskCapacity").longValue());
+    assertEquals(300, stats.get("DiskUsed").longValue());
+    assertEquals(5700, stats.get("DiskRemaining").longValue());
+
+    // 2 Decommissioned nodes
+    assertEquals(4000, stats.get("DecommissionedDiskCapacity").longValue());
+    assertEquals(200, stats.get("DecommissionedDiskUsed").longValue());
+    assertEquals(3800, stats.get("DecommissionedDiskRemaining").longValue());
+
+    // 1 Maintenance node
+    assertEquals(2000, stats.get("MaintenanceDiskCapacity").longValue());
+    assertEquals(100, stats.get("MaintenanceDiskUsed").longValue());
+    assertEquals(1900, stats.get("MaintenanceDiskRemaining").longValue());
+  }
+
   /**
    * Test add node into a 4-layer network topology during node register.
    */
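The expected totals in testGetNodeInfo follow from six identical nodes reporting 2000/100/1900 DISK figures while splitting 3 in-service, 2 decommissioned and 1 in maintenance across the operational states. A self-contained sketch of the same prefix-and-sum aggregation that SCMNodeManager#getNodeInfo performs (the prefixes array simply stands in for the node states registered above):

import java.util.HashMap;
import java.util.Map;

public final class NodeInfoMath {
  public static void main(String[] args) {
    // Empty prefix = in-service, matching UsageStates.ONLINE's empty label.
    String[] prefixes = {"", "", "", "Decommissioned", "Decommissioned",
        "Maintenance"};
    Map<String, Long> stats = new HashMap<>();
    for (String prefix : prefixes) {
      stats.merge(prefix + "DiskCapacity", 2000L, Long::sum);
      stats.merge(prefix + "DiskUsed", 100L, Long::sum);
      stats.merge(prefix + "DiskRemaining", 1900L, Long::sum);
    }
    // Prints the 6000/300/5700, 4000/200/3800 and 2000/100/1900 totals
    // asserted in the test above.
    System.out.println(stats);
  }
}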
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 5a2e3c2109ac..ee96565a4097 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -72,7 +72,7 @@ public ReplicationNodeManagerMock(Map<DatanodeDetails, NodeState> nodeStatus,
    * @return A state to number of nodes that in this state mapping
    */
   @Override
-  public Map<String, Integer> getNodeCount() {
+  public Map<String, Map<String, Integer>> getNodeCount() {
     return null;
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
index 43b9bf03a6ef..0f961095b9ad 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java
@@ -92,10 +92,32 @@ public void testNodeCount() throws Exception {
         + "name=SCMNodeManagerInfo");
 
     TabularData data = (TabularData) mbs.getAttribute(bean, "NodeCount");
-    Map<String, Integer> nodeCount = scm.getScmNodeManager().getNodeCount();
-    Map<String, Long> nodeCountLong = new HashMap<>();
-    nodeCount.forEach((k, v) -> nodeCountLong.put(k, new Long(v)));
-    verifyEquals(data, nodeCountLong);
+    Map<String, Map<String, Integer>> mbeanMap = convertNodeCountToMap(data);
+    Map<String, Map<String, Integer>> nodeMap =
+        scm.getScmNodeManager().getNodeCount();
+    assertTrue(nodeMap.equals(mbeanMap));
+  }
+
+  private Map<String, Map<String, Integer>> convertNodeCountToMap(
+      TabularData data) {
+    Map<String, Map<String, Integer>> map = new HashMap<>();
+    for (Object o : data.values()) {
+      CompositeData cds = (CompositeData) o;
+      Iterator<?> it = cds.values().iterator();
+      String opState = it.next().toString();
+      TabularData states = (TabularData) it.next();
+
+      Map<String, Integer> healthStates = new HashMap<>();
+      for (Object obj : states.values()) {
+        CompositeData stateData = (CompositeData) obj;
+        Iterator<?> stateIt = stateData.values().iterator();
+        String health = stateIt.next().toString();
+        Integer value = Integer.parseInt(stateIt.next().toString());
+        healthStates.put(health, value);
+      }
+      map.put(opState, healthStates);
+    }
+    return map;
+  }
 
   private void verifyEquals(TabularData actualData, Map<String, Long>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index d528f437cee1..6192ff5bf315 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -146,11 +146,35 @@ public void testNodeCountAndInfoMetricsReported() throws Exception {
     cluster.getStorageContainerManager().getScmNodeManager()
         .processNodeReport(datanode.getDatanodeDetails(), nodeReport);
 
-    assertGauge("HealthyNodes", 1,
+    assertGauge("InServiceHealthyNodes", 1,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("StaleNodes", 0,
+    assertGauge("InServiceStaleNodes", 0,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("DeadNodes", 0,
+    assertGauge("InServiceDeadNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissioningHealthyNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissioningStaleNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissioningDeadNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedHealthyNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedStaleNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedDeadNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("EnteringMaintenanceHealthyNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("EnteringMaintenanceStaleNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("EnteringMaintenanceDeadNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("InMaintenanceHealthyNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("InMaintenanceStaleNodes", 0,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("InMaintenanceDeadNodes", 0,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
     assertGauge("DiskCapacity", 100L,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
@@ -164,6 +188,30 @@ public void testNodeCountAndInfoMetricsReported() throws Exception {
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
     assertGauge("SSDRemaining", 0L,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("MaintenanceDiskCapacity", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("MaintenanceDiskUsed", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("MaintenanceDiskRemaining", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("MaintenanceSSDCapacity", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("MaintenanceSSDUsed", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("MaintenanceSSDRemaining", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedDiskCapacity", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedDiskUsed", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedDiskRemaining", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedSSDCapacity", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedSSDUsed", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("DecommissionedSSDRemaining", 0L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
   }
 
   @After