diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 275665ec38ca..d3bcf7d75050 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -258,16 +258,6 @@ void addContainer(DatanodeDetails datanodeDetails, void removeContainer(DatanodeDetails datanodeDetails, ContainerID containerId) throws NodeNotFoundException; - /** - * Remaps datanode to containers mapping to the new set of containers. - * @param datanodeDetails - DatanodeDetails - * @param containerIds - Set of containerIDs - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - void setContainers(DatanodeDetails datanodeDetails, - Set containerIds) throws NodeNotFoundException; - /** * Return set of containerIDs available on a datanode. * @param datanodeDetails DatanodeDetails diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index 863f4bc40ee8..5393c57f72a9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -697,12 +697,13 @@ public void removeContainer(final DatanodeID datanodeID, } /** - * Update set of containers available on a datanode. + * Set the containers for the given datanode. + * This method is only used for testing. * @throws NodeNotFoundException - if datanode is not known. */ - public void setContainers(DatanodeID datanodeID, Set containerIds) + void setContainersForTesting(DatanodeID datanodeID, Set containerIds) throws NodeNotFoundException { - nodeStateMap.setContainers(datanodeID, containerIds); + nodeStateMap.setContainersForTesting(datanodeID, containerIds); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index b3b02cb4c54c..3f0906908353 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -1597,20 +1597,6 @@ public void removeContainer(final DatanodeDetails datanodeDetails, nodeStateManager.removeContainer(datanodeDetails.getID(), containerId); } - /** - * Update set of containers available on a datanode. - * - * @param datanodeDetails - DatanodeID - * @param containerIds - Set of containerIDs - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - @Override - public void setContainers(DatanodeDetails datanodeDetails, - Set containerIds) throws NodeNotFoundException { - nodeStateManager.setContainers(datanodeDetails.getID(), containerIds); - } - /** * Return set of containerIDs available on a datanode. 
This is a copy of the * set which resides inside NodeManager and hence can be modified without diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java index 0f4420f8a660..1f44e5c752aa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java @@ -304,7 +304,11 @@ public void addContainer(final DatanodeID datanodeID, } } - public void setContainers(DatanodeID id, Set containers) + /** + * Set the containers for the given datanode. + * This method is only used for testing. + */ + public void setContainersForTesting(DatanodeID id, Set containers) throws NodeNotFoundException { lock.writeLock().lock(); try { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index c3004983e38f..ffb215edcc4c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; @@ -617,15 +616,10 @@ public Map getTotalDatanodeCommandCounts( /** * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws SCMException - if datanode is not known. For new datanode use - * addDatanodeInContainerMap call. 
*/ - @Override public void setContainers(DatanodeDetails uuid, Set containerIds) throws NodeNotFoundException { - node2ContainerMap.setContainers(uuid.getID(), containerIds); + node2ContainerMap.setContainersForTesting(uuid.getID(), containerIds); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java index a5272f8a4fc2..d53ca3f7dfd5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java @@ -158,10 +158,7 @@ public int getPipelinesCount(DatanodeDetails datanodeDetails) { return 0; } - @Override - public void setContainers(DatanodeDetails dn, - Set containerIds) - throws NodeNotFoundException { + public void setContainers(DatanodeDetails dn, Set containerIds) { containerMap.put(dn.getUuid(), containerIds); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index 5baa26ba26a0..a3b8f2751c0f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -60,7 +60,6 @@ import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; -import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.MockPipelineManager; @@ -89,7 +88,7 @@ */ public class TestContainerReportHandler { - private NodeManager nodeManager; + private MockNodeManager nodeManager; private ContainerManager containerManager; private ContainerStateManager containerStateManager; private EventPublisher publisher; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/ScmNodeTestUtil.java similarity index 57% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/ScmNodeTestUtil.java index 62c4656e0c25..fd0556648b27 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/ScmNodeTestUtil.java @@ -15,5 +15,17 @@ * limitations under the License. */ -/** Helper classes for ozone and container tests. */ -package org.apache.hadoop.ozone.container.testutils; +package org.apache.hadoop.hdds.scm.node; + +import java.util.Set; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; + +/** Utilities for testing the {@link org.apache.hadoop.hdds.scm.node} package. 
*/ +public interface ScmNodeTestUtil { + static void setContainers(SCMNodeManager scm, DatanodeDetails datanode, + Set containers) throws NodeNotFoundException { + scm.getNodeStateManager().setContainersForTesting(datanode.getID(), containers); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 46524d49b443..7221fac8b641 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -299,15 +299,12 @@ private void registerReplicas(ContainerManager contManager, /** * Update containers available on the datanode. - * @param datanode - * @param containers - * @throws NodeNotFoundException */ private void registerContainers(DatanodeDetails datanode, ContainerInfo... containers) throws NodeNotFoundException { - nodeManager - .setContainers(datanode, + ScmNodeTestUtil.setContainers(nodeManager, + datanode, Arrays.stream(containers) .map(ContainerInfo::containerID) .collect(Collectors.toSet())); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 83f1cb381d78..63ac6b05f8f0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -68,7 +68,7 @@ public class TestNodeDecommissionManager { private NodeDecommissionManager decom; private StorageContainerManager scm; - private NodeManager nodeManager; + private SCMNodeManager nodeManager; private ContainerManager containerManager; private OzoneConfiguration conf; private static int id = 1; @@ -78,7 +78,7 @@ void setup(@TempDir File dir) throws Exception { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); scm = HddsTestUtils.getScm(conf); - nodeManager = scm.getScmNodeManager(); + nodeManager = (SCMNodeManager)scm.getScmNodeManager(); containerManager = mock(ContainerManager.class); decom = new NodeDecommissionManager(conf, nodeManager, containerManager, SCMContext.emptyContext(), new EventQueue(), null); @@ -87,6 +87,10 @@ void setup(@TempDir File dir) throws Exception { (String) invocation.getArguments()[1])); } + void setContainers(DatanodeDetails datanode, Set containers) throws NodeNotFoundException { + ScmNodeTestUtil.setContainers(nodeManager, datanode, containers); + } + private ContainerInfo createMockContainer(ReplicationConfig rep, String owner) { ContainerInfo.Builder builder = new ContainerInfo.Builder() .setReplicationConfig(rep) @@ -426,7 +430,7 @@ public void testInsufficientNodeDecommissionThrowsExceptionForRatis() throws } for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { - nodeManager.setContainers(dn, idsRatis); + setContainers(dn, idsRatis); } error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), @@ -480,7 +484,7 @@ public void testInsufficientNodeDecommissionThrowsExceptionForEc() throws } for (DatanodeDetails dn : nodeManager.getAllNodes()) { - nodeManager.setContainers(dn, idsEC); + setContainers(dn, idsEC); } error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); @@ -529,10 
+533,10 @@ public void testInsufficientNodeDecommissionThrowsExceptionRatisAndEc() throws }); for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { - nodeManager.setContainers(dn, idsRatis); + setContainers(dn, idsRatis); } for (DatanodeDetails dn : nodeManager.getAllNodes()) { - nodeManager.setContainers(dn, idsEC); + setContainers(dn, idsEC); } error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); @@ -573,7 +577,7 @@ public void testInsufficientNodeDecommissionChecksNotInService() throws } for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { - nodeManager.setContainers(dn, idsRatis); + setContainers(dn, idsRatis); } // decommission one node successfully @@ -608,7 +612,7 @@ public void testInsufficientNodeDecommissionChecksForNNF() throws idsRatis.add(container.containerID()); } - nodeManager = mock(NodeManager.class); + nodeManager = mock(SCMNodeManager.class); decom = new NodeDecommissionManager(conf, nodeManager, containerManager, SCMContext.emptyContext(), new EventQueue(), null); when(containerManager.getContainer(any(ContainerID.class))) @@ -669,7 +673,7 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForRatis() throws idsRatis.add(container.containerID()); } for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { - nodeManager.setContainers(dn, idsRatis); + setContainers(dn, idsRatis); } decom.setMaintenanceConfigs(2, 1); // default config @@ -769,7 +773,7 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForEc() throws idsEC.add(container.containerID()); } for (DatanodeDetails dn : nodeManager.getAllNodes()) { - nodeManager.setContainers(dn, idsEC); + setContainers(dn, idsEC); } decom.setMaintenanceConfigs(2, 1); // default config @@ -852,10 +856,10 @@ public void testInsufficientNodeMaintenanceThrowsExceptionForRatisAndEc() throws (ContainerID)invocation.getArguments()[0]); }); for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { - nodeManager.setContainers(dn, idsRatis); + setContainers(dn, idsRatis); } for (DatanodeDetails dn : nodeManager.getAllNodes()) { - nodeManager.setContainers(dn, idsEC); + setContainers(dn, idsEC); } decom.setMaintenanceConfigs(2, 1); // default config @@ -926,7 +930,7 @@ public void testInsufficientNodeMaintenanceChecksNotInService() throws idsRatis.add(container.containerID()); } for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { - nodeManager.setContainers(dn, idsRatis); + setContainers(dn, idsRatis); } // put 2 nodes into maintenance successfully @@ -966,7 +970,7 @@ public void testInsufficientNodeMaintenanceChecksForNNF() throws idsRatis.add(container.containerID()); } - nodeManager = mock(NodeManager.class); + nodeManager = mock(SCMNodeManager.class); decom = new NodeDecommissionManager(conf, nodeManager, containerManager, SCMContext.emptyContext(), new EventQueue(), null); when(containerManager.getContainer(any(ContainerID.class))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java deleted file mode 100644 index d84ea2211073..000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ /dev/null @@ -1,537 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.testutils; - -import com.google.common.base.Preconditions; -import java.io.IOException; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.node.CommandQueue; -import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -/** - * A Node Manager to test replication. - */ -public class ReplicationNodeManagerMock implements NodeManager { - private final Map nodeStateMap; - private final CommandQueue commandQueue; - - /** - * A list of Datanodes and current states. - * @param nodeStatus A node state map. - */ - public ReplicationNodeManagerMock(Map nodeStatus, - CommandQueue commandQueue) { - Preconditions.checkNotNull(nodeStatus); - this.nodeStateMap = nodeStatus; - this.commandQueue = commandQueue; - } - - /** - * Get the number of data nodes that in all states. 
- * - * @return A state to number of nodes that in this state mapping - */ - @Override - public Map> getNodeCount() { - return null; - } - - @Override - public Map getNodeInfo() { - return null; - } - - @Override - public Map> getNodeStatusInfo() { - return null; - } - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * - * @param nodestatus - State of the node - * @return List of Datanodes that are Heartbeating SCM. - */ - @Override - public List getNodes(NodeStatus nodestatus) { - return null; - } - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * - * @param opState - Operational state of the node - * @param health - Health of the node - * @return List of Datanodes that are Heartbeating SCM. - */ - @Override - public List getNodes( - HddsProtos.NodeOperationalState opState, NodeState health) { - return null; - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * - * @param nodestatus - State of the node - * @return int -- count - */ - @Override - public int getNodeCount(NodeStatus nodestatus) { - return 0; - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * - * @param opState - Operational state of the node - * @param health - Health of the node - * @return int -- count - */ - @Override - public int getNodeCount( - HddsProtos.NodeOperationalState opState, NodeState health) { - return 0; - } - - /** - * Get all datanodes known to SCM. - * - * @return List of DatanodeDetails known to SCM. - */ - @Override - public List getAllNodes() { - return null; - } - - /** - * Returns the aggregated node stats. - * - * @return the aggregated node stats. - */ - @Override - public SCMNodeStat getStats() { - return null; - } - - /** - * Return a map of node stats. - * - * @return a map of individual node stats (live/stale but not dead). - */ - @Override - public Map getNodeStats() { - return null; - } - - /** - * Gets a sorted list of most or least used DatanodeUsageInfo containing - * healthy, in-service nodes. If the specified mostUsed is true, the returned - * list is in descending order of usage. Otherwise, the returned list is in - * ascending order of usage. - * - * @param mostUsed true if most used, false if least used - * @return List of DatanodeUsageInfo - */ - @Override - public List getMostOrLeastUsedDatanodes(boolean mostUsed) { - return null; - } - - /** - * Get the usage info of a specified datanode. - * - * @param dn the usage of which we want to get - * @return DatanodeUsageInfo of the specified datanode - */ - @Override - public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn) { - return null; - } - - /** - * Return the node stat of the specified datanode. - * - * @param dd - datanode details. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - @Override - public SCMNodeMetric getNodeStat(DatanodeDetails dd) { - return null; - } - - - /** - * Returns the node state of a specific node. - * - * @param dd - DatanodeDetails - * @return Healthy/Stale/Dead. - */ - @Override - public NodeStatus getNodeStatus(DatanodeDetails dd) { - return nodeStateMap.get(dd); - } - - /** - * Set the operation state of a node. 
- * @param dd The datanode to set the new state for - * @param newState The new operational state for the node - */ - @Override - public void setNodeOperationalState(DatanodeDetails dd, - HddsProtos.NodeOperationalState newState) throws NodeNotFoundException { - setNodeOperationalState(dd, newState, 0); - } - - /** - * Set the operation state of a node. - * @param dd The datanode to set the new state for - * @param newState The new operational state for the node - */ - @Override - public void setNodeOperationalState(DatanodeDetails dd, - HddsProtos.NodeOperationalState newState, long opStateExpiryEpocSec) - throws NodeNotFoundException { - NodeStatus currentStatus = nodeStateMap.get(dd); - if (currentStatus != null) { - nodeStateMap.put(dd, NodeStatus.valueOf(newState, currentStatus.getHealth(), - opStateExpiryEpocSec)); - } else { - throw new NodeNotFoundException(dd.getID()); - } - } - - /** - * Get set of pipelines a datanode is part of. - * @param dnId - datanodeID - * @return Set of PipelineID - */ - @Override - public Set getPipelines(DatanodeDetails dnId) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Get the count of pipelines a datanodes is associated with. - * @param dn DatanodeDetails - * @return The number of pipelines - */ - @Override - public int getPipelinesCount(DatanodeDetails dn) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - @Override - public void addPipeline(Pipeline pipeline) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - @Override - public void removePipeline(Pipeline pipeline) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - @Override - public void addContainer(DatanodeDetails datanodeDetails, - ContainerID containerId) - throws NodeNotFoundException { - throw new UnsupportedOperationException("Not yet implemented"); - } - - @Override - public void removeContainer(DatanodeDetails datanodeDetails, - ContainerID containerId) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - @Override - public void setContainers(DatanodeDetails uuid, Set containerIds) - throws NodeNotFoundException { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Return set of containerIDs available on a datanode. - * @param uuid - DatanodeID - * @return - set of containerIDs - */ - @Override - public Set getContainers(DatanodeDetails uuid) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *

- * <p>
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } - - /** - * Gets the version info from SCM. - * - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed by - * datanode. - */ - @Override - public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { - return null; - } - - /** - * Register the node if the node finds that it is not registered with any SCM. - * - * @param dd DatanodeDetailsProto - * @param nodeReport NodeReportProto - * @return SCMRegisteredResponseProto - */ - @Override - public RegisteredCommand register(DatanodeDetails dd, - NodeReportProto nodeReport, - PipelineReportsProto pipelineReportsProto, - LayoutVersionProto layoutInfo) { - return null; - } - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * - * @param dd - Datanode Details. - * @param commandQueueReportProto - Command Queue Report Proto - * @return SCMheartbeat response list - */ - @Override - public List> processHeartbeat(DatanodeDetails dd, - CommandQueueReportProto commandQueueReportProto) { - return null; - } - - @Override - public Boolean isNodeRegistered( - DatanodeDetails datanodeDetails) { - return false; - } - - /** - * Clears all nodes from the node Manager. - */ - public void clearMap() { - this.nodeStateMap.clear(); - } - - /** - * Adds a node to the existing Node manager. This is used only for test - * purposes. - * @param id DatanodeDetails - * @param status State you want to put that node to. - */ - public void addNode(DatanodeDetails id, NodeStatus status) { - nodeStateMap.put(id, status); - } - - @Override - public void addDatanodeCommand(UUID dnId, SCMCommand command) { - this.commandQueue.addCommand(dnId, command); - } - - /** - * send refresh command to all the healthy datanodes to refresh - * volume usage info immediately. - */ - @Override - public void refreshAllHealthyDnUsageInfo() { - //no op - } - /** - * Empty implementation for processNodeReport. - * @param dnUuid - * @param nodeReport - */ - @Override - public void processNodeReport(DatanodeDetails dnUuid, - NodeReportProto nodeReport) { - // do nothing. - } - - /** - * Empty implementation for processLayoutVersionReport. - * @param dnUuid - * @param layoutVersionReport - */ - @Override - public void processLayoutVersionReport(DatanodeDetails dnUuid, - LayoutVersionProto layoutVersionReport) { - // do nothing. - } - - /** - * Get the number of commands of the given type queued on the datanode at the - * last heartbeat. If the Datanode has not reported information for the given - * command type, -1 will be returned. - * @param cmdType - * @return The queued count or -1 if no data has been received from the DN. - */ - @Override - public int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails, - SCMCommandProto.Type cmdType) { - return -1; - } - - /** - * Get the number of commands of the given type queued in the SCM CommandQueue - * for the given datanode. - * @param dnID The UUID of the datanode. - * @param cmdType The Type of command to query the current count for. - * @return The count of commands queued, or zero if none. 
- */ - @Override - public int getCommandQueueCount(UUID dnID, SCMCommandProto.Type cmdType) { - return commandQueue.getDatanodeCommandCount(dnID, cmdType); - } - - /** - * Get the total number of pending commands of the given type on the given - * datanode. This includes both the number of commands queued in SCM which - * will be sent to the datanode on the next heartbeat, and the number of - * commands reported by the datanode in the last heartbeat. - * If the datanode has not reported any information for the given command, - * zero is assumed. - * @param datanodeDetails The datanode to query. - * @param cmdType The command Type To query. - * @return The number of commands of the given type pending on the datanode. - * @throws NodeNotFoundException - */ - @Override - public int getTotalDatanodeCommandCount(DatanodeDetails datanodeDetails, - SCMCommandProto.Type cmdType) throws NodeNotFoundException { - return 0; - } - - @Override - public Map getTotalDatanodeCommandCounts( - DatanodeDetails datanodeDetails, SCMCommandProto.Type... cmdType) { - return Collections.emptyMap(); - } - - @Override - public void onMessage(CommandForDatanode commandForDatanode, - EventPublisher publisher) { - // do nothing. - } - - @Override - public List> getCommandQueue(UUID dnID) { - return null; - } - - @Override - public DatanodeDetails getNodeByUuid(String address) { - return null; - } - - @Override - public List getNodesByAddress(String address) { - return new LinkedList<>(); - } - - @Override - public NetworkTopology getClusterNetworkTopologyMap() { - return null; - } - - @Override - public int minHealthyVolumeNum(List dnList) { - return 0; - } - - @Override - public int totalHealthyVolumeCount() { - return 0; - } - - @Override - public int pipelineLimit(DatanodeDetails dn) { - return 0; - } - - @Override - public int minPipelineLimit(List dn) { - return 0; - } - - @Override - public long getLastHeartbeat(DatanodeDetails datanodeDetails) { - return -1; - } -}
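
A minimal usage sketch, not part of the patch: the class name ContainerSeedingSketch below is hypothetical, and the generic type arguments (stripped in the rendering of the diff above) are inferred from the surrounding imports. It shows how a test in the org.apache.hadoop.hdds.scm.node package now seeds a datanode's container set via ScmNodeTestUtil.setContainers, which forwards to the package-private NodeStateManager#setContainersForTesting, instead of calling the removed NodeManager#setContainers.

```java
package org.apache.hadoop.hdds.scm.node;

import java.util.Set;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;

/**
 * Illustrative sketch only (not part of this patch). Assumes it lives in the
 * org.apache.hadoop.hdds.scm.node test source tree so that ScmNodeTestUtil and
 * SCMNodeManager are accessible, matching the pattern used by
 * TestDeadNodeHandler and TestNodeDecommissionManager in this change.
 */
final class ContainerSeedingSketch {

  private ContainerSeedingSketch() {
    // static helper only
  }

  /**
   * Seed the containers reported for a datanode through the test-only path,
   * now that NodeManager#setContainers has been removed from the interface.
   */
  static void seedContainers(SCMNodeManager nodeManager,
      DatanodeDetails datanode,
      Set<ContainerID> containerIds) throws NodeNotFoundException {
    // Delegates to NodeStateManager#setContainersForTesting via the new
    // ScmNodeTestUtil helper introduced in this patch.
    ScmNodeTestUtil.setContainers(nodeManager, datanode, containerIds);
  }
}
```

The apparent design intent is to keep the production NodeManager interface free of a test-only mutator while existing tests retain the ability to preload a datanode's container set.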