diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 68494943b143..5a28e4275054 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -21,11 +21,13 @@
 import java.io.IOException;
 import java.time.Instant;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
@@ -59,8 +61,6 @@ public final class Pipeline {
   private UUID leaderId;
   // Timestamp for pipeline upon creation
   private Instant creationTimestamp;
-  // Only valid for Ratis THREE pipeline. No need persist.
-  private int nodeIdsHash;
 
   /**
    * The immutable properties of pipeline object is used in
@@ -76,7 +76,6 @@ private Pipeline(PipelineID id, ReplicationType type,
     this.state = state;
     this.nodeStatus = nodeStatus;
     this.creationTimestamp = Instant.now();
-    this.nodeIdsHash = 0;
   }
 
   /**
@@ -133,14 +132,6 @@ void setCreationTimestamp(Instant creationTimestamp) {
     this.creationTimestamp = creationTimestamp;
   }
 
-  public int getNodeIdsHash() {
-    return nodeIdsHash;
-  }
-
-  void setNodeIdsHash(int nodeIdsHash) {
-    this.nodeIdsHash = nodeIdsHash;
-  }
-
   /**
    * Return the pipeline leader's UUID.
    *
@@ -166,6 +157,23 @@ public List<DatanodeDetails> getNodes() {
     return new ArrayList<>(nodeStatus.keySet());
   }
 
+  /**
+   * Return an immutable set of nodes which form this pipeline.
+   * @return Set of DatanodeDetails
+   */
+  public Set<DatanodeDetails> getNodeSet() {
+    return Collections.unmodifiableSet(nodeStatus.keySet());
+  }
+
+  /**
+   * Check if the input pipeline shares the same set of datanodes.
+   * @param pipeline pipeline to compare against
+   * @return true if the input pipeline shares the same set of datanodes.
+   */
+  public boolean sameDatanodes(Pipeline pipeline) {
+    return getNodeSet().equals(pipeline.getNodeSet());
+  }
+
   /**
    * Returns the leader if found else defaults to closest node.
    *
@@ -360,7 +368,6 @@ public static class Builder {
     private List<DatanodeDetails> nodesInOrder = null;
     private UUID leaderId = null;
     private Instant creationTimestamp = null;
-    private int nodeIdsHash = 0;
 
     public Builder() {}
 
@@ -373,7 +380,6 @@ public Builder(Pipeline pipeline) {
       this.nodesInOrder = pipeline.nodesInOrder.get();
       this.leaderId = pipeline.getLeaderId();
       this.creationTimestamp = pipeline.getCreationTimestamp();
-      this.nodeIdsHash = 0;
     }
 
     public Builder setId(PipelineID id1) {
@@ -417,11 +423,6 @@ public Builder setCreateTimestamp(long createTimestamp) {
       return this;
     }
 
-    public Builder setNodeIdsHash(int nodeIdsHash1) {
-      this.nodeIdsHash = nodeIdsHash1;
-      return this;
-    }
-
     public Pipeline build() {
       Preconditions.checkNotNull(id);
       Preconditions.checkNotNull(type);
@@ -430,7 +431,6 @@
       Preconditions.checkNotNull(nodeStatus);
       Pipeline pipeline = new Pipeline(id, type, factor, state, nodeStatus);
       pipeline.setLeaderId(leaderId);
-      pipeline.setNodeIdsHash(nodeIdsHash);
       // overwrite with original creationTimestamp
       if (creationTimestamp != null) {
         pipeline.setCreationTimestamp(creationTimestamp);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 4261a87c4c0d..9d78063a4dcd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -145,12 +145,10 @@ List<DatanodeDetails> filterViableNodes(
     String msg;
     if (initialHealthyNodesCount < nodesRequired) {
-      LOG.warn("Not enough healthy nodes to allocate pipeline." +
-          nodesRequired + " datanodes required. Found: " +
-          initialHealthyNodesCount);
       msg = String.format("Pipeline creation failed due to no sufficient" +
           " healthy datanodes. Required %d. Found %d.",
           nodesRequired, initialHealthyNodesCount);
+      LOG.warn(msg);
       throw new SCMException(msg,
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
     }
 
@@ -229,42 +227,49 @@ public List<DatanodeDetails> getResultSet(
     // First choose an anchor nodes randomly
     DatanodeDetails anchor = chooseNode(healthyNodes);
     if (anchor == null) {
-      LOG.warn("Unable to find healthy nodes." +
+      LOG.warn("Unable to find healthy node for anchor(first) node." +
           " Required nodes: {}, Found nodes: {}",
           nodesRequired, results.size());
       throw new SCMException("Unable to find required number of nodes.",
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
     }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("First node chosen: {}", anchor);
+    }
     results.add(anchor);
     exclude.add(anchor);
-    nodesRequired--;
 
     // Choose the second node on different racks from anchor.
     DatanodeDetails nodeOnDifferentRack = chooseNodeBasedOnRackAwareness(
         healthyNodes, exclude,
         nodeManager.getClusterNetworkTopologyMap(), anchor);
     if (nodeOnDifferentRack == null) {
-      LOG.warn("Pipeline Placement: Unable to find nodes on different racks " +
-          " that meet the criteria. Required nodes: {}, Found nodes: {}",
-          nodesRequired, results.size());
+      LOG.warn("Pipeline Placement: Unable to find 2nd node on different " +
+          "racks that meets the criteria. Required nodes: {}, Found nodes:" +
+          " {}", nodesRequired, results.size());
       throw new SCMException("Unable to find required number of nodes.",
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
     }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Second node chosen: {}", nodeOnDifferentRack);
+    }
     results.add(nodeOnDifferentRack);
     exclude.add(nodeOnDifferentRack);
-    nodesRequired--;
 
     // Then choose nodes close to anchor based on network topology
-    for (int x = 0; x < nodesRequired; x++) {
+    int nodesToFind = nodesRequired - results.size();
+    for (int x = 0; x < nodesToFind; x++) {
       // invoke the choose function defined in the derived classes.
       DatanodeDetails pick = chooseNodeFromNetworkTopology(
           nodeManager.getClusterNetworkTopologyMap(), anchor, exclude);
       if (pick != null) {
         results.add(pick);
-        // exclude the picked node for next time
         exclude.add(pick);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Remaining node chosen: {}", pick);
+        }
       }
     }
 
@@ -306,9 +311,7 @@ public DatanodeDetails chooseNode(
       datanodeDetails = firstNodeMetric.isGreater(secondNodeMetric.get())
          ? firstNodeDetails : secondNodeDetails;
     }
-    // the pick is decided and it should be removed from candidates.
     healthyNodes.remove(datanodeDetails);
-
     return datanodeDetails;
   }
 
@@ -331,12 +334,10 @@ protected DatanodeDetails chooseNodeBasedOnRackAwareness(
     }
 
     for (DatanodeDetails node : healthyNodes) {
-      if (excludedNodes.contains(node)
-          || networkTopology.isSameParent(anchor, node)) {
+      if (excludedNodes.contains(node) ||
+          anchor.getNetworkLocation().equals(node.getNetworkLocation())) {
         continue;
       } else {
-        // the pick is decided and it should be removed from candidates.
-        healthyNodes.remove(node);
         return node;
       }
     }
@@ -374,15 +375,10 @@ protected DatanodeDetails chooseNodeFromNetworkTopology(
     if (excludedNodes != null && excludedNodes.size() != 0) {
       excluded.addAll(excludedNodes);
     }
-    excluded.add(anchor);
 
     Node pick = networkTopology.chooseRandom(
         anchor.getNetworkLocation(), excluded);
     DatanodeDetails pickedNode = (DatanodeDetails) pick;
-    // exclude the picked node for next time
-    if (excludedNodes != null) {
-      excludedNodes.add(pickedNode);
-    }
     return pickedNode;
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 81287a5b9337..1842a8d7dfe7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -132,13 +132,6 @@ Pipeline openPipeline(PipelineID pipelineId) throws IOException {
       pipeline = pipelineStateMap
           .updatePipelineState(pipelineId, PipelineState.OPEN);
     }
-    // Amend nodeIdsHash if needed.
-    if (pipeline.getType() == ReplicationType.RATIS &&
-        pipeline.getFactor() == ReplicationFactor.THREE &&
-        pipeline.getNodeIdsHash() == 0) {
-      pipeline.setNodeIdsHash(RatisPipelineUtils
-          .encodeNodeIdsOfFactorThreePipeline(pipeline.getNodes()));
-    }
     return pipeline;
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 4865074d7c70..13c3b6a5cb13 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -157,7 +157,6 @@ public Pipeline create(ReplicationFactor factor) throws IOException {
     }
 
     List<DatanodeDetails> dns;
-    int nodeIdHash = 0;
 
     switch(factor) {
     case ONE:
@@ -166,7 +165,6 @@ public Pipeline create(ReplicationFactor factor) throws IOException {
     case THREE:
       dns = placementPolicy.chooseDatanodes(null,
           null, factor.getNumber(), 0);
-      nodeIdHash = RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(dns);
       break;
     default:
       throw new IllegalStateException("Unknown factor: " + factor.name());
@@ -178,7 +176,6 @@ public Pipeline create(ReplicationFactor factor) throws IOException {
         .setType(ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(dns)
-        .setNodeIdsHash(nodeIdHash)
         .build();
 
     // Send command to datanodes to create pipeline
@@ -199,17 +196,12 @@
   @Override
   public Pipeline create(ReplicationFactor factor,
       List<DatanodeDetails> nodes) {
-    int nodeIdHash = 0;
-    if (factor == ReplicationFactor.THREE) {
-      nodeIdHash = RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes);
-    }
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
         .setState(PipelineState.ALLOCATED)
         .setType(ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(nodes)
-        .setNodeIdsHash(nodeIdHash)
         .build();
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 8bdd6bb6790b..86de43f9dfe8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -103,21 +103,12 @@ static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID,
     }
   }
 
-  static int encodeNodeIdsOfFactorThreePipeline(List<DatanodeDetails> nodes) {
-    if (nodes.size() != HddsProtos.ReplicationFactor.THREE.getNumber()) {
-      return 0;
-    }
-    return nodes.get(0).getUuid().hashCode() ^
-        nodes.get(1).getUuid().hashCode() ^
-        nodes.get(2).getUuid().hashCode();
-  }
-
   /**
    * Return the list of pipelines who share the same set of datanodes
    * with the input pipeline.
    * @param stateManager PipelineStateManager
    * @param pipeline input pipeline
-   * @return first matched pipeline
+   * @return list of matched pipelines
    */
   static List<Pipeline> checkPipelineContainSameDatanodes(
       PipelineStateManager stateManager, Pipeline pipeline) {
@@ -125,9 +116,8 @@ static List<Pipeline> checkPipelineContainSameDatanodes(
         HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.THREE)
         .stream().filter(p -> !p.getId().equals(pipeline.getId()) &&
-            (// For all OPEN or ALLOCATED pipelines
-            p.getPipelineState() != Pipeline.PipelineState.CLOSED &&
-            p.getNodeIdsHash() == pipeline.getNodeIdsHash()))
+            (p.getPipelineState() != Pipeline.PipelineState.CLOSED &&
+            p.sameDatanodes(pipeline)))
         .collect(Collectors.toList());
   }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index 48906a45d0c4..c17ed12a844c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -145,8 +145,6 @@ private void initializePipelineState() throws IOException {
       Pipeline pipeline = Pipeline.getFromProtobuf(pipelineBuilder.setState(
           HddsProtos.PipelineState.PIPELINE_ALLOCATED).build());
       Preconditions.checkNotNull(pipeline);
-      pipeline.setNodeIdsHash(RatisPipelineUtils.
-          encodeNodeIdsOfFactorThreePipeline(pipeline.getNodes()));
       stateManager.addPipeline(pipeline);
       nodeManager.addPipeline(pipeline);
     }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
index 3eb146a2c9cc..ff5247027259 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -73,8 +73,6 @@ public Pipeline create(HddsProtos.ReplicationFactor factor)
           .setType(initialPipeline.getType())
           .setFactor(factor)
           .setNodes(initialPipeline.getNodes())
-          .setNodeIdsHash(RatisPipelineUtils
-              .encodeNodeIdsOfFactorThreePipeline(initialPipeline.getNodes()))
           .build();
     }
   }
@@ -93,8 +91,6 @@ public Pipeline create(HddsProtos.ReplicationFactor factor,
         .setType(HddsProtos.ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(nodes)
-        .setNodeIdsHash(RatisPipelineUtils
-            .encodeNodeIdsOfFactorThreePipeline(nodes))
         .build();
   }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 2fff7d901cc7..b9aa9afb0518 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -65,10 +65,10 @@ public void testChooseNodeBasedOnNetworkTopology() {
     List<DatanodeDetails> excludedNodes =
         new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT);
+    excludedNodes.add(anchor);
     DatanodeDetails nextNode = placementPolicy.chooseNodeFromNetworkTopology(
         nodeManager.getClusterNetworkTopologyMap(), anchor, excludedNodes);
-    // excludedNodes should contain nextNode after being chosen.
-    Assert.assertTrue(excludedNodes.contains(nextNode));
+    Assert.assertFalse(excludedNodes.contains(nextNode));
     // nextNode should not be the same as anchor.
     Assert.assertTrue(anchor.getUuid() != nextNode.getUuid());
   }
@@ -83,7 +83,8 @@ public void testChooseNodeBasedOnRackAwareness() {
     DatanodeDetails nextNode = placementPolicy.chooseNodeBasedOnRackAwareness(
         healthyNodes, new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT),
         topologyWithDifRacks, anchor);
-    Assert.assertFalse(topologyWithDifRacks.isSameParent(anchor, nextNode));
+    Assert.assertFalse(anchor.getNetworkLocation().equals(
+        nextNode.getNetworkLocation()));
   }
 
   private final static Node[] NODES = new NodeImpl[] {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index a17fc08466df..86d54b399186 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -35,7 +35,6 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.stream.Collectors;
 
 import static org.apache.commons.collections.CollectionUtils.intersection;
@@ -84,7 +83,7 @@ private void createPipelineAndAssertions(
         intersection(pipeline.getNodes(), pipeline1.getNodes())
             .size() < factor.getNumber());
     if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE) {
-      assertNotEquals(pipeline.getNodeIdsHash(), pipeline1.getNodeIdsHash());
+      assertNotEquals(pipeline.getNodeSet(), pipeline1.getNodeSet());
     }
     stateManager.addPipeline(pipeline1);
     nodeManager.addPipeline(pipeline1);
@@ -105,7 +104,7 @@ public void testCreatePipelineWithFactor() throws IOException {
     stateManager.addPipeline(pipeline1);
     // With enough pipeline quote on datanodes, they should not share
     // the same set of datanodes.
-    assertNotEquals(pipeline.getNodeIdsHash(), pipeline1.getNodeIdsHash());
+    assertNotEquals(pipeline.getNodeSet(), pipeline1.getNodeSet());
   }
 
   @Test
@@ -140,33 +139,6 @@ public void testCreatePipelineWithNodes() {
         Pipeline.PipelineState.OPEN);
   }
 
-  @Test
-  public void testComputeNodeIdsHash() {
-    int total = HddsProtos.ReplicationFactor.THREE.getNumber();
-    List<DatanodeDetails> nodes1 = new ArrayList<>();
-    for (int i = 0; i < total; i++) {
-      nodes1.add(MockDatanodeDetails.createDatanodeDetails(
-          UUID.fromString("00000-11000-00000-00000-0000" + (i + 1))));
-    }
-
-    Assert.assertEquals(total, nodes1.size());
-    Assert.assertNotEquals(0,
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes1));
-
-    List<DatanodeDetails> nodes2 = new ArrayList<>();
-    for (int i = 0; i < total; i++) {
-      nodes2.add(MockDatanodeDetails.createDatanodeDetails(
-          UUID.fromString("00000-11000-00000-00000-0000" + (total - i))));
-    }
-    Assert.assertEquals(total, nodes2.size());
-    Assert.assertNotEquals(0,
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes2));
-
-    Assert.assertEquals(
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes1),
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes2));
-  }
-
   @Test
   public void testCreateFactorTHREEPipelineWithSameDatanodes() {
     List<DatanodeDetails> healthyNodes = nodeManager
@@ -178,9 +150,7 @@ public void testCreateFactorTHREEPipelineWithSameDatanodes() {
     Pipeline pipeline2 = provider.create(
         HddsProtos.ReplicationFactor.THREE, healthyNodes);
 
-    Assert.assertTrue(pipeline1.getNodes().parallelStream()
-        .allMatch(pipeline2.getNodes()::contains));
-    Assert.assertEquals(pipeline1.getNodeIdsHash(), pipeline2.getNodeIdsHash());
+    Assert.assertEquals(pipeline1.getNodeSet(), pipeline2.getNodeSet());
   }
 
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index deba91b746a5..ab2315326bc3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import static org.apache.commons.collections.CollectionUtils.intersection;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
@@ -116,15 +115,13 @@ public void testPipelineReload() throws IOException {
     List<Pipeline> pipelineList =
         pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipelines, new HashSet<>(pipelineList));
-    // All NodeIdsHash from original pipeline list
-    List<Integer> originalPipelineHash = pipelineList.stream()
-        .map(Pipeline::getNodeIdsHash).collect(Collectors.toList());
-    // All NodeIdsHash from reloaded pipeline list
-    List<Integer> reloadedPipelineHash = pipelines.stream()
-        .map(Pipeline::getNodeIdsHash).collect(Collectors.toList());
-    // Original NodeIdsHash list should contain same items from reloaded one.
-    Assert.assertEquals(pipelineNum,
-        intersection(originalPipelineHash, reloadedPipelineHash).size());
+
+    Set<Set<DatanodeDetails>> originalPipelines = pipelineList.stream()
+        .map(Pipeline::getNodeSet).collect(Collectors.toSet());
+    Set<Set<DatanodeDetails>> reloadedPipelines = pipelines.stream()
+        .map(Pipeline::getNodeSet).collect(Collectors.toSet());
+    Assert.assertEquals(reloadedPipelines, originalPipelines);
+    Assert.assertEquals(pipelineNum, originalPipelines.size());
 
     // clean up
     for (Pipeline pipeline : pipelines) {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index badfadc22eb9..e4060b3dadaf 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -114,8 +114,8 @@ private void printDatanodeInfo(DatanodeDetails datanode) {
       pipelineListInfo.append("No pipelines in cluster.");
     }
     System.out.println("Datanode: " + datanode.getUuid().toString() +
-        " (" + datanode.getIpAddress() + "/"
-        + datanode.getHostName() + "/" + relatedPipelineNum +
+        " (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress()
+        + "/" + datanode.getHostName() + "/" + relatedPipelineNum +
         " pipelines) \n" + "Related pipelines: \n" + pipelineListInfo);
   }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
index 69611fa674ce..ccd131c7be15 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
@@ -72,6 +72,34 @@ services:
       networks:
         net:
           ipv4_address: 10.5.0.7
+   datanode_5:
+      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      privileged: true #required by the profiler
+      volumes:
+        - ../..:/opt/hadoop
+      ports:
+        - 9864
+        - 9882
+      command: ["/opt/hadoop/bin/ozone","datanode"]
+      env_file:
+        - ./docker-config
+      networks:
+        net:
+          ipv4_address: 10.5.0.8
+   datanode_6:
+      image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+      privileged: true #required by the profiler
+      volumes:
+        - ../..:/opt/hadoop
+      ports:
+        - 9864
+        - 9882
+      command: ["/opt/hadoop/bin/ozone","datanode"]
+      env_file:
+        - ./docker-config
+      networks:
+        net:
+          ipv4_address: 10.5.0.9
    om:
       image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler