diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index b79ab5b5bcf02..c349c7385dde2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.metrics2.source.JvmMetrics;
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.net.NetworkTopology;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -243,7 +244,7 @@ public class Balancer {
   private static void checkReplicationPolicyCompatibility(Configuration conf
       ) throws UnsupportedActionException {
     BlockPlacementPolicies placementPolicies =
-        new BlockPlacementPolicies(conf, null, null, null);
+        new BlockPlacementPolicies(conf, null, NetworkTopology.getInstance(conf), null);
     if (!(placementPolicies.getPolicy(CONTIGUOUS) instanceof
         BlockPlacementPolicyDefault)) {
       throw new UnsupportedActionException(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 32b1fa8a5e192..d2ff63fbf1e06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
@@ -40,6 +41,8 @@
 import java.lang.reflect.Field;
 
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
+import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
 import org.junit.AfterClass;
 
 import static org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset.CONFIG_PROPERTY_NONDFSUSED;
@@ -1510,6 +1513,22 @@ public void testBalancerCliWithIncludeListWithPortsInAFile() throws Exception {
         CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true);
   }
 
+  /**
+   * Test a cluster with BlockPlacementPolicyWithNodeGroup
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithBlockPlacementPolicyWithNodeGroup() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
+    conf.set(NET_TOPOLOGY_IMPL_KEY, NetworkTopologyWithNodeGroup.class.getName());
+    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+        BlockPlacementPolicyWithNodeGroup.class.getName());
+    String rackWithNodeGroup = "/rack0/nodegroup0";
+    doTest(conf, new long[] {CAPACITY}, new String[] {rackWithNodeGroup}, CAPACITY / 2,
+        rackWithNodeGroup, true);
+  }
+
   /**
    * Check that the balancer exits when there is an unfinalized upgrade.
    */
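
Note on the fix: checkReplicationPolicyCompatibility previously constructed BlockPlacementPolicies with a null NetworkTopology, so the balancer's startup check could not initialize a placement policy that needs real topology information, such as BlockPlacementPolicyWithNodeGroup. The patch instead resolves the topology from the configuration via NetworkTopology.getInstance(conf). The sketch below exercises that resolution path in isolation, using the same three settings the new test applies; the TopologyResolutionCheck class and its main method are illustrative scaffolding, not part of the patch.

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;

public class TopologyResolutionCheck {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();

    // The same three settings the new test applies: disable the
    // DFS-specific topology, select the node-group topology class,
    // and choose the matching placement policy.
    conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
    conf.set(NET_TOPOLOGY_IMPL_KEY, NetworkTopologyWithNodeGroup.class.getName());
    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        BlockPlacementPolicyWithNodeGroup.class.getName());

    // This is the call the patched checkReplicationPolicyCompatibility
    // makes instead of passing null: the topology implementation is read
    // from NET_TOPOLOGY_IMPL_KEY, so with the config above this should
    // print org.apache.hadoop.net.NetworkTopologyWithNodeGroup.
    NetworkTopology topology = NetworkTopology.getInstance(conf);
    System.out.println(topology.getClass().getName());
  }
}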