diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 024953d92c14..a456d821b8cc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -372,6 +372,13 @@ public final class ScmConfigKeys {
"ozone.scm.pipeline.per.metadata.disk";
public static final int OZONE_SCM_PIPELINE_PER_METADATA_VOLUME_DEFAULT = 2;
+
+ public static final String OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN =
+ "ozone.scm.datanode.ratis.volume.free-space.min";
+
+ public static final String
+ OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT = "1GB";
+
// Max timeout for pipeline to stay at ALLOCATED state before scrubbed.
public static final String OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT =
"ozone.scm.pipeline.allocated.timeout";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 81d174c11591..a66ee9368775 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2901,4 +2901,15 @@
       directory deleting service per time interval.
     </description>
   </property>
+
+  <property>
+    <name>ozone.scm.datanode.ratis.volume.free-space.min</name>
+    <value>1GB</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>Minimum amount of storage space required for each ratis
+      volume on a datanode to hold a new pipeline.
+      Datanodes whose ratis volumes all have less space than this value
+      will not be allocated a pipeline or container replica.
+    </description>
+  </property>
 </configuration>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index ce0223b61f13..73760982d32b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -24,7 +24,9 @@
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -37,6 +39,9 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT;
+
/**
* This policy implements a set of invariants which are common
* for all basic placement policies, acts as the repository of helper
@@ -169,13 +174,35 @@ public boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
long sizeRequired) {
Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);
+ long metaSizeRequired = (long) conf.getStorageSize(
+ OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+ OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT,
+ StorageUnit.BYTES);
+
+ boolean enoughForData = false;
+ boolean enoughForMeta = false;
+
DatanodeInfo datanodeInfo = (DatanodeInfo) datanodeDetails;
for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
if (reportProto.getRemaining() > sizeRequired) {
- return true;
+ enoughForData = true;
+ break;
}
}
- return false;
+
+ if (!enoughForData) {
+ return false;
+ }
+
+ for (MetadataStorageReportProto reportProto
+ : datanodeInfo.getMetadataStorageReports()) {
+ if (reportProto.getRemaining() > metaSizeRequired) {
+ enoughForMeta = true;
+ break;
+ }
+ }
+
+ return enoughForData && enoughForMeta;
}
/**
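The reworked hasEnoughSpace() now demands two independent conditions: at least one data volume whose remaining space exceeds the container size, and at least one ratis/metadata volume whose remaining space exceeds the configured minimum. The following equivalent predicate is a sketch for illustration only (the wrapper class and helper name are hypothetical); note that given the early return above, the final enoughForData && enoughForMeta in the patch reduces to enoughForMeta alone:

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.node.DatanodeInfo;

final class HasEnoughSpaceSketch {
  // Hypothetical helper mirroring the patched logic above.
  static boolean hasEnoughSpace(DatanodeInfo node, long sizeRequired,
      long metaSizeRequired) {
    // Some data volume can hold the container data ...
    boolean enoughForData = node.getStorageReports().stream()
        .anyMatch((StorageReportProto r) -> r.getRemaining() > sizeRequired);
    // ... and some ratis/metadata volume meets the configured minimum.
    boolean enoughForMeta = node.getMetadataStorageReports().stream()
        .anyMatch((MetadataStorageReportProto r) ->
            r.getRemaining() > metaSizeRequired);
    return enoughForData && enoughForMeta;
  }
}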
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
index edd616fb50d9..bf1d64f9a269 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
@@ -143,6 +143,20 @@ public List<StorageReportProto> getStorageReports() {
}
}
+ /**
+ * Returns the metadata storage reports associated with this datanode.
+ *
+ * @return list of metadata storage reports
+ */
+ public List<MetadataStorageReportProto> getMetadataStorageReports() {
+ try {
+ lock.readLock().lock();
+ return metadataStorageReports;
+ } finally {
+ lock.readLock().unlock();
+ }
+ }
+
/**
* Returns count of healthy volumes reported from datanode.
* @return count of healthy volumes
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index c43373875a70..b9aba123773b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -41,6 +41,9 @@
import java.util.Set;
import java.util.stream.Collectors;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT;
+
/**
* Pipeline placement policy that choose datanodes based on load balancing
* and network topology to supply pipeline creation.
@@ -158,15 +161,21 @@ List<DatanodeDetails> filterViableNodes(
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
StorageUnit.BYTES);
+ long metaSizeRequired = (long) conf.getStorageSize(
+ OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+ OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN_DEFAULT,
+ StorageUnit.BYTES);
+
// filter nodes that don't even have space for one container
List<DatanodeDetails> canHoldList = healthyNodes.stream().filter(d ->
hasEnoughSpace(d, sizeRequired)).collect(Collectors.toList());
if (canHoldList.size() < nodesRequired) {
msg = String.format("Pipeline creation failed due to no sufficient" +
- " healthy datanodes with enough space for even a single container." +
- " Required %d. Found %d. Container size %d.",
- nodesRequired, canHoldList.size(), sizeRequired);
+ " healthy datanodes with enough space for container data and " +
+ "metadata. Required %d. Found %d. Container data required %d, " +
+ "metadata required %d.",
+ nodesRequired, canHoldList.size(), sizeRequired, metaSizeRequired);
LOG.warn(msg);
throw new SCMException(msg,
SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
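Since filterViableNodes() routes through hasEnoughSpace(), the metadata-space requirement now applies to pipeline and container placement alike. Unit tests that register fake volumes with tiny capacities therefore have to relax the 1GB default, which is exactly what the test changes below do; the pattern, in short (sketch only, class name hypothetical):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;

final class TestSetupSketch {
  static OzoneConfiguration relaxedConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Lower the minimum so fake reports with ~100-byte volumes still qualify.
    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
        0, StorageUnit.MB);
    return conf;
  }
}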
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 57e1a25b766a..4d364efa31ce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -60,7 +60,9 @@
import org.apache.hadoop.hdds.protocol
.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.StorageReportProto;
+ .StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
@@ -149,7 +151,7 @@ public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
* @return NodeReportProto
*/
public static NodeReportProto getRandomNodeReport() {
- return getRandomNodeReport(1);
+ return getRandomNodeReport(1, 1);
}
/**
@@ -157,12 +159,15 @@ public static NodeReportProto getRandomNodeReport() {
*
* @param numberOfStorageReport number of storage report this node report
* should have
+ * @param numberOfMetadataStorageReport number of metadata storage report
+ * this node report should have
* @return NodeReportProto
*/
- public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
+ public static NodeReportProto getRandomNodeReport(int numberOfStorageReport,
+ int numberOfMetadataStorageReport) {
UUID nodeId = UUID.randomUUID();
return getRandomNodeReport(nodeId, File.separator + nodeId,
- numberOfStorageReport);
+ numberOfStorageReport, numberOfMetadataStorageReport);
}
/**
@@ -172,42 +177,41 @@ public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
* @param nodeId datanode id
* @param basePath base path of storage directory
* @param numberOfStorageReport number of storage report
+ * @param numberOfMetadataStorageReport number of metadata storage report
*
* @return NodeReportProto
*/
public static NodeReportProto getRandomNodeReport(UUID nodeId,
- String basePath, int numberOfStorageReport) {
+ String basePath, int numberOfStorageReport,
+ int numberOfMetadataStorageReport) {
List<StorageReportProto> storageReports = new ArrayList<>();
for (int i = 0; i < numberOfStorageReport; i++) {
storageReports.add(getRandomStorageReport(nodeId,
- basePath + File.separator + i));
+ basePath + File.separator + "data-" + i));
}
- return createNodeReport(storageReports);
- }
-
- /**
- * Creates NodeReport with the given storage reports.
- *
- * @param reports one or more storage report
- *
- * @return NodeReportProto
- */
- public static NodeReportProto createNodeReport(
- StorageReportProto... reports) {
- return createNodeReport(Arrays.asList(reports));
+ List<MetadataStorageReportProto> metadataStorageReports =
+ new ArrayList<>();
+ for (int i = 0; i < numberOfMetadataStorageReport; i++) {
+ metadataStorageReports.add(getRandomMetadataStorageReport(
+ basePath + File.separator + "metadata-" + i));
+ }
+ return createNodeReport(storageReports, metadataStorageReports);
}
/**
* Creates NodeReport with the given storage reports.
*
* @param reports storage reports to be included in the node report.
- *
+ * @param metaReports metadata storage reports to be included
+ * in the node report.
* @return NodeReportProto
*/
public static NodeReportProto createNodeReport(
- List<StorageReportProto> reports) {
+ List<StorageReportProto> reports,
+ List<MetadataStorageReportProto> metaReports) {
NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder();
nodeReport.addAllStorageReport(reports);
+ nodeReport.addAllMetadataStorageReport(metaReports);
return nodeReport.build();
}
@@ -228,6 +232,22 @@ public static StorageReportProto getRandomStorageReport(UUID nodeId,
StorageTypeProto.DISK);
}
+ /**
+ * Generates random metadata storage report.
+ *
+ * @param path path of the storage
+ *
+ * @return MetadataStorageReportProto
+ */
+ public static MetadataStorageReportProto getRandomMetadataStorageReport(
+ String path) {
+ return createMetadataStorageReport(path,
+ random.nextInt(1000),
+ random.nextInt(500),
+ random.nextInt(500),
+ StorageTypeProto.DISK);
+ }
+
public static StorageReportProto createStorageReport(UUID nodeId, String path,
long capacity, long used, long remaining, StorageTypeProto type) {
return createStorageReport(nodeId, path, capacity, used, remaining,
@@ -263,6 +283,39 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
return srb.build();
}
+ public static MetadataStorageReportProto createMetadataStorageReport(
+ String path, long capacity, long used, long remaining,
+ StorageTypeProto type) {
+ return createMetadataStorageReport(path, capacity, used, remaining,
+ type, false);
+ }
+ /**
+ * Creates metadata storage report with the given information.
+ *
+ * @param path storage dir
+ * @param capacity storage size
+ * @param used space used
+ * @param remaining space remaining
+ * @param type type of storage
+ * @param failed whether the volume is marked failed
+ *
+ * @return MetadataStorageReportProto
+ */
+ public static MetadataStorageReportProto createMetadataStorageReport(
+ String path, long capacity, long used, long remaining,
+ StorageTypeProto type, boolean failed) {
+ Preconditions.checkNotNull(path);
+ MetadataStorageReportProto.Builder srb = MetadataStorageReportProto
+ .newBuilder();
+ srb.setStorageLocation(path)
+ .setCapacity(capacity)
+ .setScmUsed(used)
+ .setFailed(failed)
+ .setRemaining(remaining);
+ StorageTypeProto storageTypeProto =
+ type == null ? StorageTypeProto.DISK : type;
+ srb.setStorageType(storageTypeProto);
+ return srb.build();
+ }
/**
* Generates random container reports.
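Together, the new helpers let a test assemble a full node report carrying both report types; for example (all values illustrative, wrapper class hypothetical):

import java.util.Arrays;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.TestUtils;

final class NodeReportSketch {
  static NodeReportProto sampleNodeReport() {
    UUID nodeId = UUID.randomUUID();
    StorageReportProto data = TestUtils.createStorageReport(
        nodeId, "/data1-" + nodeId, 100L, 0L, 100L, null);
    MetadataStorageReportProto meta = TestUtils.createMetadataStorageReport(
        "/metadata1-" + nodeId, 100L, 0L, 100L, null);
    return TestUtils.createNodeReport(
        Arrays.asList(data), Arrays.asList(meta));
  }
}

A null storage type falls back to StorageTypeProto.DISK in the metadata helper; callers such as MockNodeManager pass null to the data-volume helper in the same way.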
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 7d97d9042d7a..751dee838fd3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -23,6 +23,8 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -243,7 +245,13 @@ public List<DatanodeDetails> getNodes(
StorageReportProto storage1 = TestUtils.createStorageReport(
di.getUuid(), "/data1-" + di.getUuidString(),
capacity, used, remaining, null);
+ MetadataStorageReportProto metaStorage1 =
+ TestUtils.createMetadataStorageReport(
+ "/metadata1-" + di.getUuidString(), capacity, used,
+ remaining, null);
di.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
+ di.updateMetaDataStorageReports(
+ new ArrayList<>(Arrays.asList(metaStorage1)));
healthyNodesWithInfo.add(di);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
index 0b46a64d8913..34f27ce4b57e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
@@ -22,8 +22,10 @@
import java.util.List;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
@@ -42,6 +44,7 @@
import org.junit.Test;
import org.mockito.Mockito;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
@@ -73,6 +76,8 @@ public void setup() {
public void testRackAwarePolicy() throws IOException {
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementRackAware.class.getName());
+ conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+ 0, StorageUnit.MB);
NodeSchema[] schemas = new NodeSchema[]
{ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
@@ -91,8 +96,14 @@ public void testRackAwarePolicy() throws IOException {
StorageReportProto storage1 = TestUtils.createStorageReport(
datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(),
STORAGE_CAPACITY, 0, 100L, null);
+ MetadataStorageReportProto metaStorage1 =
+ TestUtils.createMetadataStorageReport(
+ "/metadata1-" + datanodeInfo.getUuidString(),
+ STORAGE_CAPACITY, 0, 100L, null);
datanodeInfo.updateStorageReports(
new ArrayList<>(Arrays.asList(storage1)));
+ datanodeInfo.updateMetaDataStorageReports(
+ new ArrayList<>(Arrays.asList(metaStorage1)));
datanodes.add(datanodeInfo);
cluster.add(datanodeInfo);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 5f448d7d09fd..f1ff0d9bf4df 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -22,10 +22,11 @@
import java.util.List;
import java.util.Map;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
@@ -36,6 +37,8 @@
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.junit.Assert;
import org.junit.Test;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.mockito.Matchers.anyObject;
import org.mockito.Mockito;
import static org.mockito.Mockito.when;
@@ -47,7 +50,10 @@ public class TestSCMContainerPlacementCapacity {
@Test
public void chooseDatanodes() throws SCMException {
//given
- ConfigurationSource conf = new OzoneConfiguration();
+ OzoneConfiguration conf = new OzoneConfiguration();
+ // We are using small units here
+ conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
+ 1, StorageUnit.BYTES);
List<DatanodeDetails> datanodes = new ArrayList<>();
for (int i = 0; i < 7; i++) {
@@ -58,8 +64,14 @@ public void chooseDatanodes() throws SCMException {
StorageReportProto storage1 = TestUtils.createStorageReport(
datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(),
100L, 0, 100L, null);
+ MetadataStorageReportProto metaStorage1 =
+ TestUtils.createMetadataStorageReport(
+ "/metadata1-" + datanodeInfo.getUuidString(),
+ 100L, 0, 100L, null);
datanodeInfo.updateStorageReports(
new ArrayList<>(Arrays.asList(storage1)));
+ datanodeInfo.updateMetaDataStorageReports(
+ new ArrayList<>(Arrays.asList(metaStorage1)));
datanodes.add(datanodeInfo);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index e1a8d4cc6ec2..013d3ff77dd6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -21,10 +21,11 @@
import java.util.Collection;
import java.util.List;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
import org.apache.hadoop.hdds.scm.TestUtils;
@@ -45,6 +46,8 @@
import org.mockito.Mockito;
import org.apache.commons.lang3.StringUtils;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
@@ -63,7 +66,7 @@
@RunWith(Parameterized.class)
public class TestSCMContainerPlacementRackAware {
private NetworkTopology cluster;
- private ConfigurationSource conf;
+ private OzoneConfiguration conf;
private NodeManager nodeManager;
private final Integer datanodeCount;
private final List<DatanodeDetails> datanodes = new ArrayList<>();
@@ -90,6 +93,9 @@ public static Collection